PERFORCE change 149350 for review

Rafal Jaworowski raj at FreeBSD.org
Sat Sep 6 19:27:16 UTC 2008


http://perforce.freebsd.org/chv.cgi?CH=149350

Change 149350 by raj at raj_mimi on 2008/09/06 19:26:50

	mge(4): Import updated version of the Ethernet controller driver.
	
	This brings the following advanced features:
	- multicast
	- VLAN tagging
	- IP/TCP/UDP checksum calculation offloading
	- polling
	- interrupt coalescing 
	
	Obtained from:	Marvell, Semihalf
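
For reference, the interrupt coalescing feature is exposed through sysctl(9)
knobs: the diff below declares mge_add_sysctls() and mge_sysctl_ic(), but
their bodies fall inside the portion truncated for mail. The following is a
minimal sketch of how such knobs are commonly registered and handled. It is
not the driver's actual code: the MGE_IC_RX/MGE_IC_TX selectors and the
*_sketch names are assumptions; only sc->rx_ic_time/tx_ic_time and
mge_set_rxic()/mge_set_txic() come from the diff itself.

/*
 * Sketch only -- NOT the truncated mge_add_sysctls()/mge_sysctl_ic()
 * bodies.  MGE_IC_RX/MGE_IC_TX and the *_sketch names are assumptions;
 * struct mge_softc and mge_set_rxic()/mge_set_txic() are the driver's.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/sysctl.h>

#define MGE_IC_RX	0	/* hypothetical arg2 selectors */
#define MGE_IC_TX	1

static int
mge_sysctl_ic_sketch(SYSCTL_HANDLER_ARGS)
{
	struct mge_softc *sc = (struct mge_softc *)arg1;
	uint32_t time;
	int error;

	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;

	/* Report the current value; userland may hand back a new one. */
	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Store the new value and program it into the controller. */
	if (arg2 == MGE_IC_RX) {
		sc->rx_ic_time = time;
		mge_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		mge_set_txic(sc);
	}

	return (0);
}

static void
mge_add_sysctls_sketch(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *children =
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ic_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX,
	    mge_sysctl_ic_sketch, "I", "RX interrupt coalescing timer");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ic_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX,
	    mge_sysctl_ic_sketch, "I", "TX interrupt coalescing timer");
}

With something like the above in place, the timers become tunable at run
time, e.g. via a sysctl such as dev.mge.0.rx_ic_time (the exact OID name
is likewise an assumption).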

Affected files ...

.. //depot/projects/arm/src/sys/dev/mge/if_mge.c#4 edit
.. //depot/projects/arm/src/sys/dev/mge/if_mgevar.h#2 edit

Differences ...

==== //depot/projects/arm/src/sys/dev/mge/if_mge.c#4 (text+ko) ====

@@ -29,6 +29,10 @@
  * SUCH DAMAGE.
  */
 
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_device_polling.h"
+#endif
+
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
@@ -50,6 +54,11 @@
 #include <net/if_dl.h>
 #include <net/if_media.h>
 #include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
 
 #include <sys/sockio.h>
 #include <sys/bus.h>
@@ -60,11 +69,20 @@
 #include <dev/mii/mii.h>
 #include <dev/mii/miivar.h>
 
+#if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY)
+#define  MGE_VER2	1
+#endif
+
+#define	MV_PHY_ADDR_BASE	8
+
 #include <dev/mge/if_mgevar.h>
 #include <arm/mv/mvreg.h>
 
 #include "miibus_if.h"
 
+/* PHY registers are in the address space of the first mge unit */
+static struct mge_softc *sc_mge0 = NULL;
+
 static int mge_probe(device_t dev);
 static int mge_attach(device_t dev);
 static int mge_detach(device_t dev);
@@ -85,8 +103,11 @@
 static void mge_watchdog(struct mge_softc *sc);
 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
 
+static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
 static void mge_intr_rx(void *arg);
+static void mge_intr_rx_locked(struct mge_softc *sc, int count);
 static void mge_intr_tx(void *arg);
+static void mge_intr_tx_locked(struct mge_softc *sc);
 static void mge_intr_misc(void *arg);
 static void mge_intr_sum(void *arg);
 static void mge_intr_err(void *arg);
@@ -107,7 +128,16 @@
 static void mge_free_dma(struct mge_softc *sc);
 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
     bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
-
+static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
+    uint32_t status, uint16_t bufsize);
+static void mge_offload_setup_descriptor(struct mge_softc *sc,
+    struct mge_desc_wrapper *dw);
+static uint8_t mge_crc8(uint8_t *data, int size);
+static void mge_setup_multicast(struct mge_softc *sc);
+static void mge_set_rxic(struct mge_softc *sc);
+static void mge_set_txic(struct mge_softc *sc);
+static void mge_add_sysctls(struct mge_softc *sc);
+static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
 
 static device_method_t mge_methods[] = {
 	/* Device interface */
@@ -182,8 +212,7 @@
 	char *if_mac;
 	uint32_t mac_l, mac_h;
 
-	mtx_assert(&sc->transmit_lock, MA_OWNED);
-	mtx_assert(&sc->receive_lock, MA_OWNED);
+	MGE_GLOBAL_LOCK_ASSERT(sc);
 
 	if_mac = (char *)IF_LLADDR(sc->ifp);
 
@@ -203,15 +232,15 @@
 	uint32_t reg_idx, reg_off, reg_val, i;
 
 	last_byte &= 0xf;
-	reg_idx = last_byte / 4;
-	reg_off = (last_byte % 4) * 8;
+	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
+	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
 	reg_val = (1 | (queue << 1)) << reg_off;
 
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
 		if ( i == reg_idx)
-			MGE_WRITE(sc, MGE_DA_FILTER_UCAST + (i * 4), reg_val);
+			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
 		else
-			MGE_WRITE(sc, MGE_DA_FILTER_UCAST + (i * 4), 0);
+			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
 	}
 }
 
@@ -235,8 +264,8 @@
 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
 		}
 
-		for (i = 0; i < 4; i++)
-			MGE_WRITE(sc, MGE_DA_FILTER_UCAST + (i * 4), reg_val);
+		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
+			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
 
 	} else {
 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
@@ -459,7 +488,8 @@
 	struct mge_desc_wrapper *dw;
 	int i;
 
-	mtx_lock(&sc->receive_lock);
+	MGE_RECEIVE_LOCK(sc);
+
 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
 
 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
@@ -480,8 +510,45 @@
 	/* Enable RX queue */
 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
 
-	mtx_unlock(&sc->receive_lock);
+	MGE_RECEIVE_UNLOCK(sc);
+}
+
+#ifdef DEVICE_POLLING
+static poll_handler_t mge_poll;
+
+static void
+mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+{
+	struct mge_softc *sc = ifp->if_softc;
+	uint32_t int_cause, int_cause_ext;
+
+	MGE_GLOBAL_LOCK(sc);
+
+	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+		MGE_GLOBAL_UNLOCK(sc);
+		return;
+	}
+
+	if (cmd == POLL_AND_CHECK_STATUS) {
+		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
+		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
+
+		/* Check for resource error */
+		if (int_cause & MGE_PORT_INT_RXERRQ0)
+			mge_reinit_rx(sc);
+
+		if (int_cause || int_cause_ext) {
+			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
+			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
+		}
+	}
+
+	mge_intr_tx_locked(sc);
+	mge_intr_rx_locked(sc, count);
+
+	MGE_GLOBAL_UNLOCK(sc);
 }
+#endif /* DEVICE_POLLING */
 
 static int
 mge_attach(device_t dev)
@@ -494,6 +561,9 @@
 	sc = device_get_softc(dev);
 	sc->dev = dev;
 
+	if (device_get_unit(dev) == 0)
+		sc_mge0 = sc;
+
 	/* Initialize mutexes */
 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
@@ -517,6 +587,11 @@
 	sc->rx_desc_curr = 0;
 	sc->tx_desc_used_idx = 0;
 
+	/* Configure interrupt coalescing defaults */
+	sc->rx_ic_time = 768;
+	sc->tx_ic_time = 768;
+	mge_add_sysctls(sc);
+
 	/* Allocate network interface */
 	ifp = sc->ifp = if_alloc(IFT_ETHER);
 	if (ifp == NULL) {
@@ -527,23 +602,28 @@
 
 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 	ifp->if_softc = sc;
-	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
-	/* XXX for now IFCAP_RXCSUM is the only extra capability we support */
-	ifp->if_capabilities = IFCAP_RXCSUM;
+	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
+	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
 	ifp->if_capenable = ifp->if_capabilities;
+	ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
+
+#ifdef DEVICE_POLLING
+	/* Advertise that polling is supported */
+	ifp->if_capabilities |= IFCAP_POLLING;
+#endif
 
 	ifp->if_init = mge_init;
 	ifp->if_start = mge_start;
 	ifp->if_ioctl = mge_ioctl;
 
+	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
+	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
+	IFQ_SET_READY(&ifp->if_snd);
+
 	mge_get_mac_address(sc, hwaddr);
 	ether_ifattach(ifp, hwaddr);
 	callout_init(&sc->wd_callout, 0);
 
-	IFQ_SET_MAXLEN(&ifp->if_snd, MGE_TX_DESC_NUM - 1);
-	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
-	IFQ_SET_READY(&ifp->if_snd);
-
 	/* Probe PHY(s) */
 	error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd, mge_ifmedia_sts);
 	if (error) {
@@ -579,10 +659,13 @@
 
 	sc = device_get_softc(dev);
 
-	/* Stop TSEC controller and free TX queue */
+	/* Stop controller and free TX queue */
 	if (sc->ifp)
 		mge_shutdown(dev);
 
+	/* Wait for the watchdog callout to finish */
+	callout_drain(&sc->wd_callout);
+
 	/* Stop and release all interrupts */
 	for (i = 0; i < 2; ++i) {
 		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
@@ -616,7 +699,7 @@
 	struct mge_softc *sc = ifp->if_softc;
 	struct mii_data *mii;
 
-	mtx_lock(&sc->transmit_lock);
+	MGE_TRANSMIT_LOCK(sc);
 
 	mii = sc->mii;
 	mii_pollstat(mii);
@@ -624,7 +707,7 @@
 	ifmr->ifm_active = mii->mii_media_active;
 	ifmr->ifm_status = mii->mii_media_status;
 
-	mtx_unlock(&sc->transmit_lock);
+	MGE_TRANSMIT_UNLOCK(sc);
 }
 
 static uint32_t
@@ -667,15 +750,13 @@
 	struct mge_softc *sc = ifp->if_softc;
 
 	if (ifp->if_flags & IFF_UP) {
-		mtx_lock(&sc->transmit_lock);
-		mtx_lock(&sc->receive_lock);
+		MGE_GLOBAL_LOCK(sc);
 
 		sc->mge_media_status = sc->mii->mii_media.ifm_media;
 		mii_mediachg(sc->mii);
 		mge_init_locked(sc);
 
-		mtx_unlock(&sc->receive_lock);
-		mtx_unlock(&sc->transmit_lock);
+		MGE_GLOBAL_UNLOCK(sc);
 	}
 
 	return (0);
@@ -686,13 +767,11 @@
 {
 	struct mge_softc *sc = arg;
 
-	mtx_lock(&sc->transmit_lock);
-	mtx_lock(&sc->receive_lock);
+	MGE_GLOBAL_LOCK(sc);
 
 	mge_init_locked(arg);
 
-	mtx_unlock(&sc->receive_lock);
-	mtx_unlock(&sc->transmit_lock);
+	MGE_GLOBAL_UNLOCK(sc);
 }
 
 static void
@@ -703,33 +782,31 @@
 	volatile uint32_t reg_val;
 	int i, count;
 
+
+	MGE_GLOBAL_LOCK_ASSERT(sc);
+
 	/* Stop interface */
 	mge_stop(sc);
 
-	/* Clear and mask interrrupts */
-	MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
-	MGE_WRITE(sc, MGE_INT_MASK, 0x0);
-
-	MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
-	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
+	/* Disable interrupts */
+	mge_intrs_ctrl(sc, 0);
 
-	MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
-	MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
-
 	/* Set MAC address */
 	mge_set_mac_address(sc);
 
-	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
-		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0x0);
-		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0x0);
-	}
+	/* Setup multicast filters */
+	mge_setup_multicast(sc);
 
+#if defined(MGE_VER2)
+	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
+	MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
+#endif
 	/* Initialize TX queue configuration registers */
 	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), MGE_TX_TOKEN_Q0_DFLT);
 	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), MGE_TX_TOKEN_Q0_DFLT);
 	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), MGE_TX_ARB_Q0_DFLT);
 
-	for (i = 1; i < MGE_RX_QUEUE_NUM; i++) {
+	for (i = 1; i < 7; i++) {
 		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), MGE_TX_TOKEN_Q1_7_DFLT);
 		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), MGE_TX_TOKEN_Q1_7_DFLT);
 		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), MGE_TX_ARB_Q1_7_DFLT);
@@ -749,11 +826,10 @@
 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
 
 	/* Setup SDMA configuration */
-	MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SMDA_RX_BYTE_SWAP |
-	    MGE_SMDA_TX_BYTE_SWAP |
+	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
+	    MGE_SDMA_TX_BYTE_SWAP |
 	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
-	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
-	    MGE_SMDA_RX_IPG(0));
+	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
 
 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
 
@@ -794,11 +870,21 @@
 		}
 	}
 
+	/* Set up interrupt coalescing */
+	mge_set_rxic(sc);
+	mge_set_txic(sc);
+
 	/* Enable interrupts */
-	MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RX0 |
-	    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERR0);
-	MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR |
-	    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR | MGE_PORT_INT_EXT_TXBUF);
+#ifdef DEVICE_POLLING
+	/*
+	 * ...only if polling is not turned on. Disable interrupts explicitly
+	 * if polling is enabled.
+	 */
+	if (sc->ifp->if_capenable & IFCAP_POLLING)
+		mge_intrs_ctrl(sc, 0);
+	else
+#endif /* DEVICE_POLLING */
+	mge_intrs_ctrl(sc, 1);
 
 	/* Activate network interface */
 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
@@ -830,86 +916,95 @@
 }
 
 static void
-mge_intr_rx(void *arg)
-{
+mge_intr_rx(void *arg) {
 	struct mge_softc *sc = arg;
-	struct ifnet *ifp;
-	uint32_t int_cause, int_cause_ext, status;
-	struct mge_desc_wrapper* dw;
-	struct mbuf *mb;
-	struct mbuf *rcv_mbufs[MGE_RX_DESC_NUM];
-	int c1 = 0;
-	int c2;
+	uint32_t int_cause, int_cause_ext;
+
+	MGE_RECEIVE_LOCK(sc);
 
-	ifp = sc->ifp;
+#ifdef DEVICE_POLLING
+	if (sc->ifp->if_capenable & IFCAP_POLLING) {
+		MGE_RECEIVE_UNLOCK(sc);
+		return;
+	}
+#endif
 
 	/* Get interrupt cause */
 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
 
 	/* Check for resource error */
-	if (int_cause & MGE_PORT_INT_RXERR0) {
+	if (int_cause & MGE_PORT_INT_RXERRQ0) {
 		mge_reinit_rx(sc);
 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
-		    int_cause & ~MGE_PORT_INT_RXERR0);
+		    int_cause & ~MGE_PORT_INT_RXERRQ0);
 	}
 
-	int_cause &= MGE_PORT_INT_RX0;
+	int_cause &= MGE_PORT_INT_RXQ0;
 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
 
 	if (int_cause || int_cause_ext) {
 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
+		mge_intr_rx_locked(sc, -1);
+	}
 
-		mtx_lock(&sc->receive_lock);
+	MGE_RECEIVE_UNLOCK(sc);
+}
+
+
+static void
+mge_intr_rx_locked(struct mge_softc *sc, int count)
+{
+	struct ifnet *ifp = sc->ifp;
+	uint32_t status;
+	uint16_t bufsize;
+	struct mge_desc_wrapper* dw;
+	struct mbuf *mb;
 
-		while(1) {
-			dw = &sc->mge_rx_desc[sc->rx_desc_curr];
-			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
-			    BUS_DMASYNC_POSTREAD);
+	MGE_RECEIVE_LOCK_ASSERT(sc);
 
-			/* Get status */
-			status = dw->mge_desc->cmd_status;
-			if ((status & MGE_DMA_OWNED) != 0)
-				break;
+	while(count != 0) {
+		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
+		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
+		    BUS_DMASYNC_POSTREAD);
 
-			sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
-			if (dw->mge_desc->byte_count &&
-			    ~(status & MGE_ERR_SUMMARY)) {
+		/* Get status */
+		status = dw->mge_desc->cmd_status;
+		bufsize = dw->mge_desc->buff_size;
+		if ((status & MGE_DMA_OWNED) != 0)
+			break;
 
-				bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
-				    BUS_DMASYNC_POSTREAD);
+		sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
+		if (dw->mge_desc->byte_count &&
+		    ~(status & MGE_ERR_SUMMARY)) {
 
-				mb = m_devget(dw->buffer->m_data,
-				    dw->mge_desc->byte_count - ETHER_CRC_LEN,
-				    0, ifp, NULL);
+			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
+			    BUS_DMASYNC_POSTREAD);
 
-				mb->m_len -= 2;
-				mb->m_pkthdr.len -= 2;
-				mb->m_data += 2;
+			mb = m_devget(dw->buffer->m_data,
+			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
+			    0, ifp, NULL);
 
-				if (ifp->if_capenable & IFCAP_RXCSUM) {
-					if (status & MGE_RX_IP_OK)
-						mb->m_pkthdr.csum_flags =
-						    CSUM_IP_CHECKED | CSUM_IP_VALID;
-				}
+			mb->m_len -= 2;
+			mb->m_pkthdr.len -= 2;
+			mb->m_data += 2;
 
-				rcv_mbufs[c1++] = mb;
-				if (c1 >= MGE_RX_DESC_NUM)
-					break;
-			}
+			mge_offload_process_frame(ifp, mb, status,
+			    bufsize);
 
-			dw->mge_desc->byte_count = 0;
-			dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
-			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
-			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+			(*ifp->if_input)(ifp, mb);
 		}
 
-		mtx_unlock(&sc->receive_lock);
+		dw->mge_desc->byte_count = 0;
+		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
+		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 
-		for (c2 = 0; c2 < c1; c2++)
-			(*ifp->if_input)(ifp, rcv_mbufs[c2]);
+		if (count > 0)
+			count -= 1;
 	}
+
 	return;
 }
 
@@ -927,26 +1022,42 @@
 mge_intr_tx(void *arg)
 {
 	struct mge_softc *sc = arg;
-	struct ifnet *ifp;
-	uint32_t int_cause_ext, status;
-	struct mge_desc_wrapper *dw;
-	struct mge_desc *desc;
-	int send = 0;
+	uint32_t int_cause_ext;
+
+	MGE_TRANSMIT_LOCK(sc);
 
-	ifp = sc->ifp;
+#ifdef DEVICE_POLLING
+	if (sc->ifp->if_capenable & IFCAP_POLLING) {
+		MGE_TRANSMIT_UNLOCK(sc);
+		return;
+	}
+#endif
 
 	/* Ack the interrupt */
 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
 	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
-	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF);
+	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
+
+	mge_intr_tx_locked(sc);
+
+	MGE_TRANSMIT_UNLOCK(sc);
+}
+
+
+static void
+mge_intr_tx_locked(struct mge_softc *sc)
+{
+	struct ifnet *ifp = sc->ifp;
+	struct mge_desc_wrapper *dw;
+	struct mge_desc *desc;
+	uint32_t status;
+	int send = 0;
+
+	MGE_TRANSMIT_LOCK_ASSERT(sc);
 
 	/* Disable watchdog */
 	sc->wd_timer = 0;
 
-	/* Acquire the mutex */
-	mtx_assert(&sc->receive_lock, MA_NOTOWNED);
-	mtx_lock(&sc->transmit_lock);
-
 	while (sc->tx_desc_used_count) {
 		/* Get the descriptor */
 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
@@ -987,8 +1098,6 @@
 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 		mge_start_locked(ifp);
 	}
-
-	mtx_unlock(&sc->transmit_lock);
 }
 
 static int
@@ -996,21 +1105,24 @@
 {
 	struct mge_softc *sc = ifp->if_softc;
 	struct ifreq *ifr = (struct ifreq *)data;
-	int error;
+	int mask, error;
 	uint32_t flags;
 
 	error = 0;
 
 	switch (command) {
 	case SIOCSIFFLAGS:
-		mtx_lock(&sc->transmit_lock);
-		mtx_lock(&sc->receive_lock);
+		MGE_GLOBAL_LOCK(sc);
 
 		if (ifp->if_flags & IFF_UP) {
 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 				flags = ifp->if_flags ^ sc->mge_if_flags;
 				if (flags & IFF_PROMISC)
-					mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);
+					mge_set_prom_mode(sc,
+					    MGE_RX_DEFAULT_QUEUE);
+
+				if (flags & IFF_ALLMULTI)
+					mge_setup_multicast(sc);
 			} else
 				mge_init_locked(sc);
 		}
@@ -1018,11 +1130,55 @@
 			mge_stop(sc);
 
 		sc->mge_if_flags = ifp->if_flags;
-		mtx_unlock(&sc->transmit_lock);
-		mtx_unlock(&sc->receive_lock);
+		MGE_GLOBAL_UNLOCK(sc);
+		break;
+	case SIOCADDMULTI:
+	case SIOCDELMULTI:
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+			MGE_GLOBAL_LOCK(sc);
+			mge_setup_multicast(sc);
+			MGE_GLOBAL_UNLOCK(sc);
+		}
+		break;
+	case SIOCSIFCAP:
+		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
+		if (mask & IFCAP_HWCSUM) {
+			ifp->if_capenable &= ~IFCAP_HWCSUM;
+			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
+			if (ifp->if_capenable & IFCAP_TXCSUM)
+				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
+			else
+				ifp->if_hwassist = 0;
+		}
+#ifdef DEVICE_POLLING
+		if (mask & IFCAP_POLLING) {
+			if (ifr->ifr_reqcap & IFCAP_POLLING) {
+				error = ether_poll_register(mge_poll, ifp);
+				if (error)
+					return(error);
+
+				MGE_GLOBAL_LOCK(sc);
+				mge_intrs_ctrl(sc, 0);
+				ifp->if_capenable |= IFCAP_POLLING;
+				MGE_GLOBAL_UNLOCK(sc);
+			} else {
+				error = ether_poll_deregister(ifp);
+				MGE_GLOBAL_LOCK(sc);
+				mge_intrs_ctrl(sc, 1);
+				ifp->if_capenable &= ~IFCAP_POLLING;
+				MGE_GLOBAL_UNLOCK(sc);
+			}
+		}
+#endif
 		break;
 	case SIOCGIFMEDIA: /* fall through */
 	case SIOCSIFMEDIA:
+		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
+		    && !(ifr->ifr_media & IFM_FDX)) {
+			device_printf(sc->dev,
+			    "1000baseTX half-duplex unsupported\n");
+			return 0;
+		}
 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
 		break;
 	default:
@@ -1034,35 +1190,46 @@
 static int
 mge_miibus_readreg(device_t dev, int phy, int reg)
 {
-	struct mge_softc *sc;
 	uint32_t retries;
 
-	sc = device_get_softc(dev);
-	MGE_WRITE(sc, MGE_REG_SMI, 0x1fffffff &
+	/*
+	 * We assume a static PHY address <=> device unit mapping:
+	 * PHY address = MV_PHY_ADDR_BASE + device unit.
+	 * This is true for most Marvell boards.
+	 *
+	 * The code below ensures proper PHY detection on each
+	 * device unit.
+	 */
+
+	if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy)
+		return (0);
+
+	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
 
 	retries = MGE_SMI_READ_RETRIES;
-	while (--retries && !(MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID))
+	while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
 		DELAY(MGE_SMI_READ_DELAY);
 
 	if (retries == 0)
 		device_printf(dev, "Timeout while reading from PHY\n");
 
-	return (MGE_READ(sc, MGE_REG_SMI) & 0xffff);
+	return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
 }
 
 static void
 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
 {
-	struct mge_softc *sc;
 	uint32_t retries;
 
-	sc = device_get_softc(dev);
-	MGE_WRITE(sc, MGE_REG_SMI, 0x1fffffff &
+	if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy)
+		return;
+
+	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
 
 	retries = MGE_SMI_WRITE_RETRIES;
-	while (--retries && MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY)
+	while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
 		DELAY(MGE_SMI_WRITE_DELAY);
 
 	if (retries == 0)
@@ -1074,7 +1241,6 @@
 {
 
 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
-
 	return (BUS_PROBE_DEFAULT);
 }
 
@@ -1091,13 +1257,16 @@
 {
 	struct mge_softc *sc = device_get_softc(dev);
 
-	mtx_lock(&sc->transmit_lock);
-	mtx_lock(&sc->receive_lock);
+	MGE_GLOBAL_LOCK(sc);
+
+#ifdef DEVICE_POLLING
+	if (sc->ifp->if_capenable & IFCAP_POLLING)
+		ether_poll_deregister(sc->ifp);
+#endif
 
 	mge_stop(sc);
 
-	mtx_unlock(&sc->transmit_lock);
-	mtx_unlock(&sc->receive_lock);
+	MGE_GLOBAL_UNLOCK(sc);
 
 	return (0);
 }
@@ -1116,9 +1285,10 @@
 	ifp = sc->ifp;
 
 	/* Check for free descriptors */
-	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM)
+	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
 		/* No free descriptors */
 		return (-1);
+	}
 
 	/* Fetch unused map */
 	desc_no = sc->tx_desc_curr;
@@ -1143,9 +1313,12 @@
 		dw->mge_desc->byte_count = segs[seg].ds_len;
 		dw->mge_desc->buffer = segs[seg].ds_addr;
 		dw->buffer = m0;
-		dw->mge_desc->cmd_status = MGE_TX_DESC_LAST |
-		    MGE_TX_DESC_FIRST | MGE_TX_DESC_ETH_CRC |
-		    MGE_TX_DESC_EN_INT | MGE_TX_DESC_PADDING | MGE_DMA_OWNED;
+		dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
+		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
+		    MGE_DMA_OWNED;
+
+		if (seg == 0)
+			mge_offload_setup_descriptor(sc, dw);
 	}
 
 	bus_dmamap_sync(sc->mge_desc_dtag, mapp,
@@ -1181,12 +1354,10 @@
 
 	ifp = sc->ifp;
 
-	mtx_lock(&sc->transmit_lock);
-	mtx_lock(&sc->receive_lock);
+	MGE_GLOBAL_LOCK(sc);
 
 	if (sc->wd_timer == 0 || --sc->wd_timer) {
-		mtx_unlock(&sc->receive_lock);
-		mtx_unlock(&sc->transmit_lock);
+		MGE_GLOBAL_UNLOCK(sc);
 		return;
 	}
 
@@ -1196,8 +1367,7 @@
 	mge_stop(sc);
 	mge_init_locked(sc);
 
-	mtx_unlock(&sc->receive_lock);
-	mtx_unlock(&sc->transmit_lock);
+	MGE_GLOBAL_UNLOCK(sc);
 }
 
 static void
@@ -1205,12 +1375,11 @@
 {
 	struct mge_softc *sc = ifp->if_softc;
 
-	mtx_assert(&sc->receive_lock, MA_NOTOWNED);
-	mtx_lock(&sc->transmit_lock);
+	MGE_TRANSMIT_LOCK(sc);
 
 	mge_start_locked(ifp);
 
-	mtx_unlock(&sc->transmit_lock);
+	MGE_TRANSMIT_UNLOCK(sc);
 }
 
 static void
@@ -1218,10 +1387,12 @@
 {
 	struct mge_softc *sc;
 	struct mbuf *m0, *mtmp;
-	unsigned int queued = 0;
+	uint32_t reg_val, queued = 0;
 
 	sc = ifp->if_softc;
 
+	MGE_TRANSMIT_LOCK_ASSERT(sc);
+
 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 	    IFF_DRV_RUNNING)
 		return;
@@ -1247,7 +1418,8 @@
 
 	if (queued) {
 		/* Enable transmitter and watchdog timer */
-		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, MGE_ENABLE_TXQ);
+		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
+		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
 		sc->wd_timer = 5;
 	}
 }
@@ -1266,16 +1438,19 @@
 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 		return;
 
+	/* Stop tick engine */
+	callout_stop(&sc->wd_callout);
+
 	/* Disable interface */
 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 	sc->wd_timer = 0;
 
-	/* Disable all interrupts and stop DMA */
-	MGE_WRITE(sc, MGE_PORT_INT_MASK , 0x0);
-	MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , 0x0);
+	/* Disable interrupts */
+	mge_intrs_ctrl(sc, 0);
 
 	/* Disable Rx and Tx */
-	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, MGE_DISABLE_TXQ);
+	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
+	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
 
 	/* Remove pending data from TX queue */
@@ -1329,3 +1504,252 @@
 	device_printf(dev, "%s\n", __FUNCTION__);
 	return (0);
 }
+
+static void
+mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
+    uint32_t status, uint16_t bufsize)
+{
+	int csum_flags = 0;
+
+	if (ifp->if_capenable & IFCAP_RXCSUM) {
+		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
+			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
+
+		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
+		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
+		    (status & MGE_RX_L4_CSUM_OK)) {
+			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+			frame->m_pkthdr.csum_data = 0xFFFF;
+		}
+
+		frame->m_pkthdr.csum_flags = csum_flags;
+	}
+}
+
+static void
+mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
+{
+	struct mbuf *m0 = dw->buffer;
+	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
+	int csum_flags = m0->m_pkthdr.csum_flags;
+	int cmd_status = 0;
+	struct ip *ip;
+	int ehlen, etype;
+
+	if (csum_flags) {
+		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+			etype = ntohs(eh->evl_proto);
+			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+			csum_flags |= MGE_TX_VLAN_TAGGED;
+		} else {
+			etype = ntohs(eh->evl_encap_proto);
+			ehlen = ETHER_HDR_LEN;
+		}
+
+		if (etype != ETHERTYPE_IP) {
+			if_printf(sc->ifp,
+			    "TCP/IP Offload enabled for unsupported "
+			    "protocol!\n");
+			return;
+		}
+
+		ip = (struct ip *)(m0->m_data + ehlen);
+		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
+
+		if ((m0->m_flags & M_FRAG) == 0)
+			cmd_status |= MGE_TX_NOT_FRAGMENT;
+	}
+
+	if (csum_flags & CSUM_IP)
+		cmd_status |= MGE_TX_GEN_IP_CSUM;
+
+	if (csum_flags & CSUM_TCP)
+		cmd_status |= MGE_TX_GEN_L4_CSUM;
+
+	if (csum_flags & CSUM_UDP)
+		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
+
+	dw->mge_desc->cmd_status |= cmd_status;
+}
+
+static void
+mge_intrs_ctrl(struct mge_softc *sc, int enable)
+{
+
+	if (enable) {
+		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
+		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
+		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
+		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
+		    MGE_PORT_INT_EXT_TXBUF0);
+	} else {
+		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
+		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
+
+		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
+		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
+
+		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
+		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
+	}
+}
+
+static uint8_t
+mge_crc8(uint8_t *data, int size)
+{

>>> TRUNCATED FOR MAIL (1000 lines) <<<
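
Note that the mge_crc8() body (declared earlier next to mge_setup_multicast(),
which presumably uses it for the multicast hash filters) is among the
truncated lines. For illustration only, a generic bitwise CRC-8 looks like
the sketch below; the polynomial (0x07 here), initial value, and bit order
the driver actually uses are not visible in this excerpt and may differ.

#include <sys/types.h>

/*
 * Generic MSB-first CRC-8 over polynomial 0x07 -- a sketch, NOT the
 * truncated mge_crc8() body.
 */
static uint8_t
crc8_sketch(const uint8_t *data, int size)
{
	uint8_t crc = 0;	/* assumed initial value */
	int i, bit;

	for (i = 0; i < size; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : crc << 1;
	}

	return (crc);
}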

