kern/120232: Bring in nfe(4) to RELENG_6 [PATCH]

Gary Stanley gary at velocity-servers.net
Sat Feb 2 20:00:06 PST 2008


>Number:         120232
>Category:       kern
>Synopsis:       Bring in nfe(4) to RELENG_6 [PATCH]
>Confidential:   no
>Severity:       non-critical
>Priority:       low
>Responsible:    freebsd-bugs
>State:          open
>Quarter:        
>Keywords:       
>Date-Required:
>Class:          update
>Submitter-Id:   current-users
>Arrival-Date:   Sun Feb 03 04:00:02 UTC 2008
>Closed-Date:
>Last-Modified:
>Originator:     Gary Stanley
>Release:        6.3-RELEASE
>Organization:
>Environment:
>Description:
I've created a patch that brings the nfe(4) driver from yongari's nfe site (nfe-20071124.tar.gz) into RELENG_6. I've modified the sources so the driver can be built into the kernel as well as loaded as a module. It passes all of the local tests I have, and it works flawlessly on some Asus boards I have here for testing.

The driver currently lacks a man page; I may write one or port the one over from RELENG_7 (which should be fine).
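
For anyone who wants to test this: a minimal sketch of how I've been
running it here (the module name is assumed to match the RELENG_7
layout). To compile the driver into the kernel, add the usual lines to
the kernel config:

	device		miibus		# required by nfe
	device		nfe

Alternatively it can be loaded as a module (if_nfe.ko, assuming the
sys/modules/nfe Makefile is brought over from RELENG_7 as well). The
driver also honors three loader tunables, which can be set in
/boot/loader.conf:

	hw.nfe.msi_disable=1		# disable MSI
	hw.nfe.msix_disable=1		# disable MSI-X
	hw.nfe.jumbo_disable=1		# disable jumbo frame support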

>How-To-Repeat:

>Fix:


Patch attached with submission follows:

diff -ur sys.old/conf/files sys/conf/files
--- sys.old/conf/files	2007-12-07 08:45:27.000000000 +0000
+++ sys/conf/files	2008-02-02 12:44:53.000000000 +0000
@@ -795,6 +795,7 @@
 dev/mpt/mpt_pci.c		optional mpt pci
 dev/mpt/mpt_raid.c		optional mpt
 dev/msk/if_msk.c		optional msk
 dev/my/if_my.c			optional my
 dev/ncv/ncr53c500.c		optional ncv
 dev/ncv/ncr53c500_pccard.c	optional ncv pccard
+dev/nfe/if_nfe.c		optional nfe
diff -ur -N sys.old/dev/nfe/if_nfe.c sys/dev/nfe/if_nfe.c
--- sys.old/dev/nfe/if_nfe.c	1970-01-01 00:00:00.000000000 +0000
+++ sys/dev/nfe/if_nfe.c	2008-02-02 12:58:51.000000000 +0000
@@ -0,0 +1,3220 @@
+/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/
+
+/*-
+ * Copyright (c) 2006 Shigeaki Tagashira <shigeaki at se.hiroshima-u.ac.jp>
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini at free.fr>
+ * Copyright (c) 2005, 2006 Jonathan Gray <jsg at openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: /usr/local/www/cvsroot/FreeBSD/src/sys/dev/nfe/if_nfe.c,v 1.21 2007/09/14 05:12:25 yongari Exp $");
+
+/* Uncomment the following line to enable polling. */
+/* #define	DEVICE_POLLING */
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_device_polling.h"
+#endif
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <sys/systm.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/kernel.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <net/bpf.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <dev/nfe/if_nfereg.h>
+#include <dev/nfe/if_nfevar.h>
+
+MODULE_DEPEND(nfe, pci, 1, 1, 1);
+MODULE_DEPEND(nfe, ether, 1, 1, 1);
+MODULE_DEPEND(nfe, miibus, 1, 1, 1);
+
+/* "device miibus" required.  See GENERIC if you get errors here. */
+#include "miibus_if.h"
+
+static int  nfe_probe(device_t);
+static int  nfe_attach(device_t);
+static int  nfe_detach(device_t);
+static int  nfe_suspend(device_t);
+static int  nfe_resume(device_t);
+static void nfe_shutdown(device_t);
+static void nfe_power(struct nfe_softc *);
+static int  nfe_miibus_readreg(device_t, int, int);
+static int  nfe_miibus_writereg(device_t, int, int, int);
+static void nfe_miibus_statchg(device_t);
+static void nfe_link_task(void *, int);
+static void nfe_set_intr(struct nfe_softc *);
+static __inline void nfe_enable_intr(struct nfe_softc *);
+static __inline void nfe_disable_intr(struct nfe_softc *);
+static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
+static void nfe_alloc_msix(struct nfe_softc *, int);
+static void nfe_intr(void *);
+static void nfe_int_task(void *, int);
+static void *nfe_jalloc(struct nfe_softc *);
+static void nfe_jfree(void *, void *);
+static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
+static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
+static int nfe_newbuf(struct nfe_softc *, int);
+static int nfe_jnewbuf(struct nfe_softc *, int);
+static int  nfe_rxeof(struct nfe_softc *, int);
+static int  nfe_jrxeof(struct nfe_softc *, int);
+static void nfe_txeof(struct nfe_softc *);
+static struct mbuf *nfe_defrag(struct mbuf *, int, int);
+static int  nfe_encap(struct nfe_softc *, struct mbuf **);
+static void nfe_setmulti(struct nfe_softc *);
+static void nfe_tx_task(void *, int);
+static void nfe_start(struct ifnet *);
+static void nfe_watchdog(struct ifnet *);
+static void nfe_init(void *);
+static void nfe_init_locked(void *);
+static void nfe_stop(struct ifnet *);
+static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
+static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
+static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
+static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
+static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
+static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
+static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
+static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
+static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
+static int  nfe_ifmedia_upd(struct ifnet *);
+static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
+static void nfe_tick(void *);
+static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
+static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
+static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
+
+static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
+static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
+
+#ifdef NFE_DEBUG
+static int nfedebug = 0;
+#define	DPRINTF(sc, ...)	do {				\
+	if (nfedebug)						\
+		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
+} while (0)
+#define	DPRINTFN(sc, n, ...)	do {				\
+	if (nfedebug >= (n))					\
+		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
+} while (0)
+#else
+#define	DPRINTF(sc, ...)
+#define	DPRINTFN(sc, n, ...)
+#endif
+
+#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
+#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
+#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
+
+#define	NFE_JLIST_LOCK(_sc)	mtx_lock(&(_sc)->nfe_jlist_mtx)
+#define	NFE_JLIST_UNLOCK(_sc)	mtx_unlock(&(_sc)->nfe_jlist_mtx)
+
+/* Definitions for backporting to RELENG_6. */
+#ifndef	IFCAP_TSO4
+#define	IFCAP_TSO4	0
+#define	CSUM_TSO	0
+#endif
+#ifndef	VLAN_CAPABILITIES
+#define	VLAN_CAPABILITIES(x)
+#endif
+#ifndef	IFCAP_VLAN_HWCSUM
+#define	IFCAP_VLAN_HWCSUM	0
+#endif
+
+/* Tunables. */
+static int msi_disable = 0;
+static int msix_disable = 0;
+static int jumbo_disable = 0;
+TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
+TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
+TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
+
+static device_method_t nfe_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		nfe_probe),
+	DEVMETHOD(device_attach,	nfe_attach),
+	DEVMETHOD(device_detach,	nfe_detach),
+	DEVMETHOD(device_suspend,	nfe_suspend),
+	DEVMETHOD(device_resume,	nfe_resume),
+	DEVMETHOD(device_shutdown,	nfe_shutdown),
+
+	/* bus interface */
+	DEVMETHOD(bus_print_child,	bus_generic_print_child),
+	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
+
+	/* MII interface */
+	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
+	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
+	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),
+
+	{ NULL, NULL }
+};
+
+static driver_t nfe_driver = {
+	"nfe",
+	nfe_methods,
+	sizeof(struct nfe_softc)
+};
+
+static devclass_t nfe_devclass;
+
+DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
+DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
+
+static struct nfe_type nfe_devs[] = {
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
+	    "NVIDIA nForce MCP Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
+	    "NVIDIA nForce2 MCP2 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
+	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
+	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
+	    "NVIDIA nForce3 MCP3 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
+	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
+	    "NVIDIA nForce3 MCP7 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
+	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
+	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
+	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
+	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
+	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
+	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
+	    "NVIDIA nForce MCP55 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
+	    "NVIDIA nForce MCP55 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
+	    "NVIDIA nForce MCP61 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
+	    "NVIDIA nForce MCP61 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
+	    "NVIDIA nForce MCP61 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
+	    "NVIDIA nForce MCP61 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
+	    "NVIDIA nForce MCP65 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
+	    "NVIDIA nForce MCP65 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
+	    "NVIDIA nForce MCP65 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
+	    "NVIDIA nForce MCP65 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
+	    "NVIDIA nForce MCP67 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
+	    "NVIDIA nForce MCP67 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
+	    "NVIDIA nForce MCP67 Networking Adapter"},
+	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
+	    "NVIDIA nForce MCP67 Networking Adapter"},
+	{0, 0, NULL}
+};
+
+
+/* Probe for supported hardware IDs. */
+static int
+nfe_probe(device_t dev)
+{
+	struct nfe_type *t;
+
+	t = nfe_devs;
+	/* Check for matching PCI device IDs. */
+	while (t->name != NULL) {
+		if ((pci_get_vendor(dev) == t->vid_id) &&
+		    (pci_get_device(dev) == t->dev_id)) {
+			device_set_desc(dev, t->name);
+			return (BUS_PROBE_DEFAULT);
+		}
+		t++;
+	}
+
+	return (ENXIO);
+}
+
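+/* Map the MSI-X table and PBA BARs, then try to allocate MSI-X vectors. */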
+static void
+nfe_alloc_msix(struct nfe_softc *sc, int count)
+{
+	int rid;
+
+	rid = PCIR_BAR(2);
+	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
+	    &rid, RF_ACTIVE);
+	if (sc->nfe_msix_res == NULL) {
+		device_printf(sc->nfe_dev,
+		    "couldn't allocate MSIX table resource\n");
+		return;
+	}
+	rid = PCIR_BAR(3);
+	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
+	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
+	if (sc->nfe_msix_pba_res == NULL) {
+		device_printf(sc->nfe_dev,
+		    "couldn't allocate MSIX PBA resource\n");
+		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
+		    sc->nfe_msix_res);
+		sc->nfe_msix_res = NULL;
+		return;
+	}
+
+	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
+		if (count == NFE_MSI_MESSAGES) {
+			if (bootverbose)
+				device_printf(sc->nfe_dev,
+				    "Using %d MSIX messages\n", count);
+			sc->nfe_msix = 1;
+		} else {
+			if (bootverbose)
+				device_printf(sc->nfe_dev,
+				    "couldn't allocate MSIX\n");
+			pci_release_msi(sc->nfe_dev);
+			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
+			    PCIR_BAR(3), sc->nfe_msix_pba_res);
+			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
+			    PCIR_BAR(2), sc->nfe_msix_res);
+			sc->nfe_msix_pba_res = NULL;
+			sc->nfe_msix_res = NULL;
+		}
+	}
+}
+
+static int
+nfe_attach(device_t dev)
+{
+	struct nfe_softc *sc;
+	struct ifnet *ifp;
+	bus_addr_t dma_addr_max;
+	int error = 0, i, msic, reg, rid;
+
+	sc = device_get_softc(dev);
+	sc->nfe_dev = dev;
+
+	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
+	    MTX_DEF);
+	mtx_init(&sc->nfe_jlist_mtx, "nfe_jlist_mtx", NULL, MTX_DEF);
+	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
+	TASK_INIT(&sc->nfe_link_task, 0, nfe_link_task, sc);
+	SLIST_INIT(&sc->nfe_jfree_listhead);
+	SLIST_INIT(&sc->nfe_jinuse_listhead);
+
+	pci_enable_busmaster(dev);
+
+	rid = PCIR_BAR(0);
+	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+	    RF_ACTIVE);
+	if (sc->nfe_res[0] == NULL) {
+		device_printf(dev, "couldn't map memory resources\n");
+		mtx_destroy(&sc->nfe_mtx);
+		return (ENXIO);
+	}
+
+	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
+		uint16_t v, width;
+
+		v = pci_read_config(dev, reg + 0x08, 2);
+		/* Change max. read request size to 4096. */
+		v &= ~(7 << 12);
+		v |= (5 << 12);
+		pci_write_config(dev, reg + 0x08, v, 2);
+
+		v = pci_read_config(dev, reg + 0x0c, 2);
+		/* link capability */
+		v = (v >> 4) & 0x0f;
+		width = pci_read_config(dev, reg + 0x12, 2);
+		/* negotiated link width */
+		width = (width >> 4) & 0x3f;
+		if (v != width)
+			device_printf(sc->nfe_dev,
+			    "warning, negotiated width of link(x%d) != "
+			    "max. width of link(x%d)\n", width, v);
+	}
+
+	/* Allocate interrupt */
+	if (msix_disable == 0 || msi_disable == 0) {
+		if (msix_disable == 0 &&
+		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
+			nfe_alloc_msix(sc, msic);
+		if (msi_disable == 0 && sc->nfe_msix == 0 &&
+		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
+		    pci_alloc_msi(dev, &msic) == 0) {
+			if (msic == NFE_MSI_MESSAGES) {
+				if (bootverbose)
+					device_printf(dev,
+					    "Using %d MSI messages\n", msic);
+				sc->nfe_msi = 1;
+			} else
+				pci_release_msi(dev);
+		}
+	}
+
+	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
+		rid = 0;
+		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+		    RF_SHAREABLE | RF_ACTIVE);
+		if (sc->nfe_irq[0] == NULL) {
+			device_printf(dev, "couldn't allocate IRQ resources\n");
+			error = ENXIO;
+			goto fail;
+		}
+	} else {
+		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
+			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
+			    SYS_RES_IRQ, &rid, RF_ACTIVE);
+			if (sc->nfe_irq[i] == NULL) {
+				device_printf(dev,
+				    "couldn't allocate IRQ resources for "
+				    "message %d\n", rid);
+				error = ENXIO;
+				goto fail;
+			}
+		}
+		/* Map interrupts to vector 0. */
+		if (sc->nfe_msix != 0) {
+			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
+			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
+		} else if (sc->nfe_msi != 0) {
+			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
+			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
+		}
+	}
+
+	/* Set IRQ status/mask register. */
+	sc->nfe_irq_status = NFE_IRQ_STATUS;
+	sc->nfe_irq_mask = NFE_IRQ_MASK;
+	sc->nfe_intrs = NFE_IRQ_WANTED;
+	sc->nfe_nointrs = 0;
+	if (sc->nfe_msix != 0) {
+		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
+		sc->nfe_nointrs = NFE_IRQ_WANTED;
+	} else if (sc->nfe_msi != 0) {
+		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
+		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
+	}
+
+	sc->nfe_devid = pci_get_device(dev);
+	sc->nfe_revid = pci_get_revid(dev);
+	sc->nfe_flags = 0;
+
+	switch (sc->nfe_devid) {
+	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
+	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
+	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
+	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
+		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
+		break;
+	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
+	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
+		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
+		break;
+	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
+	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
+	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
+	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
+		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
+		break;
+	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
+	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
+		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
+		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL;
+		break;
+
+	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
+	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
+	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
+	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
+	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
+	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
+	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
+	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
+		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
+		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL;
+		break;
+	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
+	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
+	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
+	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
+		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
+		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL;
+		break;
+	}
+
+	nfe_power(sc);
+	/* Check for reversed ethernet address */
+	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
+		sc->nfe_flags |= NFE_CORRECT_MACADDR;
+	nfe_get_macaddr(sc, sc->eaddr);
+	/*
+	 * Allocate the parent bus DMA tag appropriate for PCI.
+	 */
+	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
+	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
+		dma_addr_max = NFE_DMA_MAXADDR;
+	error = bus_dma_tag_create(
+	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
+	    1, 0,				/* alignment, boundary */
+	    dma_addr_max,			/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filter, filterarg */
+	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
+	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
+	    0,					/* flags */
+	    NULL, NULL,				/* lockfunc, lockarg */
+	    &sc->nfe_parent_tag);
+	if (error)
+		goto fail;
+
+	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		device_printf(dev, "can not if_alloc()\n");
+		error = ENOSPC;
+		goto fail;
+	}
+	TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);
+
+	/*
+	 * Allocate Tx and Rx rings.
+	 */
+	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
+		goto fail;
+
+	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
+		goto fail;
+
+	nfe_alloc_jrx_ring(sc, &sc->jrxq);
+
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
+	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
+	    "max number of Rx events to process");
+
+	sc->nfe_process_limit = NFE_PROC_DEFAULT;
+	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
+	    "process_limit", &sc->nfe_process_limit);
+	if (error == 0) {
+		if (sc->nfe_process_limit < NFE_PROC_MIN ||
+		    sc->nfe_process_limit > NFE_PROC_MAX) {
+			device_printf(dev, "process_limit value out of range; "
+			    "using default: %d\n", NFE_PROC_DEFAULT);
+			sc->nfe_process_limit = NFE_PROC_DEFAULT;
+		}
+	}
+
+	ifp->if_softc = sc;
+	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+	ifp->if_mtu = ETHERMTU;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_ioctl = nfe_ioctl;
+	ifp->if_start = nfe_start;
+	ifp->if_hwassist = 0;
+	ifp->if_capabilities = 0;
+	ifp->if_watchdog = NULL;
+	ifp->if_init = nfe_init;
+	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
+	ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
+	IFQ_SET_READY(&ifp->if_snd);
+
+	if (sc->nfe_flags & NFE_HW_CSUM) {
+		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
+		ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
+	}
+	ifp->if_capenable = ifp->if_capabilities;
+
+	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
+	/* VLAN capability setup. */
+	ifp->if_capabilities |= IFCAP_VLAN_MTU;
+	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
+		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
+		if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
+			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
+	}
+	ifp->if_capenable = ifp->if_capabilities;
+
+	/*
+	 * Tell the upper layer(s) we support long frames.
+	 * Must appear after the call to ether_ifattach() because
+	 * ether_ifattach() sets ifi_hdrlen to the default value.
+	 */
+	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+#ifdef DEVICE_POLLING
+	ifp->if_capabilities |= IFCAP_POLLING;
+#endif
+
+	/* Do MII setup */
+	if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd,
+	    nfe_ifmedia_sts)) {
+		device_printf(dev, "MII without any phy!\n");
+		error = ENXIO;
+		goto fail;
+	}
+	ether_ifattach(ifp, sc->eaddr);
+
+	TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
+	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
+	    taskqueue_thread_enqueue, &sc->nfe_tq);
+	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
+	    device_get_nameunit(sc->nfe_dev));
+	error = 0;
+	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
+		error = bus_setup_intr(dev, sc->nfe_irq[0],
+		    INTR_TYPE_NET | INTR_MPSAFE | INTR_FAST, nfe_intr, sc,
+		    &sc->nfe_intrhand[0]);
+	} else {
+		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
+			error = bus_setup_intr(dev, sc->nfe_irq[i],
+			    INTR_TYPE_NET | INTR_MPSAFE | INTR_FAST, nfe_intr, sc,
+			    &sc->nfe_intrhand[i]);
+			if (error != 0)
+				break;
+		}
+	}
+	if (error) {
+		device_printf(dev, "couldn't set up irq\n");
+		taskqueue_free(sc->nfe_tq);
+		sc->nfe_tq = NULL;
+		ether_ifdetach(ifp);
+		goto fail;
+	}
+
+fail:
+	if (error)
+		nfe_detach(dev);
+
+	return (error);
+}
+
+
+static int
+nfe_detach(device_t dev)
+{
+	struct nfe_softc *sc;
+	struct ifnet *ifp;
+	uint8_t eaddr[ETHER_ADDR_LEN];
+	int i, rid;
+
+	sc = device_get_softc(dev);
+	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
+	ifp = sc->nfe_ifp;
+
+#ifdef DEVICE_POLLING
+	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
+		ether_poll_deregister(ifp);
+#endif
+	if (device_is_attached(dev)) {
+		NFE_LOCK(sc);
+		nfe_stop(ifp);
+		ifp->if_flags &= ~IFF_UP;
+		NFE_UNLOCK(sc);
+		callout_drain(&sc->nfe_stat_ch);
+		taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
+		taskqueue_drain(taskqueue_swi, &sc->nfe_link_task);
+		ether_ifdetach(ifp);
+	}
+
+	if (ifp) {
+		/* restore ethernet address */
+		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
+			for (i = 0; i < ETHER_ADDR_LEN; i++) {
+				eaddr[i] = sc->eaddr[5 - i];
+			}
+		} else
+			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
+		nfe_set_macaddr(sc, eaddr);
+		if_free(ifp);
+	}
+	if (sc->nfe_miibus)
+		device_delete_child(dev, sc->nfe_miibus);
+	bus_generic_detach(dev);
+	if (sc->nfe_tq != NULL) {
+		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
+		taskqueue_free(sc->nfe_tq);
+		sc->nfe_tq = NULL;
+	}
+
+	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
+		if (sc->nfe_intrhand[i] != NULL) {
+			bus_teardown_intr(dev, sc->nfe_irq[i],
+			    sc->nfe_intrhand[i]);
+			sc->nfe_intrhand[i] = NULL;
+		}
+	}
+
+	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
+		if (sc->nfe_irq[0] != NULL)
+			bus_release_resource(dev, SYS_RES_IRQ, 0,
+			    sc->nfe_irq[0]);
+	} else {
+		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
+			if (sc->nfe_irq[i] != NULL) {
+				bus_release_resource(dev, SYS_RES_IRQ, rid,
+				    sc->nfe_irq[i]);
+				sc->nfe_irq[i] = NULL;
+			}
+		}
+		pci_release_msi(dev);
+	}
+	if (sc->nfe_msix_pba_res != NULL) {
+		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
+		    sc->nfe_msix_pba_res);
+		sc->nfe_msix_pba_res = NULL;
+	}
+	if (sc->nfe_msix_res != NULL) {
+		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
+		    sc->nfe_msix_res);
+		sc->nfe_msix_res = NULL;
+	}
+	if (sc->nfe_res[0] != NULL) {
+		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
+		    sc->nfe_res[0]);
+		sc->nfe_res[0] = NULL;
+	}
+
+	nfe_free_tx_ring(sc, &sc->txq);
+	nfe_free_rx_ring(sc, &sc->rxq);
+	nfe_free_jrx_ring(sc, &sc->jrxq);
+
+	if (sc->nfe_parent_tag) {
+		bus_dma_tag_destroy(sc->nfe_parent_tag);
+		sc->nfe_parent_tag = NULL;
+	}
+
+	mtx_destroy(&sc->nfe_jlist_mtx);
+	mtx_destroy(&sc->nfe_mtx);
+
+	return (0);
+}
+
+
+static int
+nfe_suspend(device_t dev)
+{
+	struct nfe_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	NFE_LOCK(sc);
+	nfe_stop(sc->nfe_ifp);
+	sc->nfe_suspended = 1;
+	NFE_UNLOCK(sc);
+
+	return (0);
+}
+
+
+static int
+nfe_resume(device_t dev)
+{
+	struct nfe_softc *sc;
+	struct ifnet *ifp;
+
+	sc = device_get_softc(dev);
+
+	NFE_LOCK(sc);
+	ifp = sc->nfe_ifp;
+	if (ifp->if_flags & IFF_UP)
+		nfe_init_locked(sc);
+	sc->nfe_suspended = 0;
+	NFE_UNLOCK(sc);
+
+	return (0);
+}
+
+
+/* Take PHY/NIC out of powerdown, from Linux */
+static void
+nfe_power(struct nfe_softc *sc)
+{
+	uint32_t pwr;
+
+	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
+		return;
+	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
+	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
+	DELAY(100);
+	NFE_WRITE(sc, NFE_MAC_RESET, 0);
+	DELAY(100);
+	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
+	pwr = NFE_READ(sc, NFE_PWR2_CTL);
+	pwr &= ~NFE_PWR2_WAKEUP_MASK;
+	if (sc->nfe_revid >= 0xa3 &&
+	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
+	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
+		pwr |= NFE_PWR2_REVA3;
+	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
+}
+
+
+static void
+nfe_miibus_statchg(device_t dev)
+{
+	struct nfe_softc *sc;
+
+	sc = device_get_softc(dev);
+	taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task);
+}
+
+
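+/* Reprogram MAC speed, duplex and flow control after a link change. */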
+static void
+nfe_link_task(void *arg, int pending)
+{
+	struct nfe_softc *sc;
+	struct mii_data *mii;
+	struct ifnet *ifp;
+	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
+	uint32_t gmask, rxctl, txctl, val;
+
+	sc = (struct nfe_softc *)arg;
+
+	NFE_LOCK(sc);
+
+	mii = device_get_softc(sc->nfe_miibus);
+	ifp = sc->nfe_ifp;
+	if (mii == NULL || ifp == NULL) {
+		NFE_UNLOCK(sc);
+		return;
+	}
+
+	if (mii->mii_media_status & IFM_ACTIVE) {
+		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
+			sc->nfe_link = 1;
+	} else
+		sc->nfe_link = 0;
+
+	phy = NFE_READ(sc, NFE_PHY_IFACE);
+	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
+
+	seed = NFE_READ(sc, NFE_RNDSEED);
+	seed &= ~NFE_SEED_MASK;
+
+	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) {
+		phy  |= NFE_PHY_HDX;	/* half-duplex */
+		misc |= NFE_MISC1_HDX;
+	}
+
+	switch (IFM_SUBTYPE(mii->mii_media_active)) {
+	case IFM_1000_T:	/* full-duplex only */
+		link |= NFE_MEDIA_1000T;
+		seed |= NFE_SEED_1000T;
+		phy  |= NFE_PHY_1000T;
+		break;
+	case IFM_100_TX:
+		link |= NFE_MEDIA_100TX;
+		seed |= NFE_SEED_100TX;
+		phy  |= NFE_PHY_100TX;
+		break;
+	case IFM_10_T:
+		link |= NFE_MEDIA_10T;
+		seed |= NFE_SEED_10T;
+		break;
+	}
+
+	if ((phy & 0x10000000) != 0) {
+		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
+			val = NFE_R1_MAGIC_1000;
+		else
+			val = NFE_R1_MAGIC_10_100;
+	} else
+		val = NFE_R1_MAGIC_DEFAULT;
+	NFE_WRITE(sc, NFE_SETUP_R1, val);
+
+	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
+
+	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
+	NFE_WRITE(sc, NFE_MISC1, misc);
+	NFE_WRITE(sc, NFE_LINKSPEED, link);
+
+	gmask = mii->mii_media_active & IFM_GMASK;
+	if ((gmask & IFM_FDX) != 0) {
+		/* It seems all hardware supports Rx pause frames. */
+		val = NFE_READ(sc, NFE_RXFILTER);
+		if ((gmask & IFM_FLAG0) != 0)
+			val |= NFE_PFF_RX_PAUSE;
+		else
+			val &= ~NFE_PFF_RX_PAUSE;
+		NFE_WRITE(sc, NFE_RXFILTER, val);
+		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
+			val = NFE_READ(sc, NFE_MISC1);
+			if ((gmask & IFM_FLAG1) != 0) {
+				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
+				    NFE_TX_PAUSE_FRAME_ENABLE);
+				val |= NFE_MISC1_TX_PAUSE;
+			} else {
+				val &= ~NFE_MISC1_TX_PAUSE;
+				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
+				    NFE_TX_PAUSE_FRAME_DISABLE);
+			}
+			NFE_WRITE(sc, NFE_MISC1, val);
+		}
+	} else {
+		/* disable rx/tx pause frames */
+		val = NFE_READ(sc, NFE_RXFILTER);
+		val &= ~NFE_PFF_RX_PAUSE;
+		NFE_WRITE(sc, NFE_RXFILTER, val);
+		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
+			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
+			    NFE_TX_PAUSE_FRAME_DISABLE);
+			val = NFE_READ(sc, NFE_MISC1);
+			val &= ~NFE_MISC1_TX_PAUSE;
+			NFE_WRITE(sc, NFE_MISC1, val);
+		}
+	}
+
+	txctl = NFE_READ(sc, NFE_TX_CTL);
+	rxctl = NFE_READ(sc, NFE_RX_CTL);
+	if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+		txctl |= NFE_TX_START;
+		rxctl |= NFE_RX_START;
+	} else {
+		txctl &= ~NFE_TX_START;
+		rxctl &= ~NFE_RX_START;
+	}
+	NFE_WRITE(sc, NFE_TX_CTL, txctl);
+	NFE_WRITE(sc, NFE_RX_CTL, rxctl);
+
+	NFE_UNLOCK(sc);
+}
+
+
+static int
+nfe_miibus_readreg(device_t dev, int phy, int reg)
+{
+	struct nfe_softc *sc = device_get_softc(dev);
+	uint32_t val;
+	int ntries;
+
+	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
+
+	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
+		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
+		DELAY(100);
+	}
+
+	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
+
+	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
+		DELAY(100);
+		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
+			break;
+	}
+	if (ntries == NFE_TIMEOUT) {
+		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
+		return (0);
+	}
+
+	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
+		DPRINTFN(sc, 2, "could not read PHY\n");
+		return (0);
+	}
+
+	val = NFE_READ(sc, NFE_PHY_DATA);
+	if (val != 0xffffffff && val != 0)
+		sc->mii_phyaddr = phy;
+
+	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
+
+	return (val);
+}
+
+
+static int
+nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
+{
+	struct nfe_softc *sc = device_get_softc(dev);
+	uint32_t ctl;
+	int ntries;
+
+	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
+
+	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
+		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
+		DELAY(100);
+	}
+
+	NFE_WRITE(sc, NFE_PHY_DATA, val);
+	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
+	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
+
+	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
+		DELAY(100);
+		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
+			break;
+	}
+#ifdef NFE_DEBUG
+	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
+		device_printf(sc->nfe_dev, "could not write to PHY\n");
+#endif
+	return (0);
+}
+
+/*
+ * Allocate a jumbo buffer.
+ */
+static void *
+nfe_jalloc(struct nfe_softc *sc)
+{
+	struct nfe_jpool_entry *entry;
+
+	NFE_JLIST_LOCK(sc);
+
+	entry = SLIST_FIRST(&sc->nfe_jfree_listhead);
+
+	if (entry == NULL) {
+		NFE_JLIST_UNLOCK(sc);
+		return (NULL);
+	}
+
+	SLIST_REMOVE_HEAD(&sc->nfe_jfree_listhead, jpool_entries);
+	SLIST_INSERT_HEAD(&sc->nfe_jinuse_listhead, entry, jpool_entries);
+
+	NFE_JLIST_UNLOCK(sc);
+
+	return (sc->jrxq.jslots[entry->slot]);
+}
+
+/*
+ * Release a jumbo buffer.
+ */
+static void
+nfe_jfree(void *buf, void *args)
+{
+	struct nfe_softc *sc;
+	struct nfe_jpool_entry *entry;
+	int i;
+
+	/* Extract the softc struct pointer. */
+	sc = (struct nfe_softc *)args;
+	KASSERT(sc != NULL, ("%s: can't find softc pointer!", __func__));
+
+	NFE_JLIST_LOCK(sc);
+	/* Calculate the slot this buffer belongs to. */
+	i = ((vm_offset_t)buf -
+	    (vm_offset_t)sc->jrxq.jpool) / NFE_JLEN;
+	KASSERT(i >= 0 && i < NFE_JSLOTS,
+	    ("%s: asked to free buffer that we don't manage!", __func__));
+
+	entry = SLIST_FIRST(&sc->nfe_jinuse_listhead);
+	KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
+	entry->slot = i;
+	SLIST_REMOVE_HEAD(&sc->nfe_jinuse_listhead, jpool_entries);
+	SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry, jpool_entries);
+	if (SLIST_EMPTY(&sc->nfe_jinuse_listhead))
+		wakeup(sc);
+
+	NFE_JLIST_UNLOCK(sc);
+}
+
+struct nfe_dmamap_arg {
+	bus_addr_t nfe_busaddr;
+};
+
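+/* Create the DMA tags, maps and descriptor memory for the Rx ring. */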
+static int
+nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
+{
+	struct nfe_dmamap_arg ctx;
+	struct nfe_rx_data *data;
+	void *desc;
+	int i, error, descsize;
+
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc = ring->desc64;
+		descsize = sizeof (struct nfe_desc64);
+	} else {
+		desc = ring->desc32;
+		descsize = sizeof (struct nfe_desc32);
+	}
+
+	ring->cur = ring->next = 0;
+
+	error = bus_dma_tag_create(sc->nfe_parent_tag,
+	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,			/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filter, filterarg */
+	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
+	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
+	    0,					/* flags */
+	    NULL, NULL,				/* lockfunc, lockarg */
+	    &ring->rx_desc_tag);
+	if (error != 0) {
+		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
+		goto fail;
+	}
+
+	/* allocate memory for the descriptors */
+	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
+	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
+	if (error != 0) {
+		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
+		goto fail;
+	}
+	if (sc->nfe_flags & NFE_40BIT_ADDR)
+		ring->desc64 = desc;
+	else
+		ring->desc32 = desc;
+
+	/* map desc to device visible address space */
+	ctx.nfe_busaddr = 0;
+	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
+	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
+	if (error != 0) {
+		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
+		goto fail;
+	}
+	ring->physaddr = ctx.nfe_busaddr;
+
+	error = bus_dma_tag_create(sc->nfe_parent_tag,
+	    1, 0,			/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    MCLBYTES, 1,		/* maxsize, nsegments */
+	    MCLBYTES,			/* maxsegsize */
+	    0,				/* flags */
+	    NULL, NULL,			/* lockfunc, lockarg */
+	    &ring->rx_data_tag);
+	if (error != 0) {
+		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
+		goto fail;
+	}
+
+	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
+	if (error != 0) {
+		device_printf(sc->nfe_dev,
+		    "could not create Rx DMA spare map\n");
+		goto fail;
+	}
+
+	/*
+	 * Pre-allocate Rx buffers and populate Rx ring.
+	 */
+	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
+		data = &sc->rxq.data[i];
+		data->rx_data_map = NULL;
+		data->m = NULL;
+		error = bus_dmamap_create(ring->rx_data_tag, 0,
+		    &data->rx_data_map);
+		if (error != 0) {
+			device_printf(sc->nfe_dev,
+			    "could not create Rx DMA map\n");
+			goto fail;
+		}
+	}
+
+fail:
+	return (error);
+}
+
+
+static void
+nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
+{
+	struct nfe_dmamap_arg ctx;
+	struct nfe_rx_data *data;
+	void *desc;
+	struct nfe_jpool_entry *entry;
+	uint8_t *ptr;
+	int i, error, descsize;
+
+	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
+		return;
+	if (jumbo_disable != 0) {
+		device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
+		sc->nfe_jumbo_disable = 1;
+		return;
+	}
+
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc = ring->jdesc64;
+		descsize = sizeof (struct nfe_desc64);
+	} else {
+		desc = ring->jdesc32;
+		descsize = sizeof (struct nfe_desc32);
+	}
+
+	ring->jcur = ring->jnext = 0;
+
+	/* Create DMA tag for jumbo Rx ring. */
+	error = bus_dma_tag_create(sc->nfe_parent_tag,
+	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,			/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filter, filterarg */
+	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
+	    1, 					/* nsegments */
+	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
+	    0,					/* flags */
+	    NULL, NULL,				/* lockfunc, lockarg */
+	    &ring->jrx_desc_tag);
+	if (error != 0) {
+		device_printf(sc->nfe_dev,
+		    "could not create jumbo ring DMA tag\n");
+		goto fail;
+	}
+
+	/* Create DMA tag for jumbo buffer blocks. */
+	error = bus_dma_tag_create(sc->nfe_parent_tag,
+	    PAGE_SIZE, 0,			/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,			/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filter, filterarg */
+	    NFE_JMEM,				/* maxsize */
+	    1, 					/* nsegments */
+	    NFE_JMEM,				/* maxsegsize */
+	    0,					/* flags */
+	    NULL, NULL,				/* lockfunc, lockarg */
+	    &ring->jrx_jumbo_tag);
+	if (error != 0) {
+		device_printf(sc->nfe_dev,
+		    "could not create jumbo Rx buffer block DMA tag\n");
+		goto fail;
+	}
+
+	/* Create DMA tag for jumbo Rx buffers. */
+	error = bus_dma_tag_create(sc->nfe_parent_tag,
+	    PAGE_SIZE, 0,			/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,			/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filter, filterarg */
+	    NFE_JLEN,				/* maxsize */
+	    1,					/* nsegments */
+	    NFE_JLEN,				/* maxsegsize */
+	    0,					/* flags */
+	    NULL, NULL,				/* lockfunc, lockarg */
+	    &ring->jrx_data_tag);
+	if (error != 0) {
+		device_printf(sc->nfe_dev,
+		    "could not create jumbo Rx buffer DMA tag\n");
+		goto fail;
+	}
+
+	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
+	error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
+	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
+	if (error != 0) {
+		device_printf(sc->nfe_dev,
+		    "could not allocate DMA'able memory for jumbo Rx ring\n");
+		goto fail;
+	}
+	if (sc->nfe_flags & NFE_40BIT_ADDR)
+		ring->jdesc64 = desc;
+	else
+		ring->jdesc32 = desc;
+
+	ctx.nfe_busaddr = 0;
+	error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
+	    NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
+	if (error != 0) {
+		device_printf(sc->nfe_dev,
+		    "could not load DMA'able memory for jumbo Rx ring\n");
+		goto fail;
+	}
+	ring->jphysaddr = ctx.nfe_busaddr;
+
+	/* Create DMA maps for jumbo Rx buffers. */
+	error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
+	if (error != 0) {
+		device_printf(sc->nfe_dev,
+		    "could not create jumbo Rx DMA spare map\n");
+		goto fail;
+	}
+
+	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
+		data = &sc->jrxq.jdata[i];
+		data->rx_data_map = NULL;
+		data->m = NULL;
+		error = bus_dmamap_create(ring->jrx_data_tag, 0,
+		    &data->rx_data_map);
+		if (error != 0) {
+			device_printf(sc->nfe_dev,
+			    "could not create jumbo Rx DMA map\n");
+			goto fail;
+		}
+	}
+
+	/* Allocate DMA'able memory and load the DMA map for jumbo buf. */
+	error = bus_dmamem_alloc(ring->jrx_jumbo_tag, (void **)&ring->jpool,
+	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
+	    &ring->jrx_jumbo_map);
+	if (error != 0) {
+		device_printf(sc->nfe_dev,
+		    "could not allocate DMA'able memory for jumbo pool\n");
+		goto fail;
+	}
+
+	ctx.nfe_busaddr = 0;
+	error = bus_dmamap_load(ring->jrx_jumbo_tag, ring->jrx_jumbo_map,
+	    ring->jpool, NFE_JMEM, nfe_dma_map_segs, &ctx, 0);
+	if (error != 0) {
+		device_printf(sc->nfe_dev,
+		    "could not load DMA'able memory for jumbo pool\n");
+		goto fail;
+	}
+
+	/*
+	 * Now divide it up into 9K pieces and save the addresses
+	 * in an array.
+	 */
+	ptr = ring->jpool;
+	for (i = 0; i < NFE_JSLOTS; i++) {
+		ring->jslots[i] = ptr;
+		ptr += NFE_JLEN;
+		entry = malloc(sizeof(struct nfe_jpool_entry), M_DEVBUF,
+		    M_WAITOK);
+		if (entry == NULL) {
+			device_printf(sc->nfe_dev,
+			    "no memory for jumbo buffers!\n");
+			error = ENOMEM;
+			goto fail;
+		}
+		entry->slot = i;
+		SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry,
+		    jpool_entries);
+	}
+
+	return;
+
+fail:
+	/*
+	 * Running without jumbo frame support is OK for most cases, so
+	 * don't fail if the jumbo frame DMA tag/map can't be created.
+	 */
+	nfe_free_jrx_ring(sc, ring);
+	device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
+	    "resource shortage\n");
+	sc->nfe_jumbo_disable = 1;
+}
+
+
+static int
+nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
+{
+	void *desc;
+	size_t descsize;
+	int i;
+
+	ring->cur = ring->next = 0;
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc = ring->desc64;
+		descsize = sizeof (struct nfe_desc64);
+	} else {
+		desc = ring->desc32;
+		descsize = sizeof (struct nfe_desc32);
+	}
+	bzero(desc, descsize * NFE_RX_RING_COUNT);
+	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
+		if (nfe_newbuf(sc, i) != 0)
+			return (ENOBUFS);
+	}
+
+	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+	return (0);
+}
+
+
+static int
+nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
+{
+	void *desc;
+	size_t descsize;
+	int i;
+
+	ring->jcur = ring->jnext = 0;
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc = ring->jdesc64;
+		descsize = sizeof (struct nfe_desc64);
+	} else {
+		desc = ring->jdesc32;
+		descsize = sizeof (struct nfe_desc32);
+	}
+	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
+	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
+		if (nfe_jnewbuf(sc, i) != 0)
+			return (ENOBUFS);
+	}
+
+	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+	return (0);
+}
+
+
+static void
+nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
+{
+	struct nfe_rx_data *data;
+	void *desc;
+	int i, descsize;
+
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc = ring->desc64;
+		descsize = sizeof (struct nfe_desc64);
+	} else {
+		desc = ring->desc32;
+		descsize = sizeof (struct nfe_desc32);
+	}
+
+	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
+		data = &ring->data[i];
+		if (data->rx_data_map != NULL) {
+			bus_dmamap_destroy(ring->rx_data_tag,
+			    data->rx_data_map);
+			data->rx_data_map = NULL;
+		}
+		if (data->m != NULL) {
+			m_freem(data->m);
+			data->m = NULL;
+		}
+	}
+	if (ring->rx_data_tag != NULL) {
+		if (ring->rx_spare_map != NULL) {
+			bus_dmamap_destroy(ring->rx_data_tag,
+			    ring->rx_spare_map);
+			ring->rx_spare_map = NULL;
+		}
+		bus_dma_tag_destroy(ring->rx_data_tag);
+		ring->rx_data_tag = NULL;
+	}
+
+	if (desc != NULL) {
+		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
+		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
+		ring->desc64 = NULL;
+		ring->desc32 = NULL;
+		ring->rx_desc_map = NULL;
+	}
+	if (ring->rx_desc_tag != NULL) {
+		bus_dma_tag_destroy(ring->rx_desc_tag);
+		ring->rx_desc_tag = NULL;
+	}
+}
+
+
+static void
+nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
+{
+	struct nfe_jpool_entry *entry;
+	struct nfe_rx_data *data;
+	void *desc;
+	int i, descsize;
+
+	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
+		return;
+
+	NFE_JLIST_LOCK(sc);
+	while ((entry = SLIST_FIRST(&sc->nfe_jinuse_listhead))) {
+		device_printf(sc->nfe_dev,
+		    "asked to free buffer that is in use!\n");
+		SLIST_REMOVE_HEAD(&sc->nfe_jinuse_listhead, jpool_entries);
+		SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry,
+		    jpool_entries);
+	}
+
+	while (!SLIST_EMPTY(&sc->nfe_jfree_listhead)) {
+		entry = SLIST_FIRST(&sc->nfe_jfree_listhead);
+		SLIST_REMOVE_HEAD(&sc->nfe_jfree_listhead, jpool_entries);
+		free(entry, M_DEVBUF);
+	}
+	NFE_JLIST_UNLOCK(sc);
+
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc = ring->jdesc64;
+		descsize = sizeof (struct nfe_desc64);
+	} else {
+		desc = ring->jdesc32;
+		descsize = sizeof (struct nfe_desc32);
+	}
+
+	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
+		data = &ring->jdata[i];
+		if (data->rx_data_map != NULL) {
+			bus_dmamap_destroy(ring->jrx_data_tag,
+			    data->rx_data_map);
+			data->rx_data_map = NULL;
+		}
+		if (data->m != NULL) {
+			m_freem(data->m);
+			data->m = NULL;
+		}
+	}
+	if (ring->jrx_data_tag != NULL) {
+		if (ring->jrx_spare_map != NULL) {
+			bus_dmamap_destroy(ring->jrx_data_tag,
+			    ring->jrx_spare_map);
+			ring->jrx_spare_map = NULL;
+		}
+		bus_dma_tag_destroy(ring->jrx_data_tag);
+		ring->jrx_data_tag = NULL;
+	}
+
+	if (desc != NULL) {
+		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
+		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
+		ring->jdesc64 = NULL;
+		ring->jdesc32 = NULL;
+		ring->jrx_desc_map = NULL;
+	}
+	/* Destroy jumbo buffer block. */
+	if (ring->jrx_jumbo_map != NULL)
+		bus_dmamap_unload(ring->jrx_jumbo_tag, ring->jrx_jumbo_map);
+	if (ring->jrx_jumbo_map != NULL) {
+		bus_dmamem_free(ring->jrx_jumbo_tag, ring->jpool,
+		    ring->jrx_jumbo_map);
+		ring->jpool = NULL;
+		ring->jrx_jumbo_map = NULL;
+	}
+	if (ring->jrx_desc_tag != NULL) {
+		bus_dma_tag_destroy(ring->jrx_desc_tag);
+		ring->jrx_desc_tag = NULL;
+	}
+}
+
+
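+/* Create the DMA tags, maps and descriptor memory for the Tx ring. */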
+static int
+nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
+{
+	struct nfe_dmamap_arg ctx;
+	int i, error;
+	void *desc;
+	int descsize;
+
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc = ring->desc64;
+		descsize = sizeof (struct nfe_desc64);
+	} else {
+		desc = ring->desc32;
+		descsize = sizeof (struct nfe_desc32);
+	}
+
+	ring->queued = 0;
+	ring->cur = ring->next = 0;
+
+	error = bus_dma_tag_create(sc->nfe_parent_tag,
+	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,			/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filter, filterarg */
+	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
+	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
+	    0,					/* flags */
+	    NULL, NULL,				/* lockfunc, lockarg */
+	    &ring->tx_desc_tag);
+	if (error != 0) {
+		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
+		goto fail;
+	}
+
+	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
+	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
+	if (error != 0) {
+		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
+		goto fail;
+	}
+	if (sc->nfe_flags & NFE_40BIT_ADDR)
+		ring->desc64 = desc;
+	else
+		ring->desc32 = desc;
+
+	ctx.nfe_busaddr = 0;
+	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
+	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
+	if (error != 0) {
+		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
+		goto fail;
+	}
+	ring->physaddr = ctx.nfe_busaddr;
+
+	error = bus_dma_tag_create(sc->nfe_parent_tag,
+	    1, 0,
+	    BUS_SPACE_MAXADDR,
+	    BUS_SPACE_MAXADDR,
+	    NULL, NULL,
+	    NFE_TSO_MAXSIZE,
+	    NFE_MAX_SCATTER,
+	    NFE_TSO_MAXSGSIZE,
+	    0,
+	    NULL, NULL,
+	    &ring->tx_data_tag);
+	if (error != 0) {
+		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
+		goto fail;
+	}
+
+	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
+		error = bus_dmamap_create(ring->tx_data_tag, 0,
+		    &ring->data[i].tx_data_map);
+		if (error != 0) {
+			device_printf(sc->nfe_dev,
+			    "could not create Tx DMA map\n");
+			goto fail;
+		}
+	}
+
+fail:
+	return (error);
+}
+
+
+static void
+nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
+{
+	void *desc;
+	size_t descsize;
+
+	sc->nfe_force_tx = 0;
+	ring->queued = 0;
+	ring->cur = ring->next = 0;
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc = ring->desc64;
+		descsize = sizeof (struct nfe_desc64);
+	} else {
+		desc = ring->desc32;
+		descsize = sizeof (struct nfe_desc32);
+	}
+	bzero(desc, descsize * NFE_TX_RING_COUNT);
+
+	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+}
+
+
+static void
+nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
+{
+	struct nfe_tx_data *data;
+	void *desc;
+	int i, descsize;
+
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc = ring->desc64;
+		descsize = sizeof (struct nfe_desc64);
+	} else {
+		desc = ring->desc32;
+		descsize = sizeof (struct nfe_desc32);
+	}
+
+	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
+		data = &ring->data[i];
+
+		if (data->m != NULL) {
+			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
+			    BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
+			m_freem(data->m);
+			data->m = NULL;
+		}
+		if (data->tx_data_map != NULL) {
+			bus_dmamap_destroy(ring->tx_data_tag,
+			    data->tx_data_map);
+			data->tx_data_map = NULL;
+		}
+	}
+
+	if (ring->tx_data_tag != NULL) {
+		bus_dma_tag_destroy(ring->tx_data_tag);
+		ring->tx_data_tag = NULL;
+	}
+
+	if (desc != NULL) {
+		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
+		    BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
+		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
+		ring->desc64 = NULL;
+		ring->desc32 = NULL;
+		ring->tx_desc_map = NULL;
+		bus_dma_tag_destroy(ring->tx_desc_tag);
+		ring->tx_desc_tag = NULL;
+	}
+}
+
+#ifdef DEVICE_POLLING
+static poll_handler_t nfe_poll;
+
+
+static void
+nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+{
+	struct nfe_softc *sc = ifp->if_softc;
+	uint32_t r;
+
+	NFE_LOCK(sc);
+
+	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+		NFE_UNLOCK(sc);
+		return;
+	}
+
+	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
+		nfe_jrxeof(sc, count);
+	else
+		nfe_rxeof(sc, count);
+	nfe_txeof(sc);
+	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_tx_task);
+
+	if (cmd == POLL_AND_CHECK_STATUS) {
+		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
+			NFE_UNLOCK(sc);
+			return;
+		}
+		NFE_WRITE(sc, sc->nfe_irq_status, r);
+
+		if (r & NFE_IRQ_LINK) {
+			NFE_READ(sc, NFE_PHY_STATUS);
+			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
+			DPRINTF(sc, "link state changed\n");
+		}
+	}
+	NFE_UNLOCK(sc);
+}
+#endif /* DEVICE_POLLING */
+
+static void
+nfe_set_intr(struct nfe_softc *sc)
+{
+
+	if (sc->nfe_msi != 0)
+		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
+}
+
+
+/* In MSI-X mode, a write to the mask registers behaves as XOR. */
+static __inline void
+nfe_enable_intr(struct nfe_softc *sc)
+{
+
+	if (sc->nfe_msix != 0) {
+		/* XXX Should have a better way to enable interrupts! */
+		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
+			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
+	} else
+		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
+}
+
+
+static __inline void
+nfe_disable_intr(struct nfe_softc *sc)
+{
+
+	if (sc->nfe_msix != 0) {
+		/* XXX Should have a better way to disable interrupts! */
+		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
+			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
+	} else
+		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
+}
+
+
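+/* Handle interface ioctls: MTU, flags, multicast, media and capabilities. */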
+static int
+nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+	struct nfe_softc *sc;
+	struct ifreq *ifr;
+	struct mii_data *mii;
+	int error, init, mask;
+
+	sc = ifp->if_softc;
+	ifr = (struct ifreq *) data;
+	error = 0;
+	init = 0;
+	switch (cmd) {
+	case SIOCSIFMTU:
+		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
+			error = EINVAL;
+		else if (ifp->if_mtu != ifr->ifr_mtu) {
+			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
+			    (sc->nfe_jumbo_disable != 0)) &&
+			    ifr->ifr_mtu > ETHERMTU)
+				error = EINVAL;
+			else {
+				NFE_LOCK(sc);
+				ifp->if_mtu = ifr->ifr_mtu;
+				if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
+					nfe_init_locked(sc);
+				NFE_UNLOCK(sc);
+			}
+		}
+		break;
+	case SIOCSIFFLAGS:
+		NFE_LOCK(sc);
+		if (ifp->if_flags & IFF_UP) {
+			/*
+			 * If only the PROMISC or ALLMULTI flag changes, then
+			 * don't do a full re-init of the chip, just update
+			 * the Rx filter.
+			 */
+			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
+			    ((ifp->if_flags ^ sc->nfe_if_flags) &
+			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
+				nfe_setmulti(sc);
+			else
+				nfe_init_locked(sc);
+		} else {
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+				nfe_stop(ifp);
+		}
+		sc->nfe_if_flags = ifp->if_flags;
+		NFE_UNLOCK(sc);
+		error = 0;
+		break;
+	case SIOCADDMULTI:
+	case SIOCDELMULTI:
+		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+			NFE_LOCK(sc);
+			nfe_setmulti(sc);
+			NFE_UNLOCK(sc);
+			error = 0;
+		}
+		break;
+	case SIOCSIFMEDIA:
+	case SIOCGIFMEDIA:
+		mii = device_get_softc(sc->nfe_miibus);
+		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
+		break;
+	case SIOCSIFCAP:
+		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+#ifdef DEVICE_POLLING
+		if ((mask & IFCAP_POLLING) != 0) {
+			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
+				error = ether_poll_register(nfe_poll, ifp);
+				if (error)
+					break;
+				NFE_LOCK(sc);
+				nfe_disable_intr(sc);
+				ifp->if_capenable |= IFCAP_POLLING;
+				NFE_UNLOCK(sc);
+			} else {
+				error = ether_poll_deregister(ifp);
+				/* Enable interrupt even in error case */
+				NFE_LOCK(sc);
+				nfe_enable_intr(sc);
+				ifp->if_capenable &= ~IFCAP_POLLING;
+				NFE_UNLOCK(sc);
+			}
+		}
+#endif /* DEVICE_POLLING */
+		if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
+		    (mask & IFCAP_HWCSUM) != 0) {
+			ifp->if_capenable ^= IFCAP_HWCSUM;
+			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
+			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
+				ifp->if_hwassist |= NFE_CSUM_FEATURES;
+			else
+				ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
+			init++;
+		}
+		if ((sc->nfe_flags & NFE_HW_VLAN) != 0 &&
+		    (mask & IFCAP_VLAN_HWTAGGING) != 0) {
+			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+			init++;
+		}
+		/*
+		 * XXX
+		 * It seems that VLAN stripping requires Rx checksum offload.
+		 * Unfortunately FreeBSD has no way to disable only Rx side
+		 * VLAN stripping. So when we know Rx checksum offload is
+		 * disabled turn entire hardware VLAN assist off.
+		 */
+		if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) ==
+		    (NFE_HW_CSUM | NFE_HW_VLAN)) {
+			if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
+				ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING;
+		}
+
+		if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
+		    (mask & IFCAP_TSO4) != 0) {
+			ifp->if_capenable ^= IFCAP_TSO4;
+			if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
+			    (IFCAP_TSO4 & ifp->if_capabilities) != 0)
+				ifp->if_hwassist |= CSUM_TSO;
+			else
+				ifp->if_hwassist &= ~CSUM_TSO;
+		}
+
+		if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+			nfe_init(sc);
+		}
+		if ((sc->nfe_flags & NFE_HW_VLAN) != 0)
+			VLAN_CAPABILITIES(ifp);
+		break;
+	default:
+		error = ether_ioctl(ifp, cmd, data);
+		break;
+	}
+
+	return (error);
+}
+
+
+static void
+nfe_intr(void *arg)
+{
+	struct nfe_softc *sc;
+	uint32_t status;
+
+	sc = (struct nfe_softc *)arg;
+
+	status = NFE_READ(sc, sc->nfe_irq_status);
+	if (status == 0 || status == 0xffffffff)
+		return;
+	nfe_disable_intr(sc);
+	taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_int_task);
+
+	return;
+}
+
+
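+/* Deferred interrupt work: process the Rx and Tx rings from the taskqueue. */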
+static void
+nfe_int_task(void *arg, int pending)
+{
+	struct nfe_softc *sc = arg;
+	struct ifnet *ifp = sc->nfe_ifp;
+	uint32_t r;
+	int domore;
+
+	NFE_LOCK(sc);
+
+	if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
+		nfe_enable_intr(sc);
+		NFE_UNLOCK(sc);
+		return;	/* not for us */
+	}
+	NFE_WRITE(sc, sc->nfe_irq_status, r);
+
+	DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
+
+#ifdef DEVICE_POLLING
+	if (ifp->if_capenable & IFCAP_POLLING) {
+		NFE_UNLOCK(sc);
+		return;
+	}
+#endif
+
+	if (r & NFE_IRQ_LINK) {
+		NFE_READ(sc, NFE_PHY_STATUS);
+		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
+		DPRINTF(sc, "link state changed\n");
+	}
+
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+		NFE_UNLOCK(sc);
+		nfe_enable_intr(sc);
+		return;
+	}
+
+	domore = 0;
+	/* check Rx ring */
+	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
+		domore = nfe_jrxeof(sc, sc->nfe_process_limit);
+	else
+		domore = nfe_rxeof(sc, sc->nfe_process_limit);
+	/* check Tx ring */
+	nfe_txeof(sc);
+
+	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_tx_task);
+
+	NFE_UNLOCK(sc);
+
+	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
+		taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_int_task);
+		return;
+	}
+
+	/* Reenable interrupts. */
+	nfe_enable_intr(sc);
+}
+
+
+static __inline void
+nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
+{
+	struct nfe_desc32 *desc32;
+	struct nfe_desc64 *desc64;
+	struct nfe_rx_data *data;
+	struct mbuf *m;
+
+	data = &sc->rxq.data[idx];
+	m = data->m;
+
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc64 = &sc->rxq.desc64[idx];
+		/* VLAN packet may have overwritten it. */
+		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
+		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
+		desc64->length = htole16(m->m_len);
+		desc64->flags = htole16(NFE_RX_READY);
+	} else {
+		desc32 = &sc->rxq.desc32[idx];
+		desc32->length = htole16(m->m_len);
+		desc32->flags = htole16(NFE_RX_READY);
+	}
+}
+
+
+static __inline void
+nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
+{
+	struct nfe_desc32 *desc32;
+	struct nfe_desc64 *desc64;
+	struct nfe_rx_data *data;
+	struct mbuf *m;
+
+	data = &sc->jrxq.jdata[idx];
+	m = data->m;
+
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc64 = &sc->jrxq.jdesc64[idx];
+		/* VLAN packet may have overwritten it. */
+		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
+		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
+		desc64->length = htole16(m->m_len);
+		desc64->flags = htole16(NFE_RX_READY);
+	} else {
+		desc32 = &sc->jrxq.jdesc32[idx];
+		desc32->length = htole16(m->m_len);
+		desc32->flags = htole16(NFE_RX_READY);
+	}
+}
+
+
+static int
+nfe_newbuf(struct nfe_softc *sc, int idx)
+{
+	struct nfe_rx_data *data;
+	struct nfe_desc32 *desc32;
+	struct nfe_desc64 *desc64;
+	struct mbuf *m;
+	bus_dma_segment_t segs[1];
+	bus_dmamap_t map;
+	int nsegs;
+
+	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+	if (m == NULL)
+		return (ENOBUFS);
+
+	m->m_len = m->m_pkthdr.len = MCLBYTES;
+	m_adj(m, ETHER_ALIGN);
+
+	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
+	    m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
+		m_freem(m);
+		return (ENOBUFS);
+	}
+	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
+
+	data = &sc->rxq.data[idx];
+	if (data->m != NULL) {
+		bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
+		    BUS_DMASYNC_POSTREAD);
+		bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
+	}
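+	/*
+	 * The new mbuf was loaded on the spare map; swap it with the
+	 * slot's map so the old, now-unloaded map becomes the spare
+	 * for the next replenish.
+	 */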
+	map = data->rx_data_map;
+	data->rx_data_map = sc->rxq.rx_spare_map;
+	sc->rxq.rx_spare_map = map;
+	bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
+	    BUS_DMASYNC_PREREAD);
+	data->paddr = segs[0].ds_addr;
+	data->m = m;
+	/* update mapping address in h/w descriptor */
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc64 = &sc->rxq.desc64[idx];
+		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
+		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
+		desc64->length = htole16(segs[0].ds_len);
+		desc64->flags = htole16(NFE_RX_READY);
+	} else {
+		desc32 = &sc->rxq.desc32[idx];
+		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
+		desc32->length = htole16(segs[0].ds_len);
+		desc32->flags = htole16(NFE_RX_READY);
+	}
+
+	return (0);
+}
+
+
+static int
+nfe_jnewbuf(struct nfe_softc *sc, int idx)
+{
+	struct nfe_rx_data *data;
+	struct nfe_desc32 *desc32;
+	struct nfe_desc64 *desc64;
+	struct mbuf *m;
+	bus_dma_segment_t segs[1];
+	bus_dmamap_t map;
+	int nsegs;
+	void *buf;
+
+	MGETHDR(m, M_DONTWAIT, MT_DATA);
+	if (m == NULL)
+		return (ENOBUFS);
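+	/* Draw a receive buffer from the driver's private jumbo pool. */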
+	buf = nfe_jalloc(sc);
+	if (buf == NULL) {
+		m_freem(m);
+		return (ENOBUFS);
+	}
+	/* Attach the buffer to the mbuf. */
+	MEXTADD(m, buf, NFE_JLEN, nfe_jfree, (struct nfe_softc *)sc, 0,
+	    EXT_NET_DRV);
+	if ((m->m_flags & M_EXT) == 0) {
+		m_freem(m);
+		return (ENOBUFS);
+	}
+	m->m_pkthdr.len = m->m_len = NFE_JLEN;
+	m_adj(m, ETHER_ALIGN);
+
+	if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
+	    sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
+		m_freem(m);
+		return (ENOBUFS);
+	}
+	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
+
+	data = &sc->jrxq.jdata[idx];
+	if (data->m != NULL) {
+		bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
+		    BUS_DMASYNC_POSTREAD);
+		bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
+	}
+	map = data->rx_data_map;
+	data->rx_data_map = sc->jrxq.jrx_spare_map;
+	sc->jrxq.jrx_spare_map = map;
+	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
+	    BUS_DMASYNC_PREREAD);
+	data->paddr = segs[0].ds_addr;
+	data->m = m;
+	/* update mapping address in h/w descriptor */
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc64 = &sc->jrxq.jdesc64[idx];
+		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
+		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
+		desc64->length = htole16(segs[0].ds_len);
+		desc64->flags = htole16(NFE_RX_READY);
+	} else {
+		desc32 = &sc->jrxq.jdesc32[idx];
+		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
+		desc32->length = htole16(segs[0].ds_len);
+		desc32->flags = htole16(NFE_RX_READY);
+	}
+
+	return (0);
+}
+
+
+static int
+nfe_rxeof(struct nfe_softc *sc, int count)
+{
+	struct ifnet *ifp = sc->nfe_ifp;
+	struct nfe_desc32 *desc32;
+	struct nfe_desc64 *desc64;
+	struct nfe_rx_data *data;
+	struct mbuf *m;
+	uint16_t flags;
+	int len, prog;
+	uint32_t vtag = 0;
+
+	NFE_LOCK_ASSERT(sc);
+
+	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
+	    BUS_DMASYNC_POSTREAD);
+
+	for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
+		if (count <= 0)
+			break;
+		count--;
+
+		data = &sc->rxq.data[sc->rxq.cur];
+
+		if (sc->nfe_flags & NFE_40BIT_ADDR) {
+			desc64 = &sc->rxq.desc64[sc->rxq.cur];
+			vtag = le32toh(desc64->physaddr[1]);
+			flags = le16toh(desc64->flags);
+			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
+		} else {
+			desc32 = &sc->rxq.desc32[sc->rxq.cur];
+			flags = le16toh(desc32->flags);
+			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
+		}
+
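+		/* NFE_RX_READY set means the chip still owns this descriptor. */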
+		if (flags & NFE_RX_READY)
+			break;
+		prog++;
+		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
+			if (!(flags & NFE_RX_VALID_V1)) {
+				ifp->if_ierrors++;
+				nfe_discard_rxbuf(sc, sc->rxq.cur);
+				continue;
+			}
+			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
+				flags &= ~NFE_RX_ERROR;
+				len--;	/* fix buffer length */
+			}
+		} else {
+			if (!(flags & NFE_RX_VALID_V2)) {
+				ifp->if_ierrors++;
+				nfe_discard_rxbuf(sc, sc->rxq.cur);
+				continue;
+			}
+
+			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
+				flags &= ~NFE_RX_ERROR;
+				len--;	/* fix buffer length */
+			}
+		}
+
+		if (flags & NFE_RX_ERROR) {
+			ifp->if_ierrors++;
+			nfe_discard_rxbuf(sc, sc->rxq.cur);
+			continue;
+		}
+
+		m = data->m;
+		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
+			ifp->if_iqdrops++;
+			nfe_discard_rxbuf(sc, sc->rxq.cur);
+			continue;
+		}
+
+		if ((vtag & NFE_RX_VTAG) != 0 &&
+		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
+			VLAN_INPUT_TAG_NEW(ifp, m, vtag & 0xffff);
+			if (m == NULL)
+				continue;
+		}
+
+		m->m_pkthdr.len = m->m_len = len;
+		m->m_pkthdr.rcvif = ifp;
+
+		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
+			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
+				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
+				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
+					m->m_pkthdr.csum_flags |=
+					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+					m->m_pkthdr.csum_data = 0xffff;
+				}
+			}
+		}
+
+		ifp->if_ipackets++;
+
+		NFE_UNLOCK(sc);
+		(*ifp->if_input)(ifp, m);
+		NFE_LOCK(sc);
+	}
+
+	if (prog > 0)
+		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+	return (count > 0 ? 0 : EAGAIN);
+}
+
+
+static int
+nfe_jrxeof(struct nfe_softc *sc, int count)
+{
+	struct ifnet *ifp = sc->nfe_ifp;
+	struct nfe_desc32 *desc32;
+	struct nfe_desc64 *desc64;
+	struct nfe_rx_data *data;
+	struct mbuf *m;
+	uint16_t flags;
+	int len, prog;
+	uint32_t vtag = 0;
+
+	NFE_LOCK_ASSERT(sc);
+
+	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
+	    BUS_DMASYNC_POSTREAD);
+
+	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
+	    vtag = 0) {
+		if (count <= 0)
+			break;
+		count--;
+
+		data = &sc->jrxq.jdata[sc->jrxq.jcur];
+
+		if (sc->nfe_flags & NFE_40BIT_ADDR) {
+			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
+			vtag = le32toh(desc64->physaddr[1]);
+			flags = le16toh(desc64->flags);
+			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
+		} else {
+			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
+			flags = le16toh(desc32->flags);
+			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
+		}
+
+		if (flags & NFE_RX_READY)
+			break;
+		prog++;
+		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
+			if (!(flags & NFE_RX_VALID_V1)) {
+				ifp->if_ierrors++;
+				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
+				continue;
+			}
+			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
+				flags &= ~NFE_RX_ERROR;
+				len--;	/* fix buffer length */
+			}
+		} else {
+			if (!(flags & NFE_RX_VALID_V2)) {
+				ifp->if_ierrors++;
+				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
+				continue;
+			}
+
+			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
+				flags &= ~NFE_RX_ERROR;
+				len--;	/* fix buffer length */
+			}
+		}
+
+		if (flags & NFE_RX_ERROR) {
+			ifp->if_ierrors++;
+			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
+			continue;
+		}
+
+		m = data->m;
+		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
+			ifp->if_iqdrops++;
+			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
+			continue;
+		}
+
+		if ((vtag & NFE_RX_VTAG) != 0 &&
+		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
+			VLAN_INPUT_TAG_NEW(ifp, m, vtag & 0xffff);
+			if (m == NULL)
+				continue;
+		}
+
+		m->m_pkthdr.len = m->m_len = len;
+		m->m_pkthdr.rcvif = ifp;
+
+		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
+			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
+				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
+				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
+					m->m_pkthdr.csum_flags |=
+					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+					m->m_pkthdr.csum_data = 0xffff;
+				}
+			}
+		}
+
+		ifp->if_ipackets++;
+
+		NFE_UNLOCK(sc);
+		(*ifp->if_input)(ifp, m);
+		NFE_LOCK(sc);
+	}
+
+	if (prog > 0)
+		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+	return (count > 0 ? 0 : EAGAIN);
+}
+
+
+static void
+nfe_txeof(struct nfe_softc *sc)
+{
+	struct ifnet *ifp = sc->nfe_ifp;
+	struct nfe_desc32 *desc32;
+	struct nfe_desc64 *desc64;
+	struct nfe_tx_data *data = NULL;
+	uint16_t flags;
+	int cons, prog;
+
+	NFE_LOCK_ASSERT(sc);
+
+	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
+	    BUS_DMASYNC_POSTREAD);
+
+	prog = 0;
+	for (cons = sc->txq.next; cons != sc->txq.cur;
+	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
+		if (sc->nfe_flags & NFE_40BIT_ADDR) {
+			desc64 = &sc->txq.desc64[cons];
+			flags = le16toh(desc64->flags);
+		} else {
+			desc32 = &sc->txq.desc32[cons];
+			flags = le16toh(desc32->flags);
+		}
+
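+		/*
+		 * NFE_TX_VALID still set means the chip has not finished
+		 * processing this descriptor.
+		 */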
+		if (flags & NFE_TX_VALID)
+			break;
+
+		prog++;
+		sc->txq.queued--;
+		data = &sc->txq.data[cons];
+
+		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
+			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
+				continue;
+			if ((flags & NFE_TX_ERROR_V1) != 0) {
+				device_printf(sc->nfe_dev,
+				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
+
+				ifp->if_oerrors++;
+			} else
+				ifp->if_opackets++;
+		} else {
+			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
+				continue;
+			if ((flags & NFE_TX_ERROR_V2) != 0) {
+				device_printf(sc->nfe_dev,
+				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
+				ifp->if_oerrors++;
+			} else
+				ifp->if_opackets++;
+		}
+
+		/* last fragment of the mbuf chain transmitted */
+		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
+		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
+		    BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
+		m_freem(data->m);
+		data->m = NULL;
+	}
+
+	if (prog > 0) {
+		sc->nfe_force_tx = 0;
+		sc->txq.next = cons;
+		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+		if (sc->txq.queued == 0)
+			sc->nfe_watchdog_timer = 0;
+	}
+}
+
+/*
+ * This is a copy of ath_defrag() from ath(4).
+ *
+ * Defragment an mbuf chain, returning at most maxfrags separate
+ * mbufs+clusters.  If this is not possible, NULL is returned and
+ * the original mbuf chain is left in its present (potentially
+ * modified) state.  We use two techniques: collapsing consecutive
+ * mbufs and replacing consecutive mbufs by a cluster.
+ */
+static struct mbuf *
+nfe_defrag(struct mbuf *m0, int how, int maxfrags)
+{
+	struct mbuf *m, *n, *n2, **prev;
+	u_int curfrags;
+
+	/*
+	 * Calculate the current number of frags.
+	 */
+	curfrags = 0;
+	for (m = m0; m != NULL; m = m->m_next)
+		curfrags++;
+	/*
+	 * First, try to collapse mbufs.  Note that we always collapse
+	 * towards the front so we don't need to deal with moving the
+	 * pkthdr.  This may be suboptimal if the first mbuf has much
+	 * less data than the following.
+	 */
+	m = m0;
+again:
+	for (;;) {
+		n = m->m_next;
+		if (n == NULL)
+			break;
+		if ((m->m_flags & M_RDONLY) == 0 &&
+		    n->m_len < M_TRAILINGSPACE(m)) {
+			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
+				n->m_len);
+			m->m_len += n->m_len;
+			m->m_next = n->m_next;
+			m_free(n);
+			if (--curfrags <= maxfrags)
+				return (m0);
+		} else
+			m = n;
+	}
+	KASSERT(maxfrags > 1,
+		("maxfrags %u, but normal collapse failed", maxfrags));
+	/*
+	 * Collapse consecutive mbufs to a cluster.
+	 */
+	prev = &m0->m_next;		/* NB: not the first mbuf */
+	while ((n = *prev) != NULL) {
+		if ((n2 = n->m_next) != NULL &&
+		    n->m_len + n2->m_len < MCLBYTES) {
+			m = m_getcl(how, MT_DATA, 0);
+			if (m == NULL)
+				goto bad;
+			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
+			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
+				n2->m_len);
+			m->m_len = n->m_len + n2->m_len;
+			m->m_next = n2->m_next;
+			*prev = m;
+			m_free(n);
+			m_free(n2);
+			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
+				return (m0);	/* +1 cl -2 mbufs */
+			/*
+			 * Still not there, try the normal collapse
+			 * again before we allocate another cluster.
+			 */
+			goto again;
+		}
+		prev = &n->m_next;
+	}
+	/*
+	 * No place where we can collapse to a cluster; punt.
+	 * This can occur if, for example, you request 2 frags
+	 * but the packet requires that both be clusters (we
+	 * never reallocate the first mbuf to avoid moving the
+	 * packet header).
+	 */
+bad:
+	return (NULL);
+}
+
+
+static int
+nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
+{
+	struct nfe_desc32 *desc32 = NULL;
+	struct nfe_desc64 *desc64 = NULL;
+	bus_dmamap_t map;
+	bus_dma_segment_t segs[NFE_MAX_SCATTER];
+	int error, i, nsegs, prod, si;
+	uint16_t cflags, flags;
+	struct mbuf *m;
+	struct m_tag *vtag = NULL;
+
+	prod = si = sc->txq.cur;
+	map = sc->txq.data[prod].tx_data_map;
+
+	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
+	    &nsegs, BUS_DMA_NOWAIT);
+	if (error == EFBIG) {
+		m = nfe_defrag(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
+		if (m == NULL) {
+			m_freem(*m_head);
+			*m_head = NULL;
+			return (ENOBUFS);
+		}
+		*m_head = m;
+		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
+		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
+		if (error != 0) {
+			m_freem(*m_head);
+			*m_head = NULL;
+			return (ENOBUFS);
+		}
+	} else if (error != 0)
+		return (error);
+	if (nsegs == 0) {
+		m_freem(*m_head);
+		*m_head = NULL;
+		return (EIO);
+	}
+
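+	/* Leave a few Tx descriptors unused as headroom. */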
+	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
+		bus_dmamap_unload(sc->txq.tx_data_tag, map);
+		return (ENOBUFS);
+	}
+
+	m = *m_head;
+
+	if (sc->nfe_flags & NFE_HW_VLAN)
+		vtag = VLAN_OUTPUT_TAG(sc->nfe_ifp, m);
+
+	cflags = flags = 0;
+	if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
+		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
+			cflags |= NFE_TX_IP_CSUM;
+		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
+			cflags |= NFE_TX_TCP_UDP_CSUM;
+		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
+			cflags |= NFE_TX_TCP_UDP_CSUM;
+	}
+
+	for (i = 0; i < nsegs; i++) {
+		if (sc->nfe_flags & NFE_40BIT_ADDR) {
+			desc64 = &sc->txq.desc64[prod];
+			desc64->physaddr[0] =
+			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
+			desc64->physaddr[1] =
+			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
+			desc64->vtag = 0;
+			desc64->length = htole16(segs[i].ds_len - 1);
+			desc64->flags = htole16(flags);
+		} else {
+			desc32 = &sc->txq.desc32[prod];
+			desc32->physaddr =
+			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
+			desc32->length = htole16(segs[i].ds_len - 1);
+			desc32->flags = htole16(flags);
+		}
+
+		/*
+		 * Setting of the valid bit in the first descriptor is
+		 * deferred until the whole chain is fully setup.
+		 */
+		flags |= NFE_TX_VALID;
+
+		sc->txq.queued++;
+		NFE_INC(prod, NFE_TX_RING_COUNT);
+	}
+
+	/*
+	 * The whole mbuf chain has been DMA mapped; fix up the last and
+	 * first descriptors.  Checksum flags, vtag and TSO belong to the
+	 * first fragment only.
+	 */
+	if (sc->nfe_flags & NFE_40BIT_ADDR) {
+		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
+		desc64 = &sc->txq.desc64[si];
+		if (vtag != NULL)
+			desc64->vtag = htole32(NFE_TX_VTAG |
+			    VLAN_TAG_VALUE(vtag));
+
+		/*
+		 * finally, set the valid/checksum/TSO bit in the first
+		 * descriptor.
+		 */
+		desc64->flags |= htole16(NFE_TX_VALID | cflags);
+	} else {
+		if (sc->nfe_flags & NFE_JUMBO_SUP)
+			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
+		else
+			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
+		desc32 = &sc->txq.desc32[si];
+		/*
+		 * finally, set the valid/checksum/TSO bit in the first
+		 * descriptor.
+		 */
+		desc32->flags |= htole16(NFE_TX_VALID | cflags);
+	}
+
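+	/*
+	 * Store the mbuf and its DMA map with the last descriptor of
+	 * the chain; nfe_txeof() releases both once the final fragment
+	 * has been transmitted.
+	 */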
+	sc->txq.cur = prod;
+	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
+	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
+	sc->txq.data[prod].tx_data_map = map;
+	sc->txq.data[prod].m = m;
+
+	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
+
+	return (0);
+}
+
+
+static void
+nfe_setmulti(struct nfe_softc *sc)
+{
+	struct ifnet *ifp = sc->nfe_ifp;
+	struct ifmultiaddr *ifma;
+	int i;
+	uint32_t filter;
+	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
+	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+	};
+
+	NFE_LOCK_ASSERT(sc);
+
+	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
+		bzero(addr, ETHER_ADDR_LEN);
+		bzero(mask, ETHER_ADDR_LEN);
+		goto done;
+	}
+
+	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
+	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
+
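+	/*
+	 * The hardware appears to provide a single (address, mask) pair
+	 * for multicast filtering: accumulate in addr the bits common to
+	 * every listed address, and keep in mask only the bit positions
+	 * on which all addresses agree.
+	 */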
+	IF_ADDR_LOCK(ifp);
+	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+		u_char *addrp;
+
+		if (ifma->ifma_addr->sa_family != AF_LINK)
+			continue;
+
+		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
+		for (i = 0; i < ETHER_ADDR_LEN; i++) {
+			u_int8_t mcaddr = addrp[i];
+			addr[i] &= mcaddr;
+			mask[i] &= ~mcaddr;
+		}
+	}
+	IF_ADDR_UNLOCK(ifp);
+
+	for (i = 0; i < ETHER_ADDR_LEN; i++) {
+		mask[i] |= addr[i];
+	}
+
+done:
+	addr[0] |= 0x01;	/* make sure multicast bit is set */
+
+	NFE_WRITE(sc, NFE_MULTIADDR_HI,
+	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
+	NFE_WRITE(sc, NFE_MULTIADDR_LO,
+	    addr[5] <<  8 | addr[4]);
+	NFE_WRITE(sc, NFE_MULTIMASK_HI,
+	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
+	NFE_WRITE(sc, NFE_MULTIMASK_LO,
+	    mask[5] <<  8 | mask[4]);
+
+	filter = NFE_READ(sc, NFE_RXFILTER);
+	filter &= NFE_PFF_RX_PAUSE;
+	filter |= NFE_RXFILTER_MAGIC;
+	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
+	NFE_WRITE(sc, NFE_RXFILTER, filter);
+}
+
+
+static void
+nfe_tx_task(void *arg, int pending)
+{
+	struct ifnet *ifp;
+
+	ifp = (struct ifnet *)arg;
+	nfe_start(ifp);
+}
+
+
+static void
+nfe_start(struct ifnet *ifp)
+{
+	struct nfe_softc *sc = ifp->if_softc;
+	struct mbuf *m0;
+	int enq;
+
+	NFE_LOCK(sc);
+
+	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+	    IFF_DRV_RUNNING || sc->nfe_link == 0) {
+		NFE_UNLOCK(sc);
+		return;
+	}
+
+	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
+		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
+		if (m0 == NULL)
+			break;
+
+		if (nfe_encap(sc, &m0) != 0) {
+			if (m0 == NULL)
+				break;
+			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
+			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+			break;
+		}
+		enq++;
+		ETHER_BPF_MTAP(ifp, m0);
+	}
+
+	if (enq > 0) {
+		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+		/* kick Tx */
+		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
+
+		/*
+		 * Set a timeout in case the chip goes out to lunch.
+		 */
+		sc->nfe_watchdog_timer = 5;
+	}
+
+	NFE_UNLOCK(sc);
+}
+
+
+static void
+nfe_watchdog(struct ifnet *ifp)
+{
+	struct nfe_softc *sc = ifp->if_softc;
+
+	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
+		return;
+
+	/* Check if we've lost Tx completion interrupt. */
+	nfe_txeof(sc);
+	if (sc->txq.queued == 0) {
+		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
+		    "-- recovering\n");
+		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+			taskqueue_enqueue_fast(taskqueue_fast,
+			    &sc->nfe_tx_task);
+		return;
+	}
+	/* Check if we've lost start Tx command. */
+	sc->nfe_force_tx++;
+	if (sc->nfe_force_tx <= 3) {
+		/*
+		 * If a lost kick turns out to be the usual cause of these
+		 * watchdog timeouts, the code below should move into
+		 * nfe_txeof().
+		 */
+		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
+		return;
+	}
+	sc->nfe_force_tx = 0;
+
+	if_printf(ifp, "watchdog timeout\n");
+
+	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+	ifp->if_oerrors++;
+	nfe_init_locked(sc);
+}
+
+
+static void
+nfe_init(void *xsc)
+{
+	struct nfe_softc *sc = xsc;
+
+	NFE_LOCK(sc);
+	nfe_init_locked(sc);
+	NFE_UNLOCK(sc);
+}
+
+
+static void
+nfe_init_locked(void *xsc)
+{
+	struct nfe_softc *sc = xsc;
+	struct ifnet *ifp = sc->nfe_ifp;
+	struct mii_data *mii;
+	uint32_t val;
+	int error;
+
+	NFE_LOCK_ASSERT(sc);
+
+	mii = device_get_softc(sc->nfe_miibus);
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+		return;
+
+	nfe_stop(ifp);
+
+	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
+
+	nfe_init_tx_ring(sc, &sc->txq);
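+	/*
+	 * The configured frame size selects between the standard and
+	 * jumbo Rx rings.
+	 */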
+	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
+		error = nfe_init_jrx_ring(sc, &sc->jrxq);
+	else
+		error = nfe_init_rx_ring(sc, &sc->rxq);
+	if (error != 0) {
+		device_printf(sc->nfe_dev,
+		    "initialization failed: no memory for rx buffers\n");
+		nfe_stop(ifp);
+		return;
+	}
+
+	val = 0;
+	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
+		val |= NFE_MAC_ADDR_INORDER;
+	NFE_WRITE(sc, NFE_TX_UNK, val);
+	NFE_WRITE(sc, NFE_STATUS, 0);
+
+	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
+		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
+
+	sc->rxtxctl = NFE_RXTX_BIT2;
+	if (sc->nfe_flags & NFE_40BIT_ADDR)
+		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
+	else if (sc->nfe_flags & NFE_JUMBO_SUP)
+		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
+
+	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+		sc->rxtxctl |= NFE_RXTX_RXCSUM;
+	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
+		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
+
+	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
+	DELAY(10);
+	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
+
+	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
+		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
+	else
+		NFE_WRITE(sc, NFE_VTAG_CTL, 0);
+
+	NFE_WRITE(sc, NFE_SETUP_R6, 0);
+
+	/* set MAC address */
+	nfe_set_macaddr(sc, IF_LLADDR(ifp));
+
+	/* tell MAC where rings are in memory */
+	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
+		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
+		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
+		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
+		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
+	} else {
+		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
+		    NFE_ADDR_HI(sc->rxq.physaddr));
+		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
+		    NFE_ADDR_LO(sc->rxq.physaddr));
+	}
+	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
+	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
+
+	NFE_WRITE(sc, NFE_RING_SIZE,
+	    (NFE_RX_RING_COUNT - 1) << 16 |
+	    (NFE_TX_RING_COUNT - 1));
+
+	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
+
+	/* force MAC to wakeup */
+	val = NFE_READ(sc, NFE_PWR_STATE);
+	if ((val & NFE_PWR_WAKEUP) == 0)
+		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
+	DELAY(10);
+	val = NFE_READ(sc, NFE_PWR_STATE);
+	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
+
+#if 1
+	/* configure interrupts coalescing/mitigation */
+	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
+#else
+	/* no interrupt mitigation: one interrupt per packet */
+	NFE_WRITE(sc, NFE_IMTIMER, 970);
+#endif
+
+	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
+	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
+	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
+
+	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
+	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
+
+	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
+	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
+
+	sc->rxtxctl &= ~NFE_RXTX_BIT2;
+	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
+	DELAY(10);
+	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
+
+	/* set Rx filter */
+	nfe_setmulti(sc);
+
+	/* enable Rx */
+	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
+
+	/* enable Tx */
+	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
+
+	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
+
+#ifdef DEVICE_POLLING
+	if (ifp->if_capenable & IFCAP_POLLING)
+		nfe_disable_intr(sc);
+	else
+#endif
+	nfe_set_intr(sc);
+	nfe_enable_intr(sc); /* enable interrupts */
+
+	ifp->if_drv_flags |= IFF_DRV_RUNNING;
+	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+	sc->nfe_link = 0;
+	mii_mediachg(mii);
+
+	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
+}
+
+
+static void
+nfe_stop(struct ifnet *ifp)
+{
+	struct nfe_softc *sc = ifp->if_softc;
+	struct nfe_rx_ring *rx_ring;
+	struct nfe_jrx_ring *jrx_ring;
+	struct nfe_tx_ring *tx_ring;
+	struct nfe_rx_data *rdata;
+	struct nfe_tx_data *tdata;
+	int i;
+
+	NFE_LOCK_ASSERT(sc);
+
+	sc->nfe_watchdog_timer = 0;
+	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+	callout_stop(&sc->nfe_stat_ch);
+
+	/* abort Tx */
+	NFE_WRITE(sc, NFE_TX_CTL, 0);
+
+	/* disable Rx */
+	NFE_WRITE(sc, NFE_RX_CTL, 0);
+
+	/* disable interrupts */
+	nfe_disable_intr(sc);
+
+	sc->nfe_link = 0;
+
+	/* free Rx and Tx mbufs still in the queues. */
+	rx_ring = &sc->rxq;
+	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
+		rdata = &rx_ring->data[i];
+		if (rdata->m != NULL) {
+			bus_dmamap_sync(rx_ring->rx_data_tag,
+			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
+			bus_dmamap_unload(rx_ring->rx_data_tag,
+			    rdata->rx_data_map);
+			m_freem(rdata->m);
+			rdata->m = NULL;
+		}
+	}
+
+	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
+		jrx_ring = &sc->jrxq;
+		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
+			rdata = &jrx_ring->jdata[i];
+			if (rdata->m != NULL) {
+				bus_dmamap_sync(jrx_ring->jrx_data_tag,
+				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
+				bus_dmamap_unload(jrx_ring->jrx_data_tag,
+				    rdata->rx_data_map);
+				m_freem(rdata->m);
+				rdata->m = NULL;
+			}
+		}
+	}
+
+	tx_ring = &sc->txq;
+	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
+		tdata = &tx_ring->data[i];
+		if (tdata->m != NULL) {
+			bus_dmamap_sync(tx_ring->tx_data_tag,
+			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(tx_ring->tx_data_tag,
+			    tdata->tx_data_map);
+			m_freem(tdata->m);
+			tdata->m = NULL;
+		}
+	}
+}
+
+
+static int
+nfe_ifmedia_upd(struct ifnet *ifp)
+{
+	struct nfe_softc *sc = ifp->if_softc;
+	struct mii_data *mii;
+
+	NFE_LOCK(sc);
+	mii = device_get_softc(sc->nfe_miibus);
+	mii_mediachg(mii);
+	NFE_UNLOCK(sc);
+
+	return (0);
+}
+
+
+static void
+nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+	struct nfe_softc *sc;
+	struct mii_data *mii;
+
+	sc = ifp->if_softc;
+
+	NFE_LOCK(sc);
+	mii = device_get_softc(sc->nfe_miibus);
+	mii_pollstat(mii);
+	NFE_UNLOCK(sc);
+
+	ifmr->ifm_active = mii->mii_media_active;
+	ifmr->ifm_status = mii->mii_media_status;
+}
+
+
+static void
+nfe_tick(void *xsc)
+{
+	struct nfe_softc *sc;
+	struct mii_data *mii;
+	struct ifnet *ifp;
+
+	sc = (struct nfe_softc *)xsc;
+
+	NFE_LOCK_ASSERT(sc);
+
+	ifp = sc->nfe_ifp;
+
+	mii = device_get_softc(sc->nfe_miibus);
+	mii_tick(mii);
+	nfe_watchdog(ifp);
+	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
+}
+
+
+static void
+nfe_shutdown(device_t dev)
+{
+	struct nfe_softc *sc;
+	struct ifnet *ifp;
+
+	sc = device_get_softc(dev);
+
+	NFE_LOCK(sc);
+	ifp = sc->nfe_ifp;
+	nfe_stop(ifp);
+	/* nfe_reset(sc); */
+	NFE_UNLOCK(sc);
+}
+
+
+static void
+nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
+{
+	uint32_t val;
+
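+	/*
+	 * Chips without the NFE_CORRECT_MACADDR quirk store the station
+	 * address in reversed byte order.
+	 */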
+	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
+		val = NFE_READ(sc, NFE_MACADDR_LO);
+		addr[0] = (val >> 8) & 0xff;
+		addr[1] = (val & 0xff);
+
+		val = NFE_READ(sc, NFE_MACADDR_HI);
+		addr[2] = (val >> 24) & 0xff;
+		addr[3] = (val >> 16) & 0xff;
+		addr[4] = (val >>  8) & 0xff;
+		addr[5] = (val & 0xff);
+	} else {
+		val = NFE_READ(sc, NFE_MACADDR_LO);
+		addr[5] = (val >> 8) & 0xff;
+		addr[4] = (val & 0xff);
+
+		val = NFE_READ(sc, NFE_MACADDR_HI);
+		addr[3] = (val >> 24) & 0xff;
+		addr[2] = (val >> 16) & 0xff;
+		addr[1] = (val >>  8) & 0xff;
+		addr[0] = (val & 0xff);
+	}
+}
+
+
+static void
+nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
+{
+
+	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
+	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
+	    addr[1] << 8 | addr[0]);
+}
+
+
+/*
+ * Map a single buffer address.
+ */
+
+static void
+nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	struct nfe_dmamap_arg *ctx;
+
+	if (error != 0)
+		return;
+
+	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
+
+	ctx = (struct nfe_dmamap_arg *)arg;
+	ctx->nfe_busaddr = segs[0].ds_addr;
+}
+
+
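+/* Bounds-checked integer sysctl handler used by the process-limit sysctl below. */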
+static int
+sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
+{
+	int error, value;
+
+	if (!arg1)
+		return (EINVAL);
+	value = *(int *)arg1;
+	error = sysctl_handle_int(oidp, &value, 0, req);
+	if (error || !req->newptr)
+		return (error);
+	if (value < low || value > high)
+		return (EINVAL);
+	*(int *)arg1 = value;
+
+	return (0);
+}
+
+
+static int
+sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
+{
+
+	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
+	    NFE_PROC_MAX));
+}
diff -ur -N sys.old/dev/nfe/if_nfereg.h sys/dev/nfe/if_nfereg.h
--- sys.old/dev/nfe/if_nfereg.h	1970-01-01 00:00:00.000000000 +0000
+++ sys/dev/nfe/if_nfereg.h	2008-02-02 12:47:40.000000000 +0000
@@ -0,0 +1,322 @@
+/*	$OpenBSD: if_nfereg.h,v 1.16 2006/02/22 19:23:44 damien Exp $	*/
+
+/*-
+ * Copyright (c) 2005 Jonathan Gray <jsg at openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD: src/sys/dev/nfe/if_nfereg.h,v 1.10 2007/06/12 10:51:47 yongari Exp $
+ */
+
+#define	NFE_RX_RING_COUNT	256
+#define	NFE_JUMBO_RX_RING_COUNT	NFE_RX_RING_COUNT
+#define	NFE_TX_RING_COUNT	256
+
+#define	NFE_PROC_DEFAULT	((NFE_RX_RING_COUNT * 3) / 4)
+#define	NFE_PROC_MIN		50
+#define	NFE_PROC_MAX		(NFE_RX_RING_COUNT - 1)
+
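+/* Advance a ring index modulo the ring size. */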
+#define	NFE_INC(x, y)	(x) = ((x) + 1) % (y)
+
+/* RX/TX MAC addr + type + VLAN + align + slack */
+#define	NFE_RX_HEADERS		64
+
+/* Maximum MTU size. */
+#define	NV_PKTLIMIT_1		ETH_DATA_LEN	/* Hard limit not known. */
+#define	NV_PKTLIMIT_2		9100 /* Actual limit according to NVidia: 9202 */
+
+#define	NFE_JUMBO_FRAMELEN	NV_PKTLIMIT_2
+#define	NFE_JUMBO_MTU		\
+	(NFE_JUMBO_FRAMELEN - NFE_RX_HEADERS)
+#define	NFE_MIN_FRAMELEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
+#define	NFE_JSLOTS		((NFE_JUMBO_RX_RING_COUNT * 3) / 2)
+
+#define	NFE_JRAWLEN		(NFE_JUMBO_FRAMELEN + ETHER_ALIGN)
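+/* NFE_JLEN: the raw jumbo length padded to an 8-byte boundary. */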
+#define	NFE_JLEN		\
+	(NFE_JRAWLEN + (sizeof(uint64_t) - (NFE_JRAWLEN % sizeof(uint64_t))))
+#define	NFE_JPAGESZ	PAGE_SIZE
+#define	NFE_RESID	\
+	(NFE_JPAGESZ - (NFE_JLEN * NFE_JSLOTS) % NFE_JPAGESZ)
+#define	NFE_JMEM	((NFE_JLEN * NFE_JSLOTS) + NFE_RESID)
+
+#define	NFE_MAX_SCATTER		32
+#define	NFE_TSO_MAXSGSIZE	4096
+#define	NFE_TSO_MAXSIZE		(65535 + sizeof(struct ether_vlan_header))
+
+#define	NFE_IRQ_STATUS		0x000
+#define	NFE_IRQ_MASK		0x004
+#define	NFE_SETUP_R6		0x008
+#define	NFE_IMTIMER		0x00c
+#define	NFE_MSI_MAP0		0x020
+#define	NFE_MSI_MAP1		0x024
+#define	NFE_MSI_IRQ_MASK	0x030
+#define	NFE_MAC_RESET		0x03c
+#define	NFE_MISC1		0x080
+#define	NFE_TX_CTL		0x084
+#define	NFE_TX_STATUS		0x088
+#define	NFE_RXFILTER		0x08c
+#define	NFE_RXBUFSZ		0x090
+#define	NFE_RX_CTL		0x094
+#define	NFE_RX_STATUS		0x098
+#define	NFE_RNDSEED		0x09c
+#define	NFE_SETUP_R1		0x0a0
+#define	NFE_SETUP_R2		0x0a4
+#define	NFE_MACADDR_HI		0x0a8
+#define	NFE_MACADDR_LO		0x0ac
+#define	NFE_MULTIADDR_HI	0x0b0
+#define	NFE_MULTIADDR_LO	0x0b4
+#define	NFE_MULTIMASK_HI	0x0b8
+#define	NFE_MULTIMASK_LO	0x0bc
+#define	NFE_PHY_IFACE		0x0c0
+#define	NFE_TX_RING_ADDR_LO	0x100
+#define	NFE_RX_RING_ADDR_LO	0x104
+#define	NFE_RING_SIZE		0x108
+#define	NFE_TX_UNK		0x10c
+#define	NFE_LINKSPEED		0x110
+#define	NFE_SETUP_R5		0x130
+#define	NFE_SETUP_R3		0x13C
+#define	NFE_SETUP_R7		0x140
+#define	NFE_RXTX_CTL		0x144
+#define	NFE_TX_RING_ADDR_HI	0x148
+#define	NFE_RX_RING_ADDR_HI	0x14c
+#define	NFE_TX_PAUSE_FRAME	0x170
+#define	NFE_PHY_STATUS		0x180
+#define	NFE_SETUP_R4		0x184
+#define	NFE_STATUS		0x188
+#define	NFE_PHY_SPEED		0x18c
+#define	NFE_PHY_CTL		0x190
+#define	NFE_PHY_DATA		0x194
+#define	NFE_WOL_CTL		0x200
+#define	NFE_PATTERN_CRC		0x204
+#define	NFE_PATTERN_MASK	0x208
+#define	NFE_PWR_CAP		0x268
+#define	NFE_PWR_STATE		0x26c
+#define	NFE_VTAG_CTL		0x300
+#define	NFE_MSIX_MAP0		0x3e0
+#define	NFE_MSIX_MAP1		0x3e4
+#define	NFE_MSIX_IRQ_STATUS	0x3f0
+#define	NFE_PWR2_CTL		0x600
+
+#define	NFE_MAC_RESET_MAGIC	0x00f3
+
+#define	NFE_MAC_ADDR_INORDER	0x8000
+
+#define	NFE_PHY_ERROR		0x00001
+#define	NFE_PHY_WRITE		0x00400
+#define	NFE_PHY_BUSY		0x08000
+#define	NFE_PHYADD_SHIFT	5
+
+#define	NFE_STATUS_MAGIC	0x140000
+
+#define	NFE_R1_MAGIC_1000	0x14050f
+#define	NFE_R1_MAGIC_10_100	0x16070f
+#define	NFE_R1_MAGIC_DEFAULT	0x15050f
+#define	NFE_R2_MAGIC		0x16
+#define	NFE_R4_MAGIC		0x08
+#define	NFE_R6_MAGIC		0x03
+#define	NFE_WOL_MAGIC		0x1111
+#define	NFE_RX_START		0x01
+#define	NFE_TX_START		0x01
+
+#define	NFE_IRQ_RXERR		0x0001
+#define	NFE_IRQ_RX		0x0002
+#define	NFE_IRQ_RX_NOBUF	0x0004
+#define	NFE_IRQ_TXERR		0x0008
+#define	NFE_IRQ_TX_DONE		0x0010
+#define	NFE_IRQ_TIMER		0x0020
+#define	NFE_IRQ_LINK		0x0040
+#define	NFE_IRQ_TXERR2		0x0080
+#define	NFE_IRQ_TX1		0x0100
+
+#define	NFE_IRQ_WANTED							\
+	(NFE_IRQ_RXERR | NFE_IRQ_RX_NOBUF | NFE_IRQ_RX |		\
+	 NFE_IRQ_TXERR | NFE_IRQ_TXERR2 | NFE_IRQ_TX_DONE |		\
+	 NFE_IRQ_LINK)
+
+#define	NFE_RXTX_KICKTX		0x0001
+#define	NFE_RXTX_BIT1		0x0002
+#define	NFE_RXTX_BIT2		0x0004
+#define	NFE_RXTX_RESET		0x0010
+#define	NFE_RXTX_VTAG_STRIP	0x0040
+#define	NFE_RXTX_VTAG_INSERT	0x0080
+#define	NFE_RXTX_RXCSUM		0x0400
+#define	NFE_RXTX_V2MAGIC	0x2100
+#define	NFE_RXTX_V3MAGIC	0x2200
+#define	NFE_RXFILTER_MAGIC	0x007f0000
+#define	NFE_PFF_RX_PAUSE	(1 << 3)
+#define	NFE_PFF_LOOPBACK	(1 << 4)
+#define	NFE_PFF_U2M		(1 << 5)
+#define	NFE_PFF_PROMISC		(1 << 7)
+#define	NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
+
+/* default interrupt moderation timer of 128us */
+#define	NFE_IM_DEFAULT	((128 * 100) / 1024)
+
+#define	NFE_VTAG_ENABLE		(1 << 13)
+
+#define	NFE_PWR_VALID		(1 << 8)
+#define	NFE_PWR_WAKEUP		(1 << 15)
+
+#define	NFE_PWR2_WAKEUP_MASK	0x0f11
+#define	NFE_PWR2_REVA3		(1 << 0)
+
+#define	NFE_MEDIA_SET		0x10000
+#define	NFE_MEDIA_1000T		0x00032
+#define	NFE_MEDIA_100TX		0x00064
+#define	NFE_MEDIA_10T		0x003e8
+
+#define	NFE_PHY_100TX		(1 << 0)
+#define	NFE_PHY_1000T		(1 << 1)
+#define	NFE_PHY_HDX		(1 << 8)
+
+#define	NFE_MISC1_MAGIC		0x003b0f3c
+#define	NFE_MISC1_TX_PAUSE	(1 << 0)
+#define	NFE_MISC1_HDX		(1 << 1)
+
+#define	NFE_TX_PAUSE_FRAME_DISABLE	0x1ff0080
+#define	NFE_TX_PAUSE_FRAME_ENABLE	0x0c00030
+
+#define	NFE_SEED_MASK		0x0003ff00
+#define	NFE_SEED_10T		0x00007f00
+#define	NFE_SEED_100TX		0x00002d00
+#define	NFE_SEED_1000T		0x00007400
+
+#define	NFE_MSI_MESSAGES	8
+#define	NFE_MSI_VECTOR_0_ENABLED	0x01
+
+/*
+ * It seems that nForce supports only the lower 40 bits of a DMA address.
+ */
+#if (BUS_SPACE_MAXADDR < 0xFFFFFFFFFF)
+#define	NFE_DMA_MAXADDR		BUS_SPACE_MAXADDR
+#else
+#define	NFE_DMA_MAXADDR		0xFFFFFFFFFF
+#endif
+
+#define	NFE_ADDR_LO(x)		((u_int64_t) (x) & 0xffffffff)
+#define	NFE_ADDR_HI(x)		((u_int64_t) (x) >> 32)
+
+/* Rx/Tx descriptor */
+struct nfe_desc32 {
+	uint32_t	physaddr;
+	uint16_t	length;
+	uint16_t	flags;
+#define	NFE_RX_FIXME_V1		0x6004
+#define	NFE_RX_VALID_V1		(1 << 0)
+#define	NFE_TX_ERROR_V1		0x7808
+#define	NFE_TX_LASTFRAG_V1	(1 << 0)
+#define	NFE_RX_ERROR1_V1	(1<<7)
+#define	NFE_RX_ERROR2_V1	(1<<8)
+#define	NFE_RX_ERROR3_V1	(1<<9)
+#define	NFE_RX_ERROR4_V1	(1<<10)
+} __packed;
+
+#define	NFE_V1_TXERR	"\020"	\
+	"\14TXERROR\13UNDERFLOW\12LATECOLLISION\11LOSTCARRIER\10DEFERRED" \
+	"\08FORCEDINT\03RETRY\00LASTPACKET"
+
+/* V2 Rx/Tx descriptor */
+struct nfe_desc64 {
+	uint32_t	physaddr[2];
+	uint32_t	vtag;
+#define	NFE_RX_VTAG		(1 << 16)
+#define	NFE_TX_VTAG		(1 << 18)
+	uint16_t	length;
+	uint16_t	flags;
+#define	NFE_RX_FIXME_V2		0x4300
+#define	NFE_RX_VALID_V2		(1 << 13)
+#define	NFE_TX_ERROR_V2		0x5c04
+#define	NFE_TX_LASTFRAG_V2	(1 << 13)
+#define	NFE_RX_ERROR1_V2	(1<<2)
+#define	NFE_RX_ERROR2_V2	(1<<3)
+#define	NFE_RX_ERROR3_V2	(1<<4)
+#define	NFE_RX_ERROR4_V2	(1<<5)
+} __packed;
+
+#define	NFE_V2_TXERR	"\020"	\
+	"\14FORCEDINT\13LASTPACKET\12UNDERFLOW\10LOSTCARRIER\09DEFERRED\02RETRY"
+
+#define	NFE_RING_ALIGN	(sizeof(struct nfe_desc64))
+
+/* flags common to V1/V2 descriptors */
+#define	NFE_RX_UDP_CSUMOK	(1 << 10)
+#define	NFE_RX_TCP_CSUMOK	(1 << 11)
+#define	NFE_RX_IP_CSUMOK	(1 << 12)
+#define	NFE_RX_ERROR		(1 << 14)
+#define	NFE_RX_READY		(1 << 15)
+#define	NFE_RX_LEN_MASK		0x3fff
+#define	NFE_TX_TCP_UDP_CSUM	(1 << 10)
+#define	NFE_TX_IP_CSUM		(1 << 11)
+#define	NFE_TX_TSO		(1 << 12)
+#define	NFE_TX_TSO_SHIFT	14
+#define	NFE_TX_VALID		(1 << 15)
+
+#define	NFE_READ(sc, reg) \
+	bus_read_4((sc)->nfe_res[0], (reg))
+
+#define	NFE_WRITE(sc, reg, val) \
+	bus_write_4((sc)->nfe_res[0], (reg), (val))
+
+#define	NFE_TIMEOUT	1000
+
+#ifndef PCI_VENDOR_NVIDIA
+#define	PCI_VENDOR_NVIDIA	0x10DE
+#endif
+
+#define	PCI_PRODUCT_NVIDIA_NFORCE_LAN		0x01C3
+#define	PCI_PRODUCT_NVIDIA_NFORCE2_LAN		0x0066
+#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN1		0x00D6
+#define	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1	0x0086
+#define	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2	0x008C
+#define	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN	0x00E6
+#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN4		0x00DF
+#define	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1		0x0056
+#define	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2		0x0057
+#define	PCI_PRODUCT_NVIDIA_MCP04_LAN1		0x0037
+#define	PCI_PRODUCT_NVIDIA_MCP04_LAN2		0x0038
+#define	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1	0x0268
+#define	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2	0x0269
+#define	PCI_PRODUCT_NVIDIA_MCP55_LAN1		0x0372
+#define	PCI_PRODUCT_NVIDIA_MCP55_LAN2		0x0373
+#define	PCI_PRODUCT_NVIDIA_MCP61_LAN1		0x03e5
+#define	PCI_PRODUCT_NVIDIA_MCP61_LAN2		0x03e6
+#define	PCI_PRODUCT_NVIDIA_MCP61_LAN3		0x03ee
+#define	PCI_PRODUCT_NVIDIA_MCP61_LAN4		0x03ef
+#define	PCI_PRODUCT_NVIDIA_MCP65_LAN1		0x0450
+#define	PCI_PRODUCT_NVIDIA_MCP65_LAN2		0x0451
+#define	PCI_PRODUCT_NVIDIA_MCP65_LAN3		0x0452
+#define	PCI_PRODUCT_NVIDIA_MCP65_LAN4		0x0453
+#define	PCI_PRODUCT_NVIDIA_MCP67_LAN1		0x054c
+#define	PCI_PRODUCT_NVIDIA_MCP67_LAN2		0x054d
+#define	PCI_PRODUCT_NVIDIA_MCP67_LAN3		0x054e
+#define	PCI_PRODUCT_NVIDIA_MCP67_LAN4		0x054f
+
+#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN2	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
+#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN3	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
+#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN5	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN
+#define	PCI_PRODUCT_NVIDIA_CK804_LAN1	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
+#define	PCI_PRODUCT_NVIDIA_CK804_LAN2	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2
+#define	PCI_PRODUCT_NVIDIA_MCP51_LAN1	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
+#define	PCI_PRODUCT_NVIDIA_MCP51_LAN2	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2
+
+#define	NFE_DEBUG		0x0000
+#define	NFE_DEBUG_INIT		0x0001
+#define	NFE_DEBUG_RUNNING	0x0002
+#define	NFE_DEBUG_DEINIT	0x0004
+#define	NFE_DEBUG_IOCTL		0x0008
+#define	NFE_DEBUG_INTERRUPT	0x0010
+#define	NFE_DEBUG_API		0x0020
+#define	NFE_DEBUG_LOCK		0x0040
+#define	NFE_DEBUG_BROKEN	0x0080
+#define	NFE_DEBUG_MII		0x0100
+#define	NFE_DEBUG_ALL		0xFFFF
diff -ur -N sys.old/dev/nfe/if_nfevar.h sys/dev/nfe/if_nfevar.h
--- sys.old/dev/nfe/if_nfevar.h	1970-01-01 00:00:00.000000000 +0000
+++ sys/dev/nfe/if_nfevar.h	2008-02-02 12:47:40.000000000 +0000
@@ -0,0 +1,138 @@
+/*	$OpenBSD: if_nfevar.h,v 1.11 2006/02/19 13:57:02 damien Exp $	*/
+
+/*-
+ * Copyright (c) 2005 Jonathan Gray <jsg at openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD: src/sys/dev/nfe/if_nfevar.h,v 1.5 2007/07/24 01:11:00 yongari Exp $
+ */
+
+struct nfe_tx_data {
+	bus_dmamap_t	tx_data_map;
+	struct mbuf	*m;
+};
+
+struct nfe_tx_ring {
+	bus_dma_tag_t		tx_desc_tag;
+	bus_dmamap_t		tx_desc_map;
+	bus_addr_t		physaddr;
+	struct nfe_desc32	*desc32;
+	struct nfe_desc64	*desc64;
+	bus_dma_tag_t		tx_data_tag;
+	struct nfe_tx_data	data[NFE_TX_RING_COUNT];
+	int			queued;
+	int			cur;
+	int			next;
+};
+
+struct nfe_jpool_entry {
+	int				slot;
+	SLIST_ENTRY(nfe_jpool_entry)	jpool_entries;
+};
+
+struct nfe_rx_data {
+	bus_dmamap_t	rx_data_map;
+	bus_addr_t	paddr;
+	struct mbuf	*m;
+};
+
+struct nfe_rx_ring {
+	bus_dma_tag_t		rx_desc_tag;
+	bus_dmamap_t		rx_desc_map;
+	bus_addr_t		physaddr;
+	struct nfe_desc32	*desc32;
+	struct nfe_desc64	*desc64;
+	bus_dma_tag_t		rx_data_tag;
+	bus_dmamap_t		rx_spare_map;
+	struct nfe_rx_data	data[NFE_RX_RING_COUNT];
+	int			cur;
+	int			next;
+};
+
+struct nfe_jrx_ring {
+	bus_dma_tag_t		jrx_desc_tag;
+	bus_dmamap_t		jrx_desc_map;
+	bus_dma_tag_t		jrx_jumbo_tag;
+	bus_dmamap_t		jrx_jumbo_map;
+	void			*jpool;
+	caddr_t			jslots[NFE_JSLOTS];
+	bus_addr_t		jphysaddr;
+	struct nfe_desc32	*jdesc32;
+	struct nfe_desc64	*jdesc64;
+	bus_dma_tag_t		jrx_data_tag;
+	bus_dmamap_t		jrx_spare_map;
+	struct nfe_rx_data	jdata[NFE_JUMBO_RX_RING_COUNT];
+	int			jcur;
+	int			jnext;
+};
+
+struct nfe_softc {
+	struct ifnet		*nfe_ifp;
+	device_t		nfe_dev;
+	uint16_t		nfe_devid;
+	uint16_t		nfe_revid;
+	device_t		nfe_miibus;
+	struct mtx		nfe_mtx;
+	struct resource		*nfe_res[1];
+	struct resource		*nfe_msix_res;
+	struct resource		*nfe_msix_pba_res;
+	struct resource		*nfe_irq[NFE_MSI_MESSAGES];
+	void			*nfe_intrhand[NFE_MSI_MESSAGES];
+	struct callout		nfe_stat_ch;
+	int			nfe_watchdog_timer;
+
+	bus_dma_tag_t		nfe_parent_tag;
+
+	int			nfe_if_flags;
+	uint32_t		nfe_flags;
+#define	NFE_JUMBO_SUP		0x0001
+#define	NFE_40BIT_ADDR		0x0002
+#define	NFE_HW_CSUM		0x0004
+#define	NFE_HW_VLAN		0x0008
+#define	NFE_PWR_MGMT		0x0010
+#define	NFE_CORRECT_MACADDR	0x0020
+#define	NFE_TX_FLOW_CTRL	0x0040
+	int			nfe_jumbo_disable;
+	uint32_t		rxtxctl;
+	uint8_t			mii_phyaddr;
+	uint8_t			eaddr[ETHER_ADDR_LEN];
+	struct taskqueue	*nfe_tq;
+	struct task		nfe_int_task;
+	struct task		nfe_tx_task;
+	struct task		nfe_link_task;
+	int			nfe_link;
+	int			nfe_suspended;
+	int			nfe_framesize;
+	int			nfe_process_limit;
+	int			nfe_force_tx;
+	uint32_t		nfe_irq_status;
+	uint32_t		nfe_irq_mask;
+	uint32_t		nfe_intrs;
+	uint32_t		nfe_nointrs;
+	uint32_t		nfe_msi;
+	uint32_t		nfe_msix;
+
+	struct nfe_tx_ring	txq;
+	struct nfe_rx_ring	rxq;
+	struct nfe_jrx_ring	jrxq;
+	SLIST_HEAD(__nfe_jfreehead, nfe_jpool_entry)	nfe_jfree_listhead;
+	SLIST_HEAD(__nfe_jinusehead, nfe_jpool_entry)	nfe_jinuse_listhead;
+	struct mtx		nfe_jlist_mtx;
+};
+
+struct nfe_type {
+	uint16_t	vid_id;
+	uint16_t	dev_id;
+	char		*name;
+};
diff -ur -N sys.old/modules/nfe/Makefile sys/modules/nfe/Makefile
--- sys.old/modules/nfe/Makefile	1970-01-01 00:00:00.000000000 +0000
+++ sys/modules/nfe/Makefile	2008-02-02 12:57:41.000000000 +0000
@@ -0,0 +1,7 @@
+.PATH: ${.CURDIR}/../../dev/nfe
+
+KMOD=	if_nfe
+SRCS=	if_nfe.c miibus_if.h opt_bdg.h device_if.h bus_if.h pci_if.h
+SRCS+=	if_nfereg.h if_nfevar.h
+
+.include <bsd.kmod.mk>


>Release-Note:
>Audit-Trail:
>Unformatted:

