svn commit: r256806 - in head/sys: arm/freescale/imx dev/ffec

Ian Lepore ian at FreeBSD.org
Sun Oct 20 21:07:39 UTC 2013


Author: ian
Date: Sun Oct 20 21:07:38 2013
New Revision: 256806
URL: http://svnweb.freebsd.org/changeset/base/256806

Log:
  Add a driver for the Freescale Fast Ethernet Controller found on various
  Freescale SoCs including the i.MX series.  This also works for the newer
  SoCs with the ENET gigabit controller, but doesn't use any of the new
  hardware features other than enabling gigabit speed.

Added:
  head/sys/dev/ffec/
  head/sys/dev/ffec/if_ffec.c   (contents, props changed)
  head/sys/dev/ffec/if_ffecreg.h   (contents, props changed)
Modified:
  head/sys/arm/freescale/imx/files.imx53

Modified: head/sys/arm/freescale/imx/files.imx53
==============================================================================
--- head/sys/arm/freescale/imx/files.imx53	Sun Oct 20 21:04:44 2013	(r256805)
+++ head/sys/arm/freescale/imx/files.imx53	Sun Oct 20 21:07:38 2013	(r256806)
@@ -49,3 +49,6 @@ dev/ofw/ofw_iicbus.c			optional fsliic
 # IPU - Image Processing Unit (frame buffer also)
 arm/freescale/imx/imx51_ipuv3.c		optional sc
 
+# Fast Ethernet Controller
+dev/ffec/if_ffec.c 			optional ffec
+
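With this entry in place, a kernel configuration presumably picks the driver up with a device line along these lines (illustrative only; the actual IMX53 config change is not part of this commit):

	# Illustrative kernel-config excerpt (assumption, not part of this commit)
	device		miibus		# MII bus support for the attached PHY
	device		ffec		# Freescale Fast Ethernet Controller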

Added: head/sys/dev/ffec/if_ffec.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/dev/ffec/if_ffec.c	Sun Oct 20 21:07:38 2013	(r256806)
@@ -0,0 +1,1740 @@
+/*-
+ * Copyright (c) 2013 Ian Lepore <ian at freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Driver for Freescale Fast Ethernet Controller, found on imx-series SoCs among
+ * others.  Also works for the ENET Gigabit controller found on imx6 and imx28,
+ * but the driver doesn't currently use any of the ENET advanced features other
+ * than enabling gigabit.
+ *
+ * The interface name 'fec' is already taken by netgraph's Fast Etherchannel
+ * (netgraph/ng_fec.c), so we use 'ffec'.
+ *
+ * Requires an FDT entry with at least these properties:
+ *   fec: ethernet at 02188000 {
+ *      compatible = "fsl,imxNN-fec";
+ *      reg = <0x02188000 0x4000>;
+ *      interrupts = <150 151>;
+ *      phy-mode = "rgmii";
+ *      phy-disable-preamble; // optional
+ *   };
+ * The second interrupt number is for IEEE-1588, and is not currently used; it
+ * need not be present.  phy-mode must be one of: "mii", "rmii", "rgmii".
+ * There is also an optional property, phy-disable-preamble, which if present
+ * will disable the preamble bits, cutting the size of each mdio transaction
+ * (and thus the busy-wait time) in half.
+ */
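As a rough sketch of how the properties described above might be consumed with the standard OFW helpers: the committed probe/attach code is further down in the (truncated) diff, so the helper name below and the "disable preamble" handling are illustrative assumptions only.

	/*
	 * Sketch only: parse the FDT properties documented above.  Relies on
	 * the OFW headers already included by this file.
	 */
	static int
	ffec_parse_fdt_props(struct ffec_softc *sc)
	{
		phandle_t node;
		char *str;

		node = ofw_bus_get_node(sc->dev);
		sc->phy_conn_type = PHY_CONN_UNKNOWN;
		if (OF_getprop_alloc(node, "phy-mode", 1, (void **)&str) > 0) {
			if (strcmp(str, "mii") == 0)
				sc->phy_conn_type = PHY_CONN_MII;
			else if (strcmp(str, "rmii") == 0)
				sc->phy_conn_type = PHY_CONN_RMII;
			else if (strcmp(str, "rgmii") == 0)
				sc->phy_conn_type = PHY_CONN_RGMII;
			free(str, M_OFWPROP);
		}
		if (OF_hasprop(node, "phy-disable-preamble")) {
			/*
			 * The driver would record this and later set the
			 * "disable preamble" bit when it programs the MII
			 * speed/control register (bit name assumed).
			 */
		}
		return (sc->phy_conn_type == PHY_CONN_UNKNOWN ? ENXIO : 0);
	}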
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+
+#include <machine/bus.h>
+
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+#include <net/if_vlan_var.h>
+
+#include <dev/ffec/if_ffecreg.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+#include "miibus_if.h"
+
+/*
+ * Driver data and defines.
+ */
+#define	RX_DESC_COUNT	64
+#define	RX_DESC_SIZE	(sizeof(struct ffec_hwdesc) * RX_DESC_COUNT)
+#define	TX_DESC_COUNT	64
+#define	TX_DESC_SIZE	(sizeof(struct ffec_hwdesc) * TX_DESC_COUNT)
+
+#define	WATCHDOG_TIMEOUT_SECS	5
+#define	STATS_HARVEST_INTERVAL	3
+
+struct ffec_bufmap {
+	struct mbuf	*mbuf;
+	bus_dmamap_t	map;
+};
+
+enum {
+	PHY_CONN_UNKNOWN,
+	PHY_CONN_MII,
+	PHY_CONN_RMII,
+	PHY_CONN_RGMII
+};
+
+enum {
+	FECTYPE_GENERIC,
+	FECTYPE_IMX51,
+	FECTYPE_IMX53,
+	FECTYPE_IMX6,
+};
+
+struct ffec_softc {
+	device_t		dev;
+	device_t		miibus;
+	struct mii_data *	mii_softc;
+	struct ifnet		*ifp;
+	int			if_flags;
+	struct mtx		mtx;
+	struct resource		*irq_res;
+	struct resource		*mem_res;
+	void *			intr_cookie;
+	struct callout		ffec_callout;
+	uint8_t			phy_conn_type;
+	uint8_t			fectype;
+	boolean_t		link_is_up;
+	boolean_t		is_attached;
+	boolean_t		is_detaching;
+	int			tx_watchdog_count;
+	int			stats_harvest_count;
+
+	bus_dma_tag_t		rxdesc_tag;
+	bus_dmamap_t		rxdesc_map;
+	struct ffec_hwdesc	*rxdesc_ring;
+	bus_addr_t		rxdesc_ring_paddr;
+	bus_dma_tag_t		rxbuf_tag;
+	struct ffec_bufmap	rxbuf_map[RX_DESC_COUNT];
+	uint32_t		rx_idx;
+
+	bus_dma_tag_t		txdesc_tag;
+	bus_dmamap_t		txdesc_map;
+	struct ffec_hwdesc	*txdesc_ring;
+	bus_addr_t		txdesc_ring_paddr;
+	bus_dma_tag_t		txbuf_tag;
+	struct ffec_bufmap	txbuf_map[TX_DESC_COUNT];
+	uint32_t		tx_idx_head;
+	uint32_t		tx_idx_tail;
+	int			txcount;
+};
+
+#define	FFEC_LOCK(sc)			mtx_lock(&(sc)->mtx)
+#define	FFEC_UNLOCK(sc)			mtx_unlock(&(sc)->mtx)
+#define	FFEC_LOCK_INIT(sc)		mtx_init(&(sc)->mtx, \
+	    device_get_nameunit((sc)->dev), MTX_NETWORK_LOCK, MTX_DEF)
+#define	FFEC_LOCK_DESTROY(sc)		mtx_destroy(&(sc)->mtx);
+#define	FFEC_ASSERT_LOCKED(sc)		mtx_assert(&(sc)->mtx, MA_OWNED);
+#define	FFEC_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED);
+
+static void ffec_init_locked(struct ffec_softc *sc);
+static void ffec_stop_locked(struct ffec_softc *sc);
+static void ffec_txstart_locked(struct ffec_softc *sc);
+static void ffec_txfinish_locked(struct ffec_softc *sc);
+
+static inline uint16_t
+RD2(struct ffec_softc *sc, bus_size_t off)
+{
+
+	return (bus_read_2(sc->mem_res, off));
+}
+
+static inline void
+WR2(struct ffec_softc *sc, bus_size_t off, uint16_t val)
+{
+
+	bus_write_2(sc->mem_res, off, val);
+}
+
+static inline uint32_t
+RD4(struct ffec_softc *sc, bus_size_t off)
+{
+
+	return (bus_read_4(sc->mem_res, off));
+}
+
+static inline void
+WR4(struct ffec_softc *sc, bus_size_t off, uint32_t val)
+{
+
+	bus_write_4(sc->mem_res, off, val);
+}
+
+static inline uint32_t
+next_rxidx(struct ffec_softc *sc, uint32_t curidx)
+{
+
+	return ((curidx == RX_DESC_COUNT - 1) ? 0 : curidx + 1);
+}
+
+static inline uint32_t
+next_txidx(struct ffec_softc *sc, uint32_t curidx)
+{
+
+	return ((curidx == TX_DESC_COUNT - 1) ? 0 : curidx + 1);
+}
+
+static void
+ffec_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+
+	if (error != 0)
+		return;
+	*(bus_addr_t *)arg = segs[0].ds_addr;
+}
+
+static void
+ffec_miigasket_setup(struct ffec_softc *sc)
+{
+	uint32_t ifmode;
+
+	/*
+	 * We only need the gasket for MII and RMII connections on certain SoCs.
+	 */
+
+	switch (sc->fectype)
+	{
+	case FECTYPE_IMX53:
+		break;
+	default:
+		return;
+	}
+
+	switch (sc->phy_conn_type)
+	{
+	case PHY_CONN_MII:
+		ifmode = 0;
+		break;
+	case PHY_CONN_RMII:
+		ifmode = FEC_MIIGSK_CFGR_IF_MODE_RMII;
+		break;
+	default:
+		return;
+	}
+
+	/*
+	 * Disable the gasket, configure for either MII or RMII, then enable.
+	 */
+
+	WR2(sc, FEC_MIIGSK_ENR, 0);
+	while (RD2(sc, FEC_MIIGSK_ENR) & FEC_MIIGSK_ENR_READY)
+		continue;
+
+	WR2(sc, FEC_MIIGSK_CFGR, ifmode);
+
+	WR2(sc, FEC_MIIGSK_ENR, FEC_MIIGSK_ENR_EN);
+	while (!(RD2(sc, FEC_MIIGSK_ENR) & FEC_MIIGSK_ENR_READY))
+		continue;
+}
+
+static boolean_t
+ffec_miibus_iowait(struct ffec_softc *sc)
+{
+	uint32_t timeout;
+
+	for (timeout = 10000; timeout != 0; --timeout)
+		if (RD4(sc, FEC_IER_REG) & FEC_IER_MII)
+			return (true);
+
+	return (false);
+}
+
+static int
+ffec_miibus_readreg(device_t dev, int phy, int reg)
+{
+	struct ffec_softc *sc;
+	int val;
+
+	sc = device_get_softc(dev);
+
+	WR4(sc, FEC_IER_REG, FEC_IER_MII);
+
+	WR4(sc, FEC_MMFR_REG, FEC_MMFR_OP_READ |
+	    FEC_MMFR_ST_VALUE | FEC_MMFR_TA_VALUE |
+	    ((phy << FEC_MMFR_PA_SHIFT) & FEC_MMFR_PA_MASK) |
+	    ((reg << FEC_MMFR_RA_SHIFT) & FEC_MMFR_RA_MASK));
+
+	if (!ffec_miibus_iowait(sc)) {
+		device_printf(dev, "timeout waiting for mii read\n");
+		return (-1); /* All-ones is a symptom of bad mdio. */
+	}
+
+	val = RD4(sc, FEC_MMFR_REG) & FEC_MMFR_DATA_MASK;
+
+	return (val);
+}
+
+static int
+ffec_miibus_writereg(device_t dev, int phy, int reg, int val)
+{
+	struct ffec_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	WR4(sc, FEC_IER_REG, FEC_IER_MII);
+
+	WR4(sc, FEC_MMFR_REG, FEC_MMFR_OP_WRITE |
+	    FEC_MMFR_ST_VALUE | FEC_MMFR_TA_VALUE |
+	    ((phy << FEC_MMFR_PA_SHIFT) & FEC_MMFR_PA_MASK) |
+	    ((reg << FEC_MMFR_RA_SHIFT) & FEC_MMFR_RA_MASK) |
+	    (val & FEC_MMFR_DATA_MASK));
+
+	if (!ffec_miibus_iowait(sc)) {
+		device_printf(dev, "timeout waiting for mii write\n");
+		return (-1);
+	}
+
+	return (0);
+}
+
+static void
+ffec_miibus_statchg(device_t dev)
+{
+	struct ffec_softc *sc;
+	struct mii_data *mii;
+	uint32_t ecr, rcr, tcr;
+
+	/*
+	 * Called by the MII bus driver when the PHY establishes link to set the
+	 * MAC interface registers.
+	 */
+
+	sc = device_get_softc(dev);
+
+	FFEC_ASSERT_LOCKED(sc);
+
+	mii = sc->mii_softc;
+
+	if (mii->mii_media_status & IFM_ACTIVE)
+		sc->link_is_up = true;
+	else
+		sc->link_is_up = false;
+
+	ecr = RD4(sc, FEC_ECR_REG) & ~FEC_ECR_SPEED;
+	rcr = RD4(sc, FEC_RCR_REG) & ~(FEC_RCR_RMII_10T | FEC_RCR_RMII_MODE |
+	    FEC_RCR_RGMII_EN | FEC_RCR_DRT | FEC_RCR_FCE);
+	tcr = RD4(sc, FEC_TCR_REG) & ~FEC_TCR_FDEN;
+
+	rcr |= FEC_RCR_MII_MODE; /* Must always be on even for R[G]MII. */
+	switch (sc->phy_conn_type) {
+	case PHY_CONN_MII:
+		break;
+	case PHY_CONN_RMII:
+		rcr |= FEC_RCR_RMII_MODE;
+		break;
+	case PHY_CONN_RGMII:
+		rcr |= FEC_RCR_RGMII_EN;
+		break;
+	}
+
+	switch (IFM_SUBTYPE(mii->mii_media_active)) {
+	case IFM_1000_T:
+	case IFM_1000_SX:
+		ecr |= FEC_ECR_SPEED;
+		break;
+	case IFM_100_TX:
+		/* Not-FEC_ECR_SPEED + not-FEC_RCR_RMII_10T means 100TX */
+		break;
+	case IFM_10_T:
+		rcr |= FEC_RCR_RMII_10T;
+		break;
+	case IFM_NONE:
+		sc->link_is_up = false;
+		return;
+	default:
+		sc->link_is_up = false;
+		device_printf(dev, "Unsupported media %u\n",
+		    IFM_SUBTYPE(mii->mii_media_active));
+		return;
+	}
+
+	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
+		tcr |= FEC_TCR_FDEN;
+	else
+		rcr |= FEC_RCR_DRT;
+
+	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FLOW) != 0)
+		rcr |= FEC_RCR_FCE;
+
+	WR4(sc, FEC_RCR_REG, rcr);
+	WR4(sc, FEC_TCR_REG, tcr);
+	WR4(sc, FEC_ECR_REG, ecr);
+}
+
+static void
+ffec_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
+{
+	struct ffec_softc *sc;
+	struct mii_data *mii;
+
+	sc = ifp->if_softc;
+	mii = sc->mii_softc;
+	FFEC_LOCK(sc);
+	mii_pollstat(mii);
+	ifmr->ifm_active = mii->mii_media_active;
+	ifmr->ifm_status = mii->mii_media_status;
+	FFEC_UNLOCK(sc);
+}
+
+static int
+ffec_media_change_locked(struct ffec_softc *sc)
+{
+
+	return (mii_mediachg(sc->mii_softc));
+}
+
+static int
+ffec_media_change(struct ifnet * ifp)
+{
+	struct ffec_softc *sc;
+	int error;
+
+	sc = ifp->if_softc;
+
+	FFEC_LOCK(sc);
+	error = ffec_media_change_locked(sc);
+	FFEC_UNLOCK(sc);
+	return (error);
+}
+
+static void
+ffec_clear_stats(struct ffec_softc *sc)
+{
+
+	WR4(sc, FEC_RMON_R_PACKETS, 0);
+	WR4(sc, FEC_RMON_R_MC_PKT, 0);
+	WR4(sc, FEC_RMON_R_CRC_ALIGN, 0);
+	WR4(sc, FEC_RMON_R_UNDERSIZE, 0);
+	WR4(sc, FEC_RMON_R_OVERSIZE, 0);
+	WR4(sc, FEC_RMON_R_FRAG, 0);
+	WR4(sc, FEC_RMON_R_JAB, 0);
+	WR4(sc, FEC_RMON_T_PACKETS, 0);
+	WR4(sc, FEC_RMON_T_MC_PKT, 0);
+	WR4(sc, FEC_RMON_T_CRC_ALIGN, 0);
+	WR4(sc, FEC_RMON_T_UNDERSIZE, 0);
+	WR4(sc, FEC_RMON_T_OVERSIZE, 0);
+	WR4(sc, FEC_RMON_T_FRAG, 0);
+	WR4(sc, FEC_RMON_T_JAB, 0);
+	WR4(sc, FEC_RMON_T_COL, 0);
+}
+
+static void
+ffec_harvest_stats(struct ffec_softc *sc)
+{
+	struct ifnet *ifp;
+
+	/* We don't need to harvest too often. */
+	if (++sc->stats_harvest_count < STATS_HARVEST_INTERVAL)
+		return;
+
+	/*
+	 * Try to avoid harvesting unless the IDLE flag is on, but if it has
+	 * been too long just go ahead and do it anyway, the worst that'll
+	 * happen is we'll lose a packet count or two as we clear at the end.
+	 */
+	if (sc->stats_harvest_count < (2 * STATS_HARVEST_INTERVAL) &&
+	    ((RD4(sc, FEC_MIBC_REG) & FEC_MIBC_IDLE) == 0))
+		return;
+
+	sc->stats_harvest_count = 0;
+	ifp = sc->ifp;
+
+	ifp->if_ipackets   += RD4(sc, FEC_RMON_R_PACKETS);
+	ifp->if_imcasts    += RD4(sc, FEC_RMON_R_MC_PKT);
+	ifp->if_ierrors    += RD4(sc, FEC_RMON_R_CRC_ALIGN);
+	ifp->if_ierrors    += RD4(sc, FEC_RMON_R_UNDERSIZE);
+	ifp->if_ierrors    += RD4(sc, FEC_RMON_R_OVERSIZE);
+	ifp->if_ierrors    += RD4(sc, FEC_RMON_R_FRAG);
+	ifp->if_ierrors    += RD4(sc, FEC_RMON_R_JAB);
+
+	ifp->if_opackets   += RD4(sc, FEC_RMON_T_PACKETS);
+	ifp->if_omcasts    += RD4(sc, FEC_RMON_T_MC_PKT);
+	ifp->if_oerrors    += RD4(sc, FEC_RMON_T_CRC_ALIGN);
+	ifp->if_oerrors    += RD4(sc, FEC_RMON_T_UNDERSIZE);
+	ifp->if_oerrors    += RD4(sc, FEC_RMON_T_OVERSIZE);
+	ifp->if_oerrors    += RD4(sc, FEC_RMON_T_FRAG);
+	ifp->if_oerrors    += RD4(sc, FEC_RMON_T_JAB);
+
+	ifp->if_collisions += RD4(sc, FEC_RMON_T_COL);
+
+	ffec_clear_stats(sc);
+}
+
+static void
+ffec_tick(void *arg)
+{
+	struct ffec_softc *sc;
+	struct ifnet *ifp;
+	int link_was_up;
+
+	sc = arg;
+
+	FFEC_ASSERT_LOCKED(sc);
+
+	ifp = sc->ifp;
+
+	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+	    return;
+
+	/*
+	 * Typical tx watchdog.  If this fires it indicates that we enqueued
+	 * packets for output and never got a txdone interrupt for them.  Maybe
+	 * it's a missed interrupt somehow, just pretend we got one.
+	 */
+	if (sc->tx_watchdog_count > 0) {
+		if (--sc->tx_watchdog_count == 0) {
+			ffec_txfinish_locked(sc);
+		}
+	}
+
+	/* Gather stats from hardware counters. */
+	ffec_harvest_stats(sc);
+
+	/* Check the media status. */
+	link_was_up = sc->link_is_up;
+	mii_tick(sc->mii_softc);
+	if (sc->link_is_up && !link_was_up)
+		ffec_txstart_locked(sc);
+
+	/* Schedule another check one second from now. */
+	callout_reset(&sc->ffec_callout, hz, ffec_tick, sc);
+}
+
+inline static uint32_t
+ffec_setup_txdesc(struct ffec_softc *sc, int idx, bus_addr_t paddr, 
+    uint32_t len)
+{
+	uint32_t nidx;
+	uint32_t flags;
+
+	nidx = next_txidx(sc, idx);
+
+	/* Addr/len 0 means we're clearing the descriptor after xmit done. */
+	if (paddr == 0 || len == 0) {
+		flags = 0;
+		--sc->txcount;
+	} else {
+		flags = FEC_TXDESC_READY | FEC_TXDESC_L | FEC_TXDESC_TC;
+		++sc->txcount;
+	}
+	if (nidx == 0)
+		flags |= FEC_TXDESC_WRAP;
+
+	/*
+	 * The hardware requires 32-bit physical addresses.  We set up the dma
+	 * tag to indicate that, so the cast to uint32_t should never lose
+	 * significant bits.
+	 */
+	sc->txdesc_ring[idx].buf_paddr = (uint32_t)paddr;
+	sc->txdesc_ring[idx].flags_len = flags | len; /* Must be set last! */
+
+	return (nidx);
+}
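The cast in ffec_setup_txdesc() depends on the busdma tags having been created with a 32-bit address restriction; the tag creation itself falls in the truncated portion of this diff.  A minimal sketch of such a tag, assuming a 16-byte alignment requirement and a hypothetical helper name:

	/* Sketch: a busdma tag restricted to 32-bit physical addresses. */
	static int
	ffec_create_txdesc_tag(struct ffec_softc *sc)
	{
		int error;

		error = bus_dma_tag_create(
		    bus_get_dma_tag(sc->dev),	/* parent */
		    16, 0,			/* alignment (assumed), boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr: 32-bit segments only */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    TX_DESC_SIZE, 1,		/* maxsize, nsegments */
		    TX_DESC_SIZE,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->txdesc_tag);
		if (error != 0)
			device_printf(sc->dev,
			    "could not create TX descriptor ring DMA tag\n");
		return (error);
	}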
+
+static int
+ffec_setup_txbuf(struct ffec_softc *sc, int idx, struct mbuf **mp)
+{
+	struct mbuf * m;
+	int error, nsegs;
+	struct bus_dma_segment seg;
+
+	if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
+		return (ENOMEM);
+	*mp = m;
+
+	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
+	    m, &seg, &nsegs, 0);
+	if (error != 0) {
+		return (ENOMEM);
+	}
+	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map, 
+	    BUS_DMASYNC_PREWRITE);
+
+	sc->txbuf_map[idx].mbuf = m;
+	ffec_setup_txdesc(sc, idx, seg.ds_addr, seg.ds_len);
+
+	return (0);
+}
+
+static void
+ffec_txstart_locked(struct ffec_softc *sc)
+{
+	struct ifnet *ifp;
+	struct mbuf *m;
+	int enqueued;
+
+	FFEC_ASSERT_LOCKED(sc);
+
+	if (!sc->link_is_up)
+		return;
+
+	ifp = sc->ifp;
+
+	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
+		return;
+
+	enqueued = 0;
+
+	for (;;) {
+		if (sc->txcount == (TX_DESC_COUNT-1)) {
+			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+			break;
+		}
+		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
+		if (m == NULL)
+			break;
+		if (ffec_setup_txbuf(sc, sc->tx_idx_head, &m) != 0) {
+			IFQ_DRV_PREPEND(&ifp->if_snd, m);
+			break;
+		}
+		BPF_MTAP(ifp, m);
+		sc->tx_idx_head = next_txidx(sc, sc->tx_idx_head);
+		++enqueued;
+	}
+
+	if (enqueued != 0) {
+		WR4(sc, FEC_TDAR_REG, FEC_TDAR_TDAR);
+		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
+	}
+}
+
+static void
+ffec_txstart(struct ifnet *ifp)
+{
+	struct ffec_softc *sc = ifp->if_softc;
+
+	FFEC_LOCK(sc);
+	ffec_txstart_locked(sc);
+	FFEC_UNLOCK(sc);
+}
+
+static void
+ffec_txfinish_locked(struct ffec_softc *sc)
+{
+	struct ifnet *ifp;
+	struct ffec_hwdesc *desc;
+	struct ffec_bufmap *bmap;
+	boolean_t retired_buffer;
+
+	FFEC_ASSERT_LOCKED(sc);
+
+	ifp = sc->ifp;
+	retired_buffer = false;
+	while (sc->tx_idx_tail != sc->tx_idx_head) {
+		desc = &sc->txdesc_ring[sc->tx_idx_tail];
+		if (desc->flags_len & FEC_TXDESC_READY)
+			break;
+		retired_buffer = true;
+		bmap = &sc->txbuf_map[sc->tx_idx_tail];
+		bus_dmamap_sync(sc->txbuf_tag, bmap->map, 
+		    BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
+		m_freem(bmap->mbuf);
+		bmap->mbuf = NULL;
+		ffec_setup_txdesc(sc, sc->tx_idx_tail, 0, 0);
+		sc->tx_idx_tail = next_txidx(sc, sc->tx_idx_tail);
+	}
+
+	/*
+	 * If we retired any buffers, there will be open tx slots available in
+	 * the descriptor ring, go try to start some new output.
+	 */
+	if (retired_buffer) {
+		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+		ffec_txstart_locked(sc);
+	}
+
+	/* If there are no buffers outstanding, muzzle the watchdog. */
+	if (sc->tx_idx_tail == sc->tx_idx_head) {
+		sc->tx_watchdog_count = 0;
+	}
+}
+
+inline static uint32_t
+ffec_setup_rxdesc(struct ffec_softc *sc, int idx, bus_addr_t paddr)
+{
+	uint32_t nidx;
+
+	/*
+	 * The hardware requires 32-bit physical addresses.  We set up the dma
+	 * tag to indicate that, so the cast to uint32_t should never lose
+	 * significant bits.
+	 */
+	nidx = next_rxidx(sc, idx);
+	sc->rxdesc_ring[idx].buf_paddr = (uint32_t)paddr;
+	sc->rxdesc_ring[idx].flags_len = FEC_RXDESC_EMPTY | 
+		((nidx == 0) ? FEC_RXDESC_WRAP : 0);
+
+	return (nidx);
+}
+
+static int
+ffec_setup_rxbuf(struct ffec_softc *sc, int idx, struct mbuf * m)
+{
+	int error, nsegs;
+	struct bus_dma_segment seg;
+
+	/*
+	 * We need to leave at least ETHER_ALIGN bytes free at the beginning of
+	 * the buffer to allow the data to be re-aligned after receiving it (by
+	 * copying it backwards ETHER_ALIGN bytes in the same buffer).  We also
+	 * have to ensure that the beginning of the buffer is aligned to the
+	 * hardware's requirements.
+	 */
+	m_adj(m, roundup(ETHER_ALIGN, FEC_RXBUF_ALIGN));
+
+	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
+	    m, &seg, &nsegs, 0);
+	if (error != 0) {
+		return (error);
+	}
+
+	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map, 
+	    BUS_DMASYNC_PREREAD);
+
+	sc->rxbuf_map[idx].mbuf = m;
+	ffec_setup_rxdesc(sc, idx, seg.ds_addr);
+	
+	return (0);
+}
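A quick worked check of the offsets involved, taking ETHER_ALIGN as FreeBSD's usual 2 bytes and FEC_RXBUF_ALIGN as the 16-byte hardware alignment mentioned below in ffec_rxfinish_onebuf(), so roundup(ETHER_ALIGN, FEC_RXBUF_ALIGN) presumably evaluates to 16:

	/*
	 * Cluster start: offset 0, 16-byte aligned.
	 * After m_adj(m, 16): m_data is at offset 16, so the DMA address is
	 * still 16-byte aligned and 16 bytes of headroom remain in front.
	 * After receive, the frame is copied back by ETHER_ALIGN (2) bytes:
	 * the Ethernet header then starts at offset 14, and the protocol
	 * header at offset 14 + 14 = 28, a 32-bit boundary as the upper
	 * layers require.
	 */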
+
+static struct mbuf *
+ffec_alloc_mbufcl(struct ffec_softc *sc)
+{
+	struct mbuf *m;
+
+	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+	if (m != NULL)
+		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
+
+	return (m);
+}
+
+static void
+ffec_rxfinish_onebuf(struct ffec_softc *sc, int len)
+{
+	struct mbuf *m, *newmbuf;
+	struct ffec_bufmap *bmap;
+	uint8_t *dst, *src;
+	int error;
+
+	/*
+	 *  First try to get a new mbuf to plug into this slot in the rx ring.
+	 *  If that fails, drop the current packet and recycle the current
+	 *  mbuf, which is still mapped and loaded.
+	 */
+	if ((newmbuf = ffec_alloc_mbufcl(sc)) == NULL) {
+		++sc->ifp->if_iqdrops;
+		ffec_setup_rxdesc(sc, sc->rx_idx, 
+		    sc->rxdesc_ring[sc->rx_idx].buf_paddr);
+		return;
+	}
+
+	/*
+	 *  Unfortunately, the protocol headers need to be aligned on a 32-bit
+	 *  boundary for the upper layers.  The hardware requires receive
+	 *  buffers to be 16-byte aligned.  The ethernet header is 14 bytes,
+	 *  leaving the protocol header unaligned.  We used m_adj() after
+	 *  allocating the buffer to leave empty space at the start of the
+	 *  buffer, now we'll use the alignment agnostic bcopy() routine to
+	 *  shuffle all the data backwards 2 bytes and adjust m_data.
+	 *
+	 *  XXX imx6 hardware is able to do this 2-byte alignment by setting the
+	 *  SHIFT16 bit in the RACC register.  Older hardware doesn't have that
+	 *  feature, but for them could we speed this up by copying just the
+	 *  protocol headers into their own small mbuf then chaining the cluster
+	 *  to it?  That way we'd only need to copy like 64 bytes or whatever
+	 *  the biggest header is, instead of the whole 1530ish-byte frame.
+	 */
+
+	FFEC_UNLOCK(sc);
+
+	bmap = &sc->rxbuf_map[sc->rx_idx];
+	len -= ETHER_CRC_LEN;
+	bus_dmamap_sync(sc->rxbuf_tag, bmap->map, BUS_DMASYNC_POSTREAD);
+	bus_dmamap_unload(sc->rxbuf_tag, bmap->map);
+	m = bmap->mbuf;
+	bmap->mbuf = NULL;
+	m->m_len = len;
+	m->m_pkthdr.len = len;
+	m->m_pkthdr.rcvif = sc->ifp;
+
+	src = mtod(m, uint8_t*);
+	dst = src - ETHER_ALIGN;
+	bcopy(src, dst, len);
+	m->m_data = dst;
+	sc->ifp->if_input(sc->ifp, m);
+
+	FFEC_LOCK(sc);
+
+	if ((error = ffec_setup_rxbuf(sc, sc->rx_idx, newmbuf)) != 0) {
+		device_printf(sc->dev, "ffec_setup_rxbuf error %d\n", error);
+		/* XXX Now what?  We've got a hole in the rx ring. */
+	}
+}
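The XXX comment above sketches an alternative for pre-imx6 hardware: copy only the packet headers into a small aligned mbuf and chain the untouched cluster behind it.  A rough, untested sketch of that idea (helper name and usage are assumptions, not part of this commit); the caller would have to guarantee hdrlen <= m->m_len and hdrlen + ETHER_ALIGN <= MHLEN, with m->m_pkthdr.len already set to the frame length:

	static struct mbuf *
	ffec_split_rxhdr(struct mbuf *m, int hdrlen)
	{
		struct mbuf *hm;

		MGETHDR(hm, M_NOWAIT, MT_DATA);
		if (hm == NULL)
			return (NULL);
		/* Move the packet header; 'm' becomes a plain data mbuf. */
		m_move_pkthdr(hm, m);
		/* Offset by ETHER_ALIGN so the protocol header is aligned. */
		hm->m_data += ETHER_ALIGN;
		m_copydata(m, 0, hdrlen, mtod(hm, caddr_t));
		hm->m_len = hdrlen;
		/* Trim the copied headers from the cluster and chain it. */
		m_adj(m, hdrlen);
		hm->m_next = m;
		return (hm);
	}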
+
+static void
+ffec_rxfinish_locked(struct ffec_softc *sc)
+{
+	struct ffec_hwdesc *desc;
+	int len;
+	boolean_t produced_empty_buffer;
+
+	FFEC_ASSERT_LOCKED(sc);
+
+	produced_empty_buffer = false;
+	for (;;) {
+		desc = &sc->rxdesc_ring[sc->rx_idx];
+		if (desc->flags_len & FEC_RXDESC_EMPTY)
+			break;
+		produced_empty_buffer = true;
+		len = (desc->flags_len & FEC_RXDESC_LEN_MASK);
+		if (len < 64) {
+			/*
+			 * Runt frame (shorter than the Ethernet minimum);
+			 * just recycle the descriptor and continue.
+			 */
+			ffec_setup_rxdesc(sc, sc->rx_idx,
+			    sc->rxdesc_ring[sc->rx_idx].buf_paddr);
+		} else if ((desc->flags_len & FEC_RXDESC_L) == 0) {
+			/*
+			 * The entire frame is not in this buffer.  Impossible.
+			 * Recycle the descriptor and continue.
+			 *
+			 * XXX what's the right way to handle this? Probably we
+			 * should stop/init the hardware because this should
+			 * just really never happen when we have buffers bigger
+			 * than the maximum frame size.
+			 */
+			device_printf(sc->dev,
+			    "ffec_rxfinish: received frame without LAST bit set\n");
+			ffec_setup_rxdesc(sc, sc->rx_idx, 
+			    sc->rxdesc_ring[sc->rx_idx].buf_paddr);
+		} else if (desc->flags_len & FEC_RXDESC_ERROR_BITS) {
+			/*
+			 *  Something went wrong with receiving the frame, we
+			 *  don't care what (the hardware has counted the error
+			 *  in the stats registers already), we just reuse the
+			 *  same mbuf, which is still dma-mapped, by resetting
+			 *  the rx descriptor.
+			 */
+			ffec_setup_rxdesc(sc, sc->rx_idx, 
+			    sc->rxdesc_ring[sc->rx_idx].buf_paddr);
+		} else {
+			/*
+			 *  Normal case: a good frame all in one buffer.
+			 */
+			ffec_rxfinish_onebuf(sc, len);
+		}
+		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
+	}
+
+	if (produced_empty_buffer) {
+		WR4(sc, FEC_RDAR_REG, FEC_RDAR_RDAR);
+	}
+}
+
+static void
+ffec_get_hwaddr(struct ffec_softc *sc, uint8_t *hwaddr)
+{
+	uint32_t palr, paur, rnd;
+
+	/*
+	 * Try to recover a MAC address from the running hardware. If there's
+	 * something non-zero there, assume the bootloader did the right thing
+	 * and just use it.
+	 *
+	 * Otherwise, set the address to a convenient locally assigned address,
+	 * 'bsd' + random 24 low-order bits.  'b' is 0x62, which has the locally
+	 * assigned bit set, and the broadcast/multicast bit clear.
+	 */
+	palr = RD4(sc, FEC_PALR_REG);
+	paur = RD4(sc, FEC_PAUR_REG);
+	if ((palr | paur) != 0) {
+		hwaddr[0] = palr >> 24;
+		hwaddr[1] = palr >> 16;
+		hwaddr[2] = palr >>  8;
+		hwaddr[3] = palr >>  0;
+		hwaddr[4] = paur >> 24;
+		hwaddr[5] = paur >> 16;
+		return;
+	} else {
+		rnd = arc4random() & 0x00ffffff;
+		hwaddr[0] = 'b';
+		hwaddr[1] = 's';
+		hwaddr[2] = 'd';
+		hwaddr[3] = rnd >> 16;
+		hwaddr[4] = rnd >>  8;
+		hwaddr[5] = rnd >>  0;
+	}
+
+	if (bootverbose) {
+		device_printf(sc->dev,
+		    "MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
+		    hwaddr[0], hwaddr[1], hwaddr[2], 
+		    hwaddr[3], hwaddr[4], hwaddr[5]);
+	}
+}
+
+static void
+ffec_setup_rxfilter(struct ffec_softc *sc)
+{
+	struct ifnet *ifp;
+	struct ifmultiaddr *ifma;
+	uint8_t *eaddr;
+	uint32_t crc;
+	uint64_t ghash, ihash;
+
+	FFEC_ASSERT_LOCKED(sc);
+
+	ifp = sc->ifp;
+
+	/*
+	 * Set the multicast (group) filter hash.
+	 */
+	if ((ifp->if_flags & IFF_ALLMULTI))
+		ghash = 0xffffffffffffffffLLU;
+	else {
+		ghash = 0;
+		if_maddr_rlock(ifp);
+		TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
+			if (ifma->ifma_addr->sa_family != AF_LINK)
+				continue;
+			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
+			    ifma->ifma_addr), ETHER_ADDR_LEN);
+			/* The upper 6 CRC bits index the 64-bit hash. */
+			ghash |= 1LLU << (crc >> 26);
+		}
+		if_maddr_runlock(ifp);
+	}
+	WR4(sc, FEC_GAUR_REG, (uint32_t)(ghash >> 32));
+	WR4(sc, FEC_GALR_REG, (uint32_t)ghash);
+
+	/*
+	 * Set the individual address filter hash.
+	 *
+	 * XXX Is 0 the right value when promiscuous is off?  This hw feature
+	 * seems to support the concept of MAC address aliases, does such a
+	 * thing even exist?
+	 */
+	if ((ifp->if_flags & IFF_PROMISC))
+		ihash = 0xffffffffffffffffLLU;
+	else {
+		ihash = 0;
+	}
+	WR4(sc, FEC_IAUR_REG, (uint32_t)(ihash >> 32));
+	WR4(sc, FEC_IALR_REG, (uint32_t)ihash);
+
+	/*
+	 * Set the primary address.
+	 */
+	eaddr = IF_LLADDR(ifp);
+	WR4(sc, FEC_PALR_REG, (eaddr[0] << 24) | (eaddr[1] << 16) |
+	    (eaddr[2] <<  8) | eaddr[3]);
+	WR4(sc, FEC_PAUR_REG, (eaddr[4] << 24) | (eaddr[5] << 16));
+}
+
+static void
+ffec_stop_locked(struct ffec_softc *sc)
+{
+	struct ifnet *ifp;
+	struct ffec_hwdesc *desc;
+	struct ffec_bufmap *bmap;
+	int idx;
+
+	FFEC_ASSERT_LOCKED(sc);
+
+	ifp = sc->ifp;
+	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+	sc->tx_watchdog_count = 0;
+	sc->stats_harvest_count = 0;
+
+	/* 
+	 * Stop the hardware, mask all interrupts, and clear all current
+	 * interrupt status bits.
+	 */

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

