svn commit: r277243 - in projects/ifnet/sys: dev/bge dev/mii dev/msk dev/virtio/network dev/xl net netgraph
Gleb Smirnoff
glebius at FreeBSD.org
Fri Jan 16 11:45:58 UTC 2015
Author: glebius
Date: Fri Jan 16 11:45:54 2015
New Revision: 277243
URL: https://svnweb.freebsd.org/changeset/base/277243
Log:
Make the API around if_mtu, if_capabilities, if_capenable and if_hwassist
stricter.  Disallow direct access to these fields entirely.
o if_mtu
  - The stack guarantees that any MTU change attempt is reported via
    SIOCSIFMTU, so drivers can store the MTU in their softc and don't need
    to go to the ifnet layer for it.
  - The stack performs the check for an unchanged MTU, so drivers don't
    need to copy and paste it.
  - If the driver doesn't report an error, the stack updates the MTU.
o if_capabilities
  - Drivers should initialize these in ifat_capabilities at if_attach()
    time and are not allowed to modify them later; neither is the stack.
o if_capenable
  - The stack guarantees that any capenable change is reported via
    SIOCSIFCAP.  The stack supplies the current capabilities (in case the
    driver doesn't store them) in ifr_curcap, and the requested
    capabilities in ifr_reqcap.
  - The stack checks for an unchanged value and verifies that the requested
    capenable is a subset of the interface capabilities, so drivers don't
    need to cut and paste those checks.
  - The stack enforces IFCAP_VLAN_HWTAGGING when IFCAP_VLAN_HWTSO is
    requested.
  - Drivers may alter ifr_reqcap, since in some edge cases they may not
    actually be capable of running the capabilities announced at
    if_attach().
  - Drivers must either return an error, or return 0 and initialize the
    ifr_hwassist value in the ifreq.
o There are some edge cases where a driver may want to change its capenable
  as a result of an MTU change, or vice versa.  In that case the driver
  should call if_drvioctl() on itself (see the sketch after the log
  message).
Sponsored by: Nginx, Inc.
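
For illustration only, a minimal sketch of a driver ioctl handler under this
contract.  The foo_* softc fields, FOO_MAX_MTU, FOO_CSUM_FEATURES,
FOO_FLAG_RUNNING and foo_init() are hypothetical placeholders; the ifreq
fields and the calling convention follow the drivers converted below.

static int
foo_ioctl(if_t ifp, u_long command, void *data, struct thread *td)
{
	struct foo_softc *sc = if_getsoftc(ifp, IF_DRIVER_SOFTC);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
		/*
		 * The stack has already verified that the MTU actually
		 * changes; if we return 0, the stack updates if_mtu itself.
		 */
		if (ifr->ifr_mtu > FOO_MAX_MTU)
			return (EINVAL);
		sc->foo_mtu = ifr->ifr_mtu;
		if (sc->foo_flags & FOO_FLAG_RUNNING)
			foo_init(sc);	/* reprogram hardware for new MTU */
		break;
	case SIOCSIFCAP:
		/*
		 * ifr_curcap holds the current capenable, ifr_reqcap the
		 * already validated request.  The driver may trim
		 * ifr_reqcap and must fill in ifr_hwassist on success.
		 */
		sc->foo_capenable = ifr->ifr_reqcap;
		ifr->ifr_hwassist = 0;
		if (ifr->ifr_reqcap & IFCAP_TXCSUM)
			ifr->ifr_hwassist |= FOO_CSUM_FEATURES;
		if (ifr->ifr_reqcap & IFCAP_TSO4)
			ifr->ifr_hwassist |= CSUM_TSO;
		break;
	default:
		error = EOPNOTSUPP;
	}
	return (error);
}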
Modified:
projects/ifnet/sys/dev/bge/if_bge.c
projects/ifnet/sys/dev/bge/if_bgereg.h
projects/ifnet/sys/dev/mii/brgphy.c
projects/ifnet/sys/dev/mii/miivar.h
projects/ifnet/sys/dev/mii/truephy.c
projects/ifnet/sys/dev/msk/if_msk.c
projects/ifnet/sys/dev/msk/if_mskreg.h
projects/ifnet/sys/dev/virtio/network/if_vtnet.c
projects/ifnet/sys/dev/virtio/network/if_vtnetvar.h
projects/ifnet/sys/dev/xl/if_xl.c
projects/ifnet/sys/dev/xl/if_xlreg.h
projects/ifnet/sys/net/if.c
projects/ifnet/sys/net/if.h
projects/ifnet/sys/net/if_var.h
projects/ifnet/sys/netgraph/ng_iface.c
Modified: projects/ifnet/sys/dev/bge/if_bge.c
==============================================================================
--- projects/ifnet/sys/dev/bge/if_bge.c Fri Jan 16 11:42:42 2015 (r277242)
+++ projects/ifnet/sys/dev/bge/if_bge.c Fri Jan 16 11:45:54 2015 (r277243)
@@ -116,7 +116,6 @@ __FBSDID("$FreeBSD$");
#include <dev/bge/if_bgereg.h>
-#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
MODULE_DEPEND(bge, pci, 1, 1, 1);
@@ -1331,14 +1330,12 @@ static uint64_t
bge_miibus_readvar(device_t dev, int var)
{
struct bge_softc *sc;
- if_t ifp;
sc = device_get_softc(dev);
- ifp = sc->bge_ifp;
switch (var) {
- case IF_MTU:
- return (if_get(ifp, IF_MTU));
+ case MIIVAR_MTU:
+ return (sc->bge_mtu);
default:
return (0);
}
@@ -1357,7 +1354,7 @@ bge_newbuf_std(struct bge_softc *sc, int
int error, nsegs;
if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
- (if_get(sc->bge_ifp, IF_MTU) + ETHER_HDR_LEN + ETHER_CRC_LEN +
+ (sc->bge_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
if (m == NULL)
@@ -1698,7 +1695,7 @@ bge_setvlan(struct bge_softc *sc)
ifp = sc->bge_ifp;
/* Enable or disable VLAN tag stripping as needed. */
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING)
+ if (sc->bge_capenable & IFCAP_VLAN_HWTAGGING)
BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
else
BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
@@ -2032,7 +2029,7 @@ bge_blockinit(struct bge_softc *sc)
/* Configure mbuf pool watermarks */
if (BGE_IS_5717_PLUS(sc)) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
- if (if_get(sc->bge_ifp, IF_MTU) > ETHERMTU) {
+ if (sc->bge_mtu > ETHERMTU) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
} else {
@@ -3756,9 +3753,9 @@ bge_attach(device_t dev)
sc->bge_tx_max_coal_bds = 10;
/* Initialize checksum features to use. */
- sc->bge_csum_features = BGE_CSUM_FEATURES;
+ sc->bge_hwassist = (CSUM_IP | CSUM_TCP);
if (sc->bge_forced_udpcsum != 0)
- sc->bge_csum_features |= CSUM_UDP;
+ sc->bge_hwassist |= CSUM_UDP;
/*
* Figure out what sort of media we have by checking the
@@ -3924,7 +3921,7 @@ again:
ifat.ifat_softc = sc;
ifat.ifat_dunit = device_get_unit(dev);
ifat.ifat_lla = eaddr;
- ifat.ifat_hwassist = sc->bge_csum_features;
+ ifat.ifat_hwassist = sc->bge_hwassist;
if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
ifat.ifat_hwassist |= CSUM_TSO;
ifat.ifat_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
@@ -3942,7 +3939,8 @@ again:
ifat.ifat_capenable &= ~IFCAP_HWCSUM;
ifat.ifat_hwassist = 0;
}
-
+ sc->bge_capenable = ifat.ifat_capenable;
+ sc->bge_mtu = ETHERMTU;
sc->bge_ifp = if_attach(&ifat);
return (0);
@@ -3962,7 +3960,7 @@ bge_detach(device_t dev)
ifp = sc->bge_ifp;
#ifdef DEVICE_POLLING
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_POLLING)
+ if (sc->bge_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
#endif
@@ -4332,7 +4330,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t
bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
if (BGE_IS_JUMBO_CAPABLE(sc) &&
- if_get(ifp, IF_MTU) + ETHER_HDR_LEN + ETHER_CRC_LEN +
+ sc->bge_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))
bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
@@ -4345,7 +4343,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t
int have_tag = 0;
#ifdef DEVICE_POLLING
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_POLLING) {
+ if (sc->bge_capenable & IFCAP_POLLING) {
if (sc->rxcycles <= 0)
break;
sc->rxcycles--;
@@ -4357,7 +4355,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t
rxidx = cur_rx->bge_idx;
BGE_INC(rx_cons, sc->bge_return_ring_cnt);
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING &&
+ if (sc->bge_capenable & IFCAP_VLAN_HWTAGGING &&
cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
have_tag = 1;
vlan_tag = cur_rx->bge_vlan_tag;
@@ -4406,7 +4404,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t
m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
m->m_pkthdr.rcvif = ifp;
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_RXCSUM)
+ if (sc->bge_capenable & IFCAP_RXCSUM)
bge_rxcsum(sc, cur_rx, m);
/*
@@ -4683,7 +4681,7 @@ bge_intr(void *xsc)
ifp = sc->bge_ifp;
#ifdef DEVICE_POLLING
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_POLLING) {
+ if (sc->bge_capenable & IFCAP_POLLING) {
BGE_UNLOCK(sc);
return;
}
@@ -5204,7 +5202,7 @@ bge_encap(struct bge_softc *sc, struct m
return (ENOBUFS);
csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
BGE_TXBDFLAG_CPU_POST_DMA;
- } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
+ } else if ((m->m_pkthdr.csum_flags & sc->bge_hwassist) != 0) {
if (m->m_pkthdr.csum_flags & CSUM_IP)
csum_flags |= BGE_TXBDFLAG_IP_CSUM;
if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
@@ -5454,9 +5452,9 @@ bge_init_locked(struct bge_softc *sc)
ifp = sc->bge_ifp;
/* Specify MTU. */
- CSR_WRITE_4(sc, BGE_RX_MTU, if_get(ifp, IF_MTU) +
+ CSR_WRITE_4(sc, BGE_RX_MTU, sc->bge_mtu +
ETHER_HDR_LEN + ETHER_CRC_LEN +
- (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
+ (sc->bge_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
/* Load our MAC address. */
m = (uint16_t *)if_lladdr(sc->bge_ifp);
@@ -5474,14 +5472,9 @@ bge_init_locked(struct bge_softc *sc)
/* Override UDP checksum offloading. */
if (sc->bge_forced_udpcsum == 0)
- sc->bge_csum_features &= ~CSUM_UDP;
+ sc->bge_hwassist &= ~CSUM_UDP;
else
- sc->bge_csum_features |= CSUM_UDP;
- if (if_get(ifp, IF_CAPABILITIES) & IFCAP_TXCSUM &&
- if_get(ifp, IF_CAPENABLE) & IFCAP_TXCSUM) {
- if_clrflags(ifp, IF_CAPABILITIES, BGE_CSUM_FEATURES | CSUM_UDP);
- if_addflags(ifp, IF_CAPABILITIES, sc->bge_csum_features);
- }
+ sc->bge_hwassist |= CSUM_UDP;
/* Init RX ring. */
if (bge_init_rx_ring_std(sc) != 0) {
@@ -5510,7 +5503,7 @@ bge_init_locked(struct bge_softc *sc)
/* Init jumbo RX ring. */
if (BGE_IS_JUMBO_CAPABLE(sc) &&
- if_get(ifp, IF_MTU) + ETHER_HDR_LEN + ETHER_CRC_LEN +
+ sc->bge_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) {
if (bge_init_rx_ring_jumbo(sc) != 0) {
device_printf(sc->bge_dev,
@@ -5572,7 +5565,7 @@ bge_init_locked(struct bge_softc *sc)
#ifdef DEVICE_POLLING
/* Disable interrupts if we are polling. */
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_POLLING) {
+ if (sc->bge_capenable & IFCAP_POLLING) {
BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
BGE_PCIMISCCTL_MASK_PCI_INTR);
bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
@@ -5766,12 +5759,10 @@ bge_ioctl(if_t ifp, u_long command, void
break;
}
BGE_LOCK(sc);
- if (if_get(ifp, IF_MTU) != ifr->ifr_mtu) {
- if_set(ifp, IF_MTU, ifr->ifr_mtu);
- if (sc->bge_flags & BGE_FLAG_RUNNING) {
- sc->bge_flags &= ~BGE_FLAG_RUNNING;
- bge_init_locked(sc);
- }
+ sc->bge_mtu = ifr->ifr_mtu;
+ if (sc->bge_flags & BGE_FLAG_RUNNING) {
+ sc->bge_flags &= ~BGE_FLAG_RUNNING;
+ bge_init_locked(sc);
}
BGE_UNLOCK(sc);
break;
@@ -5825,7 +5816,7 @@ bge_ioctl(if_t ifp, u_long command, void
}
break;
case SIOCSIFCAP:
- mask = ifr->ifr_reqcap ^ if_get(ifp, IF_CAPENABLE);
+ mask = ifr->ifr_reqcap ^ ifr->ifr_curcap;
#ifdef DEVICE_POLLING
if (mask & IFCAP_POLLING) {
if (ifr->ifr_reqcap & IFCAP_POLLING) {
@@ -5836,7 +5827,6 @@ bge_ioctl(if_t ifp, u_long command, void
BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
BGE_PCIMISCCTL_MASK_PCI_INTR);
bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
- if_setcapenablebit(ifp, IFCAP_POLLING, 0);
BGE_UNLOCK(sc);
} else {
error = ether_poll_deregister(ifp);
@@ -5845,49 +5835,24 @@ bge_ioctl(if_t ifp, u_long command, void
BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
BGE_PCIMISCCTL_MASK_PCI_INTR);
bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
- if_setcapenablebit(ifp, 0, IFCAP_POLLING);
BGE_UNLOCK(sc);
+ if (error)
+ return (error);
}
}
#endif
- if ((mask & IFCAP_TXCSUM) != 0 &&
- (if_get(ifp, IF_CAPABILITIES) & IFCAP_TXCSUM) != 0) {
- if_xorflags(ifp, IF_CAPENABLE, IFCAP_TXCSUM);
- if ((if_get(ifp, IF_CAPENABLE) & IFCAP_TXCSUM) != 0)
- if_addflags(ifp, IF_HWASSIST,
- sc->bge_csum_features);
- else
- if_clrflags(ifp, IF_HWASSIST,
- sc->bge_csum_features);
- }
-
- if ((mask & IFCAP_RXCSUM) != 0 &&
- (if_get(ifp, IF_CAPABILITIES) & IFCAP_RXCSUM) != 0)
- if_xorflags(ifp, IF_CAPENABLE, IFCAP_RXCSUM);
-
- if ((mask & IFCAP_TSO4) != 0 &&
- (if_get(ifp, IF_CAPABILITIES) & IFCAP_TSO4) != 0) {
- if_xorflags(ifp, IF_CAPENABLE, IFCAP_TSO4);
- if ((if_get(ifp, IF_CAPENABLE) & IFCAP_TSO4) != 0)
- if_addflags(ifp, IF_HWASSIST, CSUM_TSO);
- else
- if_clrflags(ifp, IF_HWASSIST, CSUM_TSO);
- }
-
+ sc->bge_capenable = ifr->ifr_reqcap;
+ ifr->ifr_hwassist = 0;
+ if ((sc->bge_capenable & IFCAP_TXCSUM) != 0)
+ ifr->ifr_hwassist = sc->bge_hwassist;
+ if ((sc->bge_capenable & IFCAP_TSO4) != 0 &&
+ (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0)
+ ifr->ifr_hwassist |= CSUM_TSO;
if (mask & IFCAP_VLAN_MTU) {
- if_xorflags(ifp, IF_CAPENABLE, IFCAP_VLAN_MTU);
sc->bge_flags &= ~BGE_FLAG_RUNNING;
bge_init(sc);
}
-
- if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
- (if_get(ifp, IF_CAPABILITIES) & IFCAP_VLAN_HWTSO) != 0)
- if_xorflags(ifp, IF_CAPENABLE, IFCAP_VLAN_HWTSO);
- if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
- (if_get(ifp, IF_CAPABILITIES) & IFCAP_VLAN_HWTAGGING) != 0) {
- if_xorflags(ifp, IF_CAPENABLE, IFCAP_VLAN_HWTAGGING);
- if ((if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) == 0)
- if_clrflags(ifp, IF_CAPENABLE, IFCAP_VLAN_HWTSO);
+ if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
BGE_LOCK(sc);
bge_setvlan(sc);
BGE_UNLOCK(sc);
Modified: projects/ifnet/sys/dev/bge/if_bgereg.h
==============================================================================
--- projects/ifnet/sys/dev/bge/if_bgereg.h Fri Jan 16 11:42:42 2015 (r277242)
+++ projects/ifnet/sys/dev/bge/if_bgereg.h Fri Jan 16 11:45:54 2015 (r277243)
@@ -3043,13 +3043,15 @@ struct bge_softc {
int bge_forced_collapse;
int bge_forced_udpcsum;
int bge_msi;
- int bge_csum_features;
struct callout bge_stat_ch;
uint32_t bge_rx_discards;
uint32_t bge_rx_inerrs;
uint32_t bge_rx_nobds;
uint32_t bge_tx_discards;
uint32_t bge_tx_collisions;
+ uint32_t bge_mtu;
+ uint32_t bge_capenable;
+ uint64_t bge_hwassist;
#ifdef DEVICE_POLLING
int rxcycles;
#endif /* DEVICE_POLLING */
Modified: projects/ifnet/sys/dev/mii/brgphy.c
==============================================================================
--- projects/ifnet/sys/dev/mii/brgphy.c Fri Jan 16 11:42:42 2015 (r277242)
+++ projects/ifnet/sys/dev/mii/brgphy.c Fri Jan 16 11:45:54 2015 (r277243)
@@ -928,7 +928,7 @@ brgphy_reset(struct mii_softc *sc)
return;
}
- mtu = MIIBUS_READVAR(sc->mii_dev, IF_MTU);
+ mtu = MIIBUS_READVAR(sc->mii_dev, MIIVAR_MTU);
/* Find the driver associated with this PHY. */
if (mii_phy_mac_match(sc, "bge"))
Modified: projects/ifnet/sys/dev/mii/miivar.h
==============================================================================
--- projects/ifnet/sys/dev/mii/miivar.h Fri Jan 16 11:42:42 2015 (r277242)
+++ projects/ifnet/sys/dev/mii/miivar.h Fri Jan 16 11:45:54 2015 (r277243)
@@ -265,6 +265,11 @@ u_int mii_oui(u_int, u_int);
#define MII_MODEL(id2) (((id2) & IDR2_MODEL) >> 4)
#define MII_REV(id2) ((id2) & IDR2_REV)
+/* Arguments for miibus_readvar(). */
+enum {
+ MIIVAR_MTU,
+};
+
#endif /* _KERNEL */
#endif /* _DEV_MII_MIIVAR_H_ */
Modified: projects/ifnet/sys/dev/mii/truephy.c
==============================================================================
--- projects/ifnet/sys/dev/mii/truephy.c Fri Jan 16 11:42:42 2015 (r277242)
+++ projects/ifnet/sys/dev/mii/truephy.c Fri Jan 16 11:45:54 2015 (r277243)
@@ -258,7 +258,7 @@ truephy_reset(struct mii_softc *sc)
mii_phy_reset(sc);
- if (TRUEPHY_FRAMELEN((MIIBUS_READVAR(sc->mii_dev, IF_MTU)) > 2048)) {
+ if (TRUEPHY_FRAMELEN((MIIBUS_READVAR(sc->mii_dev, MIIVAR_MTU)) > 2048)) {
int conf;
conf = PHY_READ(sc, TRUEPHY_CONF);
Modified: projects/ifnet/sys/dev/msk/if_msk.c
==============================================================================
--- projects/ifnet/sys/dev/msk/if_msk.c Fri Jan 16 11:42:42 2015 (r277242)
+++ projects/ifnet/sys/dev/msk/if_msk.c Fri Jan 16 11:45:54 2015 (r277243)
@@ -156,6 +156,8 @@ static int jumbo_disable = 0;
TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
#define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
+#define MSK_DEFAULT_FRAMESIZE \
+ (ETHERMTU + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)
/*
* Devices supported by this driver.
@@ -314,7 +316,7 @@ static int msk_miibus_writereg(device_t,
static void msk_miibus_statchg(device_t);
static void msk_rxfilter(struct msk_if_softc *);
-static void msk_setvlan(struct msk_if_softc *, if_t);
+static void msk_setvlan(struct msk_if_softc *);
static void msk_stats_clear(struct msk_if_softc *);
static void msk_stats_update(struct msk_if_softc *);
@@ -642,12 +644,12 @@ msk_rxfilter(struct msk_if_softc *sc_if)
}
static void
-msk_setvlan(struct msk_if_softc *sc_if, if_t ifp)
+msk_setvlan(struct msk_if_softc *sc_if)
{
struct msk_softc *sc;
sc = sc_if->msk_softc;
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) {
+ if (sc_if->msk_capenable & IFCAP_VLAN_HWTAGGING) {
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_ON);
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
@@ -667,7 +669,7 @@ msk_rx_fill(struct msk_if_softc *sc_if,
int i;
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
- (if_get(sc_if->msk_ifp, IF_CAPENABLE) & IFCAP_RXCSUM) != 0) {
+ (sc_if->msk_capenable & IFCAP_RXCSUM) != 0) {
/* Wait until controller executes OP_TCPSTART command. */
for (i = 100; i > 0; i--) {
DELAY(100);
@@ -733,7 +735,7 @@ msk_init_rx_ring(struct msk_if_softc *sc
prod = 0;
/* Have controller know how to compute Rx checksum. */
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
- (if_get(sc_if->msk_ifp, IF_CAPENABLE) & IFCAP_RXCSUM)) {
+ (sc_if->msk_capenable & IFCAP_RXCSUM)) {
#ifdef MSK_64BIT_DMA
rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
rxd->rx_m = NULL;
@@ -801,7 +803,7 @@ msk_init_jumbo_rx_ring(struct msk_if_sof
prod = 0;
/* Have controller know how to compute Rx checksum. */
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
- (if_get(sc_if->msk_ifp, IF_CAPENABLE) & IFCAP_RXCSUM) != 0) {
+ (sc_if->msk_capenable & IFCAP_RXCSUM) != 0) {
#ifdef MSK_64BIT_DMA
rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
rxd->rx_m = NULL;
@@ -1066,14 +1068,13 @@ msk_mediastatus(if_t ifp, struct ifmedia
}
static int
-msk_ioctl(if_t ifp, u_long command, void *data, struct thread *td)
+msk_ioctl(if_t ifp, u_long command, void *data, struct thread *td)
{
struct msk_if_softc *sc_if;
struct ifreq *ifr;
struct mii_data *mii;
int error, reinit, setvlan;
- uint32_t flags, capenable, capabilities, mask;
- uint64_t hwassist;
+ uint32_t flags, mask;
sc_if = if_getsoftc(ifp, IF_DRIVER_SOFTC);
ifr = (struct ifreq *)data;
@@ -1085,9 +1086,6 @@ msk_ioctl(if_t ifp, u_long command, void
error = EINVAL;
break;
}
- if (if_get(ifp, IF_MTU) == ifr->ifr_mtu)
- break;
-
MSK_IF_LOCK(sc_if);
if (ifr->ifr_mtu > ETHERMTU) {
if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
@@ -1095,15 +1093,19 @@ msk_ioctl(if_t ifp, u_long command, void
MSK_IF_UNLOCK(sc_if);
break;
}
- if ((sc_if->msk_flags &
- MSK_FLAG_JUMBO_NOCSUM) != 0) {
- if_clrflags(ifp, IF_HWASSIST,
- MSK_CSUM_FEATURES | CSUM_TSO);
- if_clrflags(ifp, IF_CAPENABLE,
- IFCAP_TSO4 | IFCAP_TXCSUM);
+ if ((sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
+ struct ifreq tmp;
+
+ MSK_IF_UNLOCK(sc_if);
+ if_drvioctl(SIOCGIFCAP, ifp, &tmp, td);
+ tmp.ifr_reqcap = tmp.ifr_curcap &
+ ~(MSK_CSUM_FEATURES | CSUM_TSO);
+ if_drvioctl(SIOCSIFCAP, ifp, &tmp, td);
+ MSK_IF_LOCK(sc_if);
}
}
- if_set(ifp, IF_MTU, ifr->ifr_mtu);
+ sc_if->msk_framesize = ifr->ifr_mtu + ETHER_HDR_LEN +
+ ETHER_VLAN_ENCAP_LEN;
if ((sc_if->msk_flags & MSK_FLAG_RUNNING) != 0) {
sc_if->msk_flags &= ~MSK_FLAG_RUNNING;
msk_init_locked(sc_if);
@@ -1140,57 +1142,27 @@ msk_ioctl(if_t ifp, u_long command, void
case SIOCSIFCAP:
reinit = 0;
setvlan = 0;
- MSK_IF_LOCK(sc_if);
- capenable = if_get(ifp, IF_CAPENABLE);
- capabilities = if_get(ifp, IF_CAPABILITIES);
- hwassist = if_get(ifp, IF_HWASSIST);
- mask = ifr->ifr_reqcap ^ capenable;
- if ((mask & IFCAP_TXCSUM) != 0 &&
- (IFCAP_TXCSUM & capabilities) != 0) {
- capenable ^= IFCAP_TXCSUM;
- if ((IFCAP_TXCSUM & capenable) != 0)
- hwassist |= MSK_CSUM_FEATURES;
- else
- hwassist &= ~MSK_CSUM_FEATURES;
- }
+ ifr->ifr_hwassist = 0;
+ mask = ifr->ifr_reqcap ^ ifr->ifr_curcap;
+ if ((IFCAP_TXCSUM & ifr->ifr_reqcap) != 0)
+ ifr->ifr_hwassist |= MSK_CSUM_FEATURES;
if ((mask & IFCAP_RXCSUM) != 0 &&
- (IFCAP_RXCSUM & capabilities) != 0) {
- capenable ^= IFCAP_RXCSUM;
- if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
+ (sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
reinit = 1;
- }
- if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
- (IFCAP_VLAN_HWCSUM & capabilities) != 0)
- capenable ^= IFCAP_VLAN_HWCSUM;
- if ((mask & IFCAP_TSO4) != 0 &&
- (IFCAP_TSO4 & capabilities) != 0) {
- capenable ^= IFCAP_TSO4;
- if ((IFCAP_TSO4 & capenable) != 0)
- hwassist |= CSUM_TSO;
- else
- hwassist &= ~CSUM_TSO;
- }
- if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
- (IFCAP_VLAN_HWTSO & capabilities) != 0)
- capenable ^= IFCAP_VLAN_HWTSO;
- if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
- (IFCAP_VLAN_HWTAGGING & capabilities) != 0) {
- capenable ^= IFCAP_VLAN_HWTAGGING;
- if ((IFCAP_VLAN_HWTAGGING & capenable) == 0)
- capenable &=
- ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
+ if ((IFCAP_TSO4 & ifr->ifr_reqcap) != 0)
+ ifr->ifr_hwassist |= CSUM_TSO;
+ if ((mask & IFCAP_VLAN_HWTAGGING) != 0)
setvlan = 1;
- }
- if (if_get(ifp, IF_MTU) > ETHERMTU &&
+ if (sc_if->msk_framesize > MSK_DEFAULT_FRAMESIZE &&
(sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
- hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
- capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
+ ifr->ifr_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
+ ifr->ifr_reqcap &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
}
- if_set(ifp, IF_HWASSIST, hwassist);
- if_set(ifp, IF_CAPENABLE, capenable);
+ MSK_IF_LOCK(sc_if);
+ sc_if->msk_capenable = ifr->ifr_reqcap;
if (setvlan)
- msk_setvlan(sc_if, ifp);
- if (reinit > 0 && (sc_if->msk_flags & MSK_FLAG_RUNNING) != 0) {
+ msk_setvlan(sc_if);
+ if (reinit && (sc_if->msk_flags & MSK_FLAG_RUNNING) != 0) {
sc_if->msk_flags &= ~MSK_FLAG_RUNNING;
msk_init_locked(sc_if);
}
@@ -1727,9 +1699,11 @@ msk_attach(device_t dev)
ifat.ifat_lla = eaddr;
ifat.ifat_softc = sc_if;
ifat.ifat_dunit = device_get_unit(dev);
-
ifp = sc_if->msk_ifp = if_attach(&ifat);
+ sc_if->msk_capenable = ifat.ifat_capenable;
+ sc_if->msk_framesize = MSK_DEFAULT_FRAMESIZE;
+
return (0);
fail:
@@ -3197,7 +3171,7 @@ msk_rxeof(struct msk_if_softc *sc_if, ui
do {
rxlen = status >> 16;
if ((status & GMR_FS_VLAN) != 0 &&
- (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) != 0)
+ (sc_if->msk_capenable & IFCAP_VLAN_HWTAGGING) != 0)
rxlen -= ETHER_VLAN_ENCAP_LEN;
if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
/*
@@ -3239,11 +3213,11 @@ msk_rxeof(struct msk_if_softc *sc_if, ui
msk_fixup_rx(m);
#endif
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
- if ((if_get(ifp, IF_CAPENABLE) & IFCAP_RXCSUM) != 0)
+ if ((sc_if->msk_capenable & IFCAP_RXCSUM) != 0)
msk_rxcsum(sc_if, control, m);
/* Check for VLAN tagged packets. */
if ((status & GMR_FS_VLAN) != 0 &&
- (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) != 0) {
+ (sc_if->msk_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
m->m_flags |= M_VLANTAG;
}
@@ -3273,7 +3247,7 @@ msk_jumbo_rxeof(struct msk_if_softc *sc_
do {
rxlen = status >> 16;
if ((status & GMR_FS_VLAN) != 0 &&
- (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) != 0)
+ (sc_if->msk_capenable & IFCAP_VLAN_HWTAGGING) != 0)
rxlen -= ETHER_VLAN_ENCAP_LEN;
if (len > sc_if->msk_framesize ||
((status & GMR_FS_ANY_ERR) != 0) ||
@@ -3304,11 +3278,11 @@ msk_jumbo_rxeof(struct msk_if_softc *sc_
msk_fixup_rx(m);
#endif
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_RXCSUM)
+ if (sc_if->msk_capenable & IFCAP_RXCSUM)
msk_rxcsum(sc_if, control, m);
/* Check for VLAN tagged packets. */
if ((status & GMR_FS_VLAN) != 0 &&
- (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) != 0) {
+ (sc_if->msk_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
m->m_flags |= M_VLANTAG;
}
@@ -3752,7 +3726,7 @@ msk_set_tx_stfwd(struct msk_if_softc *sc
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
TX_STFW_ENA);
} else {
- if (if_get(ifp, IF_MTU) > ETHERMTU) {
+ if (sc_if->msk_framesize > MSK_DEFAULT_FRAMESIZE) {
/* Set Tx GMAC FIFO Almost Empty Threshold. */
CSR_WRITE_4(sc,
MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
@@ -3801,17 +3775,6 @@ msk_init_locked(struct msk_if_softc *sc_
/* Cancel pending I/O and free all Rx/Tx buffers. */
msk_stop(sc_if);
- if (if_get(ifp, IF_MTU) < ETHERMTU)
- sc_if->msk_framesize = ETHERMTU;
- else
- sc_if->msk_framesize = if_get(ifp, IF_MTU);
- sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- if (if_get(ifp, IF_MTU) > ETHERMTU &&
- (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
- if_clrflags(ifp, IF_HWASSIST, MSK_CSUM_FEATURES | CSUM_TSO);
- if_clrflags(ifp, IF_CAPENABLE, IFCAP_TSO4 | IFCAP_TXCSUM);
- }
-
/* GMAC Control reset. */
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
@@ -3851,7 +3814,7 @@ msk_init_locked(struct msk_if_softc *sc_
gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
- if (if_get(ifp, IF_MTU) > ETHERMTU)
+ if (sc_if->msk_framesize > MSK_DEFAULT_FRAMESIZE)
gmac |= GM_SMOD_JUMBO_ENA;
GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
@@ -3913,7 +3876,7 @@ msk_init_locked(struct msk_if_softc *sc_
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
/* Configure hardware VLAN tag insertion/stripping. */
- msk_setvlan(sc_if, ifp);
+ msk_setvlan(sc_if);
if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
/* Set Rx Pause threshold. */
@@ -3990,7 +3953,7 @@ msk_init_locked(struct msk_if_softc *sc_
/* Disable Rx checksum offload and RSS hash. */
reg = BMU_DIS_RX_RSS_HASH;
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
- (if_get(ifp, IF_CAPENABLE) & IFCAP_RXCSUM) != 0)
+ (sc_if->msk_capenable & IFCAP_RXCSUM) != 0)
reg |= BMU_ENA_RX_CHKSUM;
else
reg |= BMU_DIS_RX_CHKSUM;
Modified: projects/ifnet/sys/dev/msk/if_mskreg.h
==============================================================================
--- projects/ifnet/sys/dev/msk/if_mskreg.h Fri Jan 16 11:42:42 2015 (r277242)
+++ projects/ifnet/sys/dev/msk/if_mskreg.h Fri Jan 16 11:45:54 2015 (r277243)
@@ -2561,9 +2561,10 @@ struct msk_if_softc {
device_t msk_miibus;
device_t msk_if_dev;
int32_t msk_port; /* port # on controller */
- int msk_framesize;
int msk_phytype;
int msk_phyaddr;
+ uint32_t msk_framesize;
+ uint32_t msk_capenable;
uint32_t msk_flags;
#define MSK_FLAG_MSI 0x00000001
#define MSK_FLAG_FASTETHER 0x00000004
Modified: projects/ifnet/sys/dev/virtio/network/if_vtnet.c
==============================================================================
--- projects/ifnet/sys/dev/virtio/network/if_vtnet.c Fri Jan 16 11:42:42 2015 (r277242)
+++ projects/ifnet/sys/dev/virtio/network/if_vtnet.c Fri Jan 16 11:45:54 2015 (r277243)
@@ -1017,7 +1017,7 @@ vtnet_ioctl(if_t ifp, u_long cmd, void *
{
struct vtnet_softc *sc;
struct ifreq *ifr;
- int reinit, capenable, mask, error;
+ int error;
sc = if_getsoftc(ifp, IF_DRIVER_SOFTC);
ifr = (struct ifreq *) data;
@@ -1025,11 +1025,9 @@ vtnet_ioctl(if_t ifp, u_long cmd, void *
switch (cmd) {
case SIOCSIFMTU:
- if (if_get(ifp, IF_MTU) != ifr->ifr_mtu) {
- VTNET_CORE_LOCK(sc);
- error = vtnet_change_mtu(sc, ifr->ifr_mtu);
- VTNET_CORE_UNLOCK(sc);
- }
+ VTNET_CORE_LOCK(sc);
+ error = vtnet_change_mtu(sc, ifr->ifr_mtu);
+ VTNET_CORE_UNLOCK(sc);
break;
case SIOCSIFFLAGS:
@@ -1069,49 +1067,26 @@ vtnet_ioctl(if_t ifp, u_long cmd, void *
break;
case SIOCSIFCAP:
- VTNET_CORE_LOCK(sc);
- capenable = if_get(ifp, IF_CAPENABLE);
- mask = ifr->ifr_reqcap ^ capenable;
-
- if (mask & IFCAP_TXCSUM)
- capenable ^= IFCAP_TXCSUM;
- if (mask & IFCAP_TXCSUM_IPV6)
- capenable ^= IFCAP_TXCSUM_IPV6;
- if (mask & IFCAP_TSO4)
- capenable ^= IFCAP_TSO4;
- if (mask & IFCAP_TSO6)
- capenable ^= IFCAP_TSO6;
-
- if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
- IFCAP_VLAN_HWFILTER)) {
- /* These Rx features require us to renegotiate. */
- reinit = 1;
-
- if (mask & IFCAP_RXCSUM)
- capenable ^= IFCAP_RXCSUM;
- if (mask & IFCAP_RXCSUM_IPV6)
- capenable ^= IFCAP_RXCSUM_IPV6;
- if (mask & IFCAP_LRO)
- capenable ^= IFCAP_LRO;
- if (mask & IFCAP_VLAN_HWFILTER)
- capenable ^= IFCAP_VLAN_HWFILTER;
- } else
- reinit = 0;
-
- if (mask & IFCAP_VLAN_HWTSO)
- capenable ^= IFCAP_VLAN_HWTSO;
- if (mask & IFCAP_VLAN_HWTAGGING)
- capenable ^= IFCAP_VLAN_HWTAGGING;
-
- if (reinit && (sc->vtnet_flags & VTNET_FLAG_RUNNING)) {
+ sc->vtnet_capenable = ifr->ifr_reqcap;
+ /* These Rx features require us to renegotiate. */
+ if ((ifr->ifr_reqcap ^ ifr->ifr_curcap) &
+ (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
+ IFCAP_VLAN_HWFILTER) &&
+ (sc->vtnet_flags & VTNET_FLAG_RUNNING)) {
+ VTNET_CORE_LOCK(sc);
sc->vtnet_flags &= ~VTNET_FLAG_RUNNING;
vtnet_init_locked(sc);
+ VTNET_CORE_UNLOCK(sc);
}
-
- VTNET_CORE_UNLOCK(sc);
-
- if_set(ifp, IF_CAPENABLE, capenable);
-
+ ifr->ifr_hwassist = 0;
+ if (ifr->ifr_reqcap & IFCAP_TXCSUM)
+ ifr->ifr_hwassist |= VTNET_CSUM_OFFLOAD;
+ if (ifr->ifr_reqcap & IFCAP_TXCSUM_IPV6)
+ ifr->ifr_hwassist |= VTNET_CSUM_OFFLOAD_IPV6;
+ if (ifr->ifr_reqcap & IFCAP_TSO4)
+ ifr->ifr_hwassist |= CSUM_TSO;
+ if (ifr->ifr_reqcap & IFCAP_TSO6)
+ ifr->ifr_hwassist |= CSUM_IP6_TSO;
break;
default:
@@ -1653,7 +1628,7 @@ vtnet_rxq_input(struct vtnet_rxq *rxq, s
sc = rxq->vtnrx_sc;
ifp = sc->vtnet_ifp;
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWTAGGING) {
+ if (sc->vtnet_capenable & IFCAP_VLAN_HWTAGGING) {
eh = mtod(m, struct ether_header *);
if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
vtnet_vlan_tag_remove(m);
@@ -2691,7 +2666,7 @@ vtnet_virtio_reinit(struct vtnet_softc *
device_t dev;
struct ifnet *ifp;
uint64_t features;
- uint32_t caps, capenable, mask;
+ uint32_t mask;
int error;
dev = sc->vtnet_dev;
@@ -2709,29 +2684,20 @@ vtnet_virtio_reinit(struct vtnet_softc *
/*
* Re-negotiate with the host, removing any disabled receive
* features. Transmit features are disabled only on our side
- * via IF_CAPEANBLE and IF_HWASSIST.
+ * via if_capenable and if_hwassist.
+ *
+ * We require both IPv4 and IPv6 offloading to be enabled
+ * in order to negotiated it: VirtIO does not distinguish
+ * between the two.
*/
- caps = if_get(ifp, IF_CAPABILITIES);
- capenable = if_get(ifp, IF_CAPENABLE);
- if (caps & mask) {
- /*
- * We require both IPv4 and IPv6 offloading to be enabled
- * in order to negotiated it: VirtIO does not distinguish
- * between the two.
- */
- if ((capenable & mask) != mask)
- features &= ~VIRTIO_NET_F_GUEST_CSUM;
- }
+ if ((sc->vtnet_capenable & mask) != mask)
+ features &= ~VIRTIO_NET_F_GUEST_CSUM;
- if (caps & IFCAP_LRO) {
- if ((capenable & IFCAP_LRO) == 0)
- features &= ~VTNET_LRO_FEATURES;
- }
+ if ((sc->vtnet_capenable & IFCAP_LRO) == 0)
+ features &= ~VTNET_LRO_FEATURES;
- if (caps & IFCAP_VLAN_HWFILTER) {
- if ((capenable & IFCAP_VLAN_HWFILTER) == 0)
- features &= ~VIRTIO_NET_F_CTRL_VLAN;
- }
+ if ((sc->vtnet_capenable & IFCAP_VLAN_HWFILTER) == 0)
+ features &= ~VIRTIO_NET_F_CTRL_VLAN;
error = virtio_reinit(dev, features);
if (error)
@@ -2754,7 +2720,7 @@ vtnet_init_rx_filters(struct vtnet_softc
vtnet_rx_filter_mac(sc);
}
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWFILTER)
+ if (sc->vtnet_capenable & IFCAP_VLAN_HWFILTER)
vtnet_rx_filter_vlan(sc);
}
@@ -2871,7 +2837,6 @@ static int
vtnet_reinit(struct vtnet_softc *sc)
{
struct ifnet *ifp;
- uint64_t hwassist;
int error;
ifp = sc->vtnet_ifp;
@@ -2882,17 +2847,6 @@ vtnet_reinit(struct vtnet_softc *sc)
vtnet_set_active_vq_pairs(sc);
- hwassist = 0;
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_TXCSUM)
- hwassist |= VTNET_CSUM_OFFLOAD;
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_TXCSUM_IPV6)
- hwassist |= VTNET_CSUM_OFFLOAD_IPV6;
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_TSO4)
- hwassist |= CSUM_TSO;
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_TSO6)
- hwassist |= CSUM_IP6_TSO;
- if_set(ifp, IF_HWASSIST, hwassist);
-
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
vtnet_init_rx_filters(sc);
@@ -3345,7 +3299,7 @@ vtnet_update_vlan_filter(struct vtnet_so
else
sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
- if ((if_get(ifp, IF_CAPENABLE) & IFCAP_VLAN_HWFILTER) &&
+ if ((sc->vtnet_capenable & IFCAP_VLAN_HWFILTER) &&
vtnet_exec_vlan_filter(sc, add, tag) != 0) {
device_printf(sc->vtnet_dev,
"cannot %s VLAN %d %s the host filter table\n",
@@ -3385,7 +3339,7 @@ vtnet_is_link_up(struct vtnet_softc *sc)
dev = sc->vtnet_dev;
ifp = sc->vtnet_ifp;
- if ((if_get(ifp, IF_CAPABILITIES) & IFCAP_LINKSTATE) == 0)
+ if (!virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
status = VIRTIO_NET_S_LINK_UP;
else
status = virtio_read_dev_config_2(dev,
Modified: projects/ifnet/sys/dev/virtio/network/if_vtnetvar.h
==============================================================================
--- projects/ifnet/sys/dev/virtio/network/if_vtnetvar.h Fri Jan 16 11:42:42 2015 (r277242)
+++ projects/ifnet/sys/dev/virtio/network/if_vtnetvar.h Fri Jan 16 11:45:54 2015 (r277243)
@@ -129,6 +129,8 @@ struct vtnet_softc {
struct vtnet_rxq *vtnet_rxqs;
struct vtnet_txq *vtnet_txqs;
+ uint64_t vtnet_hwassist;
+ uint32_t vtnet_capenable;
uint32_t vtnet_flags;
#define VTNET_FLAG_SUSPENDED 0x0001
#define VTNET_FLAG_MAC 0x0002
Modified: projects/ifnet/sys/dev/xl/if_xl.c
==============================================================================
--- projects/ifnet/sys/dev/xl/if_xl.c Fri Jan 16 11:42:42 2015 (r277242)
+++ projects/ifnet/sys/dev/xl/if_xl.c Fri Jan 16 11:45:54 2015 (r277243)
@@ -1921,7 +1921,7 @@ again:
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = total_len;
- if (if_get(ifp, IF_CAPENABLE) & IFCAP_RXCSUM) {
+ if (sc->xl_capenable & IFCAP_RXCSUM) {
/* Do IP checksum checking. */
if (rxstat & XL_RXSTAT_IPCKOK)
m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
@@ -2982,7 +2982,7 @@ xl_ioctl(if_t ifp, u_long command, void
{
struct xl_softc *sc;
struct ifreq *ifr = (struct ifreq *) data;
- uint32_t flags, mask;
+ uint32_t flags;
int error = 0;
struct mii_data *mii = NULL;
@@ -3026,19 +3026,15 @@ xl_ioctl(if_t ifp, u_long command, void
&mii->mii_media, command);
break;
case SIOCSIFCAP:
- mask = ifr->ifr_reqcap ^ if_get(ifp, IF_CAPENABLE);
#ifdef DEVICE_POLLING
- if ((mask & IFCAP_POLLING) != 0 &&
- (if_get(ifp, IF_CAPABILITIES) & IFCAP_POLLING) != 0) {
- if_xorflags(ifp, IF_CAPENABLE, IFCAP_POLLING);
- if ((if_get(ifp, IF_CAPENABLE) & IFCAP_POLLING) != 0) {
+ if (((ifr->ifr_reqcap ^ ifr->ifr_curcap) & IFCAP_POLLING)) {
+ if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
error = ether_poll_register(xl_poll, ifp);
if (error)
break;
XL_LOCK(sc);
/* Disable interrupts */
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
- if_addflags(ifp, IF_CAPENABLE, IFCAP_POLLING);
XL_UNLOCK(sc);
} else {
error = ether_poll_deregister(ifp);
@@ -3052,27 +3048,16 @@ xl_ioctl(if_t ifp, u_long command, void
bus_space_write_4(sc->xl_ftag,
sc->xl_fhandle, 4, 0x8000);
XL_UNLOCK(sc);
+ if (error)
+ break;
}
}
#endif /* DEVICE_POLLING */
- XL_LOCK(sc);
- if ((mask & IFCAP_TXCSUM) != 0 &&
- (if_get(ifp, IF_CAPABILITIES) & IFCAP_TXCSUM) != 0) {
- if_xorflags(ifp, IF_CAPENABLE, IFCAP_TXCSUM);
- if ((if_get(ifp, IF_CAPENABLE) & IFCAP_TXCSUM) != 0)
- if_addflags(ifp, IF_HWASSIST,
- XL905B_CSUM_FEATURES);
- else
- if_clrflags(ifp, IF_HWASSIST,
- XL905B_CSUM_FEATURES);
- }
- if ((mask & IFCAP_RXCSUM) != 0 &&
- (if_get(ifp, IF_CAPABILITIES) & IFCAP_RXCSUM) != 0)
- if_xorflags(ifp, IF_CAPENABLE, IFCAP_RXCSUM);
- if ((mask & IFCAP_WOL_MAGIC) != 0 &&
- (if_get(ifp, IF_CAPABILITIES) & IFCAP_WOL_MAGIC) != 0)
- if_xorflags(ifp, IF_CAPENABLE, IFCAP_WOL_MAGIC);
- XL_UNLOCK(sc);
+ sc->xl_capenable = ifr->ifr_reqcap;
+ if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0)
+ ifr->ifr_hwassist = XL905B_CSUM_FEATURES;
+ else
+ ifr->ifr_hwassist = 0;
break;
default:
error = EOPNOTSUPP;
@@ -3267,16 +3252,16 @@ xl_setwol(struct xl_softc *sc)
/* Clear any pending PME events. */
CSR_READ_2(sc, XL_W7_BM_PME);
cfg = 0;
- if ((if_get(ifp, IF_CAPENABLE) & IFCAP_WOL_MAGIC) != 0)
+ if ((sc->xl_capenable & IFCAP_WOL_MAGIC) != 0)
cfg |= XL_BM_PME_MAGIC;
CSR_WRITE_2(sc, XL_W7_BM_PME, cfg);
/* Enable RX. */
- if ((if_get(ifp, IF_CAPENABLE) & IFCAP_WOL_MAGIC) != 0)
+ if ((sc->xl_capenable & IFCAP_WOL_MAGIC) != 0)
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
/* Request PME. */
pmstat = pci_read_config(sc->xl_dev,
sc->xl_pmcap + PCIR_POWER_STATUS, 2);
- if ((if_get(ifp, IF_CAPENABLE) & IFCAP_WOL_MAGIC) != 0)
+ if ((sc->xl_capenable & IFCAP_WOL_MAGIC) != 0)
pmstat |= PCIM_PSTAT_PMEENABLE;
else
pmstat &= ~PCIM_PSTAT_PMEENABLE;
Modified: projects/ifnet/sys/dev/xl/if_xlreg.h
==============================================================================
--- projects/ifnet/sys/dev/xl/if_xlreg.h Fri Jan 16 11:42:42 2015 (r277242)
+++ projects/ifnet/sys/dev/xl/if_xlreg.h Fri Jan 16 11:45:54 2015 (r277243)
@@ -610,6 +610,7 @@ struct xl_softc {
struct xl_chain_data xl_cdata;
struct callout xl_tick_callout;
int xl_wdog_timer;
+ uint32_t xl_capenable;
int xl_flags;
struct resource *xl_fres;
bus_space_handle_t xl_fhandle;
Modified: projects/ifnet/sys/net/if.c
==============================================================================
--- projects/ifnet/sys/net/if.c Fri Jan 16 11:42:42 2015 (r277242)
+++ projects/ifnet/sys/net/if.c Fri Jan 16 11:45:54 2015 (r277243)
@@ -1528,21 +1528,9 @@ if_getfeature(if_t ifp, ift_feature f, u
case IF_FLAGS:
*f32 = &ifp->if_flags;
break;
- case IF_CAPABILITIES:
- *f32 = &ifp->if_capabilities;
- break;
- case IF_CAPENABLE:
- *f32 = &ifp->if_capenable;
- break;
- case IF_MTU:
- *f32 = &ifp->if_mtu;
- break;
case IF_FIB:
*f32 = &ifp->if_fib;
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***