socsvn commit: r302612 - in soc2016/vincenzo/head/sys: dev/netmap modules/netmap net
vincenzo at FreeBSD.org
Wed May 11 15:23:27 UTC 2016
Author: vincenzo
Date: Wed May 11 15:23:23 2016
New Revision: 302612
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=302612
Log:
Update netmap sources to the github ones
Added:
soc2016/vincenzo/head/sys/dev/netmap/if_nfe_netmap.h
soc2016/vincenzo/head/sys/dev/netmap/netmap_virt.h
soc2016/vincenzo/head/sys/dev/netmap/ptnetmap.c
Modified:
soc2016/vincenzo/head/sys/dev/netmap/if_em_netmap.h
soc2016/vincenzo/head/sys/dev/netmap/if_ixl_netmap.h
soc2016/vincenzo/head/sys/dev/netmap/if_lem_netmap.h
soc2016/vincenzo/head/sys/dev/netmap/if_vtnet_netmap.h
soc2016/vincenzo/head/sys/dev/netmap/ixgbe_netmap.h
soc2016/vincenzo/head/sys/dev/netmap/netmap.c
soc2016/vincenzo/head/sys/dev/netmap/netmap_freebsd.c
soc2016/vincenzo/head/sys/dev/netmap/netmap_generic.c
soc2016/vincenzo/head/sys/dev/netmap/netmap_kern.h
soc2016/vincenzo/head/sys/dev/netmap/netmap_mbq.c
soc2016/vincenzo/head/sys/dev/netmap/netmap_mbq.h
soc2016/vincenzo/head/sys/dev/netmap/netmap_mem2.c
soc2016/vincenzo/head/sys/dev/netmap/netmap_mem2.h
soc2016/vincenzo/head/sys/dev/netmap/netmap_monitor.c
soc2016/vincenzo/head/sys/dev/netmap/netmap_offloadings.c
soc2016/vincenzo/head/sys/dev/netmap/netmap_pipe.c
soc2016/vincenzo/head/sys/dev/netmap/netmap_vale.c
soc2016/vincenzo/head/sys/modules/netmap/Makefile
soc2016/vincenzo/head/sys/net/netmap.h
soc2016/vincenzo/head/sys/net/netmap_user.h
Modified: soc2016/vincenzo/head/sys/dev/netmap/if_em_netmap.h
==============================================================================
--- soc2016/vincenzo/head/sys/dev/netmap/if_em_netmap.h Wed May 11 14:59:54 2016 (r302611)
+++ soc2016/vincenzo/head/sys/dev/netmap/if_em_netmap.h Wed May 11 15:23:23 2016 (r302612)
@@ -148,7 +148,7 @@
/* device-specific */
struct e1000_tx_desc *curr = &txr->tx_base[nic_i];
- struct em_txbuffer *txbuf = &txr->tx_buffers[nic_i];
+ struct em_buffer *txbuf = &txr->tx_buffers[nic_i];
int flags = (slot->flags & NS_REPORT ||
nic_i == 0 || nic_i == report_frequency) ?
E1000_TXD_CMD_RS : 0;
@@ -239,12 +239,12 @@
nm_i = netmap_idx_n2k(kring, nic_i);
for (n = 0; ; n++) { // XXX no need to count
- union e1000_rx_desc_extended *curr = &rxr->rx_base[nic_i];
- uint32_t staterr = le32toh(curr->wb.upper.status_error);
+ struct e1000_rx_desc *curr = &rxr->rx_base[nic_i];
+ uint32_t staterr = le32toh(curr->status);
if ((staterr & E1000_RXD_STAT_DD) == 0)
break;
- ring->slot[nm_i].len = le16toh(curr->wb.upper.length);
+ ring->slot[nm_i].len = le16toh(curr->length);
ring->slot[nm_i].flags = slot_flags;
bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[nic_i].map,
BUS_DMASYNC_POSTREAD);
@@ -271,19 +271,19 @@
uint64_t paddr;
void *addr = PNMB(na, slot, &paddr);
- union e1000_rx_desc_extended *curr = &rxr->rx_base[nic_i];
- struct em_rxbuffer *rxbuf = &rxr->rx_buffers[nic_i];
+ struct e1000_rx_desc *curr = &rxr->rx_base[nic_i];
+ struct em_buffer *rxbuf = &rxr->rx_buffers[nic_i];
if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
goto ring_reset;
if (slot->flags & NS_BUF_CHANGED) {
/* buffer has changed, reload map */
- curr->read.buffer_addr = htole64(paddr);
+ curr->buffer_addr = htole64(paddr);
netmap_reload_map(na, rxr->rxtag, rxbuf->map, addr);
slot->flags &= ~NS_BUF_CHANGED;
}
- curr->wb.upper.status_error = 0;
+ curr->status = 0;
bus_dmamap_sync(rxr->rxtag, rxbuf->map,
BUS_DMASYNC_PREREAD);
nm_i = nm_next(nm_i, lim);
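
The hunks above move if_em_netmap.h back from the extended (union
e1000_rx_desc_extended) writeback format to the legacy struct e1000_rx_desc
layout. For reference, the receive poll that the modified rxsync now performs
amounts to the sketch below; this is a simplified illustration, not driver
code: rxr, nic_i and lim stand for the driver/kring state visible in the
diff, and consume_frame() is a hypothetical placeholder.

	/* Sketch: walk legacy e1000 RX descriptors until we find one the
	 * NIC has not written back yet (DD bit clear in the status byte). */
	for (;;) {
		struct e1000_rx_desc *curr = &rxr->rx_base[nic_i];

		if ((curr->status & E1000_RXD_STAT_DD) == 0)
			break;					/* still owned by the NIC */
		consume_frame(nic_i, le16toh(curr->length));	/* hypothetical helper */
		curr->status = 0;				/* hand the slot back */
		nic_i = nm_next(nic_i, lim);			/* lim = nkr_num_slots - 1 */
	}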
Modified: soc2016/vincenzo/head/sys/dev/netmap/if_ixl_netmap.h
==============================================================================
--- soc2016/vincenzo/head/sys/dev/netmap/if_ixl_netmap.h Wed May 11 14:59:54 2016 (r302611)
+++ soc2016/vincenzo/head/sys/dev/netmap/if_ixl_netmap.h Wed May 11 15:23:23 2016 (r302612)
@@ -59,7 +59,7 @@
/*
* device-specific sysctl variables:
*
- * ixl_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
+ * ixl_crcstrip: 0: NIC keeps CRC in rx frames (default), 1: NIC strips it.
* During regular operations the CRC is stripped, but on some
* hardware reception of frames not multiple of 64 is slower,
* so using crcstrip=0 helps in benchmarks.
@@ -74,7 +74,7 @@
int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip = 1;
#if 0
SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_crcstrip,
- CTLFLAG_RW, &ixl_crcstrip, 1, "strip CRC on rx frames");
+ CTLFLAG_RW, &ixl_crcstrip, 1, "NIC strips CRC on rx frames");
#endif
SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_rx_miss,
CTLFLAG_RW, &ixl_rx_miss, 0, "potentially missed rx intr");
Modified: soc2016/vincenzo/head/sys/dev/netmap/if_lem_netmap.h
==============================================================================
--- soc2016/vincenzo/head/sys/dev/netmap/if_lem_netmap.h Wed May 11 14:59:54 2016 (r302611)
+++ soc2016/vincenzo/head/sys/dev/netmap/if_lem_netmap.h Wed May 11 15:23:23 2016 (r302612)
@@ -38,6 +38,10 @@
#include <vm/vm.h>
#include <vm/pmap.h> /* vtophys ? */
#include <dev/netmap/netmap_kern.h>
+#ifdef WITH_PTNETMAP_GUEST
+#include <dev/netmap/netmap_mem2.h>
+#endif
+#include <dev/netmap/netmap_virt.h>
extern int netmap_adaptive_io;
@@ -80,6 +84,20 @@
return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}
+static void
+lem_netmap_intr(struct netmap_adapter *na, int onoff)
+{
+ struct ifnet *ifp = na->ifp;
+ struct adapter *adapter = ifp->if_softc;
+
+ EM_CORE_LOCK(adapter);
+ if (onoff) {
+ lem_enable_intr(adapter);
+ } else {
+ lem_disable_intr(adapter);
+ }
+ EM_CORE_UNLOCK(adapter);
+}
/*
* Reconcile kernel and user view of the transmit ring.
@@ -470,6 +488,176 @@
return netmap_ring_reinit(kring);
}
+#if defined (NIC_PTNETMAP) && defined (WITH_PTNETMAP_GUEST)
+/*
+ * ptnetmap support for: lem (FreeBSD version)
+ *
+ * For details on ptnetmap support please see if_vtnet_netmap.h
+ */
+static uint32_t lem_ptnetmap_ptctl(struct ifnet *, uint32_t);
+
+/* Returns device configuration from the CSB */
+static int
+lem_ptnetmap_config(struct netmap_adapter *na,
+ u_int *txr, u_int *txd, u_int *rxr, u_int *rxd)
+{
+ struct ifnet *ifp = na->ifp;
+ struct adapter *adapter = ifp->if_softc;
+ struct paravirt_csb *csb = adapter->csb;
+ int ret;
+
+ if (csb == NULL)
+ return EINVAL;
+
+ ret = lem_ptnetmap_ptctl(ifp, NET_PARAVIRT_PTCTL_CONFIG);
+ if (ret)
+ return ret;
+
+ *txr = 1; //*txr = csb->num_tx_rings;
+ *rxr = 1; //*rxr = csb->num_rx_rings;
+ *txd = csb->num_tx_slots;
+ *rxd = csb->num_rx_slots;
+
+ D("txr %u rxr %u txd %u rxd %u",
+ *txr, *rxr, *txd, *rxd);
+
+ return 0;
+}
+
+/* Reconcile host and guest view of the transmit ring. */
+static int
+lem_ptnetmap_txsync(struct netmap_kring *kring, int flags)
+{
+ struct netmap_adapter *na = kring->na;
+ //u_int ring_nr = kring->ring_id;
+ struct ifnet *ifp = na->ifp;
+ struct adapter *adapter = ifp->if_softc;
+ bool notify;
+
+ notify = netmap_pt_guest_txsync(&adapter->csb->tx_ring, kring, flags);
+ if (notify)
+ E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
+
+ return 0;
+}
+
+/* Reconcile host and guest view of the receive ring. */
+static int
+lem_ptnetmap_rxsync(struct netmap_kring *kring, int flags)
+{
+ struct netmap_adapter *na = kring->na;
+ //u_int ring_nr = kring->ring_id;
+ struct ifnet *ifp = na->ifp;
+ struct adapter *adapter = ifp->if_softc;
+ bool notify;
+
+ notify = netmap_pt_guest_rxsync(&adapter->csb->rx_ring, kring, flags);
+ if (notify)
+ E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), 0);
+
+ return 0;
+}
+
+/* Register/unregister. We are already under netmap lock. */
+static int
+lem_ptnetmap_reg(struct netmap_adapter *na, int onoff)
+{
+ struct ifnet *ifp = na->ifp;
+ struct adapter *adapter = ifp->if_softc;
+ struct paravirt_csb *csb = adapter->csb;
+ struct netmap_kring *kring;
+ int ret;
+
+ if (onoff) {
+ ret = lem_ptnetmap_ptctl(ifp, NET_PARAVIRT_PTCTL_REGIF);
+ if (ret)
+ return ret;
+
+ na->na_flags |= NAF_NETMAP_ON;
+ adapter->ptnetmap_enabled = 1;
+ /*
+ * Init ring and kring pointers
+ * After PARAVIRT_PTCTL_REGIF, the csb contains a snapshot of the
+ * host kring pointers.
+ * XXX This initialization is required, because we don't close
+ * the host port on UNREGIF.
+ */
+
+ // Init rx ring
+ kring = na->rx_rings;
+ kring->rhead = kring->ring->head = csb->rx_ring.head;
+ kring->rcur = kring->ring->cur = csb->rx_ring.cur;
+ kring->nr_hwcur = csb->rx_ring.hwcur;
+ kring->nr_hwtail = kring->rtail = kring->ring->tail =
+ csb->rx_ring.hwtail;
+
+ // Init tx ring
+ kring = na->tx_rings;
+ kring->rhead = kring->ring->head = csb->tx_ring.head;
+ kring->rcur = kring->ring->cur = csb->tx_ring.cur;
+ kring->nr_hwcur = csb->tx_ring.hwcur;
+ kring->nr_hwtail = kring->rtail = kring->ring->tail =
+ csb->tx_ring.hwtail;
+ } else {
+ na->na_flags &= ~NAF_NETMAP_ON;
+ adapter->ptnetmap_enabled = 0;
+ ret = lem_ptnetmap_ptctl(ifp, NET_PARAVIRT_PTCTL_UNREGIF);
+ }
+
+ return lem_netmap_reg(na, onoff);
+}
+
+
+static int
+lem_ptnetmap_bdg_attach(const char *bdg_name, struct netmap_adapter *na)
+{
+ return EOPNOTSUPP;
+}
+
+/* Send command to the host through PTCTL register. */
+static uint32_t
+lem_ptnetmap_ptctl(struct ifnet *ifp, uint32_t val)
+{
+ struct adapter *adapter = ifp->if_softc;
+ uint32_t ret;
+
+ E1000_WRITE_REG(&adapter->hw, E1000_PTCTL, val);
+ ret = E1000_READ_REG(&adapter->hw, E1000_PTSTS);
+ D("PTSTS = %u", ret);
+
+ return ret;
+}
+
+/* Features negotiation with the host through PTFEAT */
+static uint32_t
+lem_ptnetmap_features(struct adapter *adapter)
+{
+ uint32_t features;
+ /* tell the device the features we support */
+ E1000_WRITE_REG(&adapter->hw, E1000_PTFEAT, NET_PTN_FEATURES_BASE);
+ /* get back the acknowledged features */
+ features = E1000_READ_REG(&adapter->hw, E1000_PTFEAT);
+ device_printf(adapter->dev, "ptnetmap support: %s\n",
+ (features & NET_PTN_FEATURES_BASE) ? "base" :
+ "none");
+ return features;
+}
+
+static void
+lem_ptnetmap_dtor(struct netmap_adapter *na)
+{
+ netmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp);
+}
+
+/* XXX: these warnings affect proper kernel compilation
+#elif defined (NIC_PTNETMAP)
+#warning "if_lem supports ptnetmap but netmap does not support it"
+#warning "(configure netmap with ptnetmap support)"
+#elif defined (WITH_PTNETMAP_GUEST)
+#warning "netmap supports ptnetmap but e1000 does not support it"
+#warning "(configure if_lem with ptnetmap support)"
+*/
+#endif /* NIC_PTNETMAP && WITH_PTNETMAP_GUEST */
static void
lem_netmap_attach(struct adapter *adapter)
@@ -486,7 +674,34 @@
na.nm_rxsync = lem_netmap_rxsync;
na.nm_register = lem_netmap_reg;
na.num_tx_rings = na.num_rx_rings = 1;
- netmap_attach(&na);
+ na.nm_intr = lem_netmap_intr;
+#if defined (NIC_PTNETMAP) && defined (WITH_PTNETMAP_GUEST)
+ /* XXX: check if the device supports ptnetmap (now we use PARA_SUBDEV) */
+ if ((adapter->hw.subsystem_device_id == E1000_PARA_SUBDEV) &&
+ (lem_ptnetmap_features(adapter) & NET_PTN_FEATURES_BASE)) {
+ int err;
+
+ na.nm_config = lem_ptnetmap_config;
+ na.nm_register = lem_ptnetmap_reg;
+ na.nm_txsync = lem_ptnetmap_txsync;
+ na.nm_rxsync = lem_ptnetmap_rxsync;
+ na.nm_bdg_attach = lem_ptnetmap_bdg_attach; /* XXX */
+ na.nm_dtor = lem_ptnetmap_dtor;
+
+ /* Ask the device to fill in some configuration fields. Here we
+ * just need nifp_offset. */
+ err = lem_ptnetmap_ptctl(na.ifp, NET_PARAVIRT_PTCTL_CONFIG);
+ if (err) {
+ D("Failed to get nifp_offset from passthrough device");
+ return;
+ }
+
+ netmap_pt_guest_attach(&na, adapter->csb,
+ adapter->csb->nifp_offset,
+ lem_ptnetmap_ptctl);
+ } else
+#endif /* NIC_PTNETMAP && defined WITH_PTNETMAP_GUEST */
+ netmap_attach(&na);
}
/* end of file */
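
The ptnetmap guest code above talks to the host through a few paravirtual
registers (PTFEAT, PTCTL, PTSTS) and the shared CSB. Judging only from the
fields accessed in this diff, the guest-visible CSB state looks roughly like
the sketch below; the struct and type names here are invented for
illustration, and the authoritative definition is struct paravirt_csb in the
newly added netmap_virt.h (not shown in this truncated diff).

	/* Illustrative only: CSB fields used by the code above. */
	struct csb_ring_sketch {
		uint32_t head;		/* written by the guest: first new slot */
		uint32_t cur;		/* written by the guest: wakeup point */
		uint32_t hwcur;		/* written by the host: next slot to process */
		uint32_t hwtail;	/* written by the host: first slot not completed */
	};

	struct csb_sketch {
		uint32_t guest_csb_on;			/* guest marks the CSB as active */
		uint32_t nifp_offset;			/* offset of the netmap_if in shared memory */
		uint32_t num_tx_slots, num_rx_slots;	/* ring geometry exported by the host */
		struct csb_ring_sketch tx_ring, rx_ring;
	};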
Added: soc2016/vincenzo/head/sys/dev/netmap/if_nfe_netmap.h
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ soc2016/vincenzo/head/sys/dev/netmap/if_nfe_netmap.h Wed May 11 15:23:23 2016 (r302612)
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2011-2014 Luigi Rizzo. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD: head/sys/dev/netmap/if_em_netmap.h 231881 2012-02-17 14:09:04Z luigi $
+ *
+ * netmap support for: nfe XXX not yet tested.
+ *
+ * For more details on netmap support please see ixgbe_netmap.h
+ */
+
+
+#include <net/netmap.h>
+#include <sys/selinfo.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <dev/netmap/netmap_kern.h>
+
+
+static int
+nfe_netmap_init_buffers(struct nfe_softc *sc)
+{
+ struct netmap_adapter *na = NA(sc->nfe_ifp);
+ struct netmap_slot *slot;
+ int i, l, n, max_avail;
+ struct nfe_desc32 *desc32 = NULL;
+ struct nfe_desc64 *desc64 = NULL;
+ void *addr;
+ uint64_t paddr;
+
+ slot = netmap_reset(na, NR_TX, 0, 0);
+ if (!slot)
+ return 0; // not in native mode
+ // XXX init the tx ring
+ n = NFE_TX_RING_COUNT;
+ for (i = 0; i < n; i++) {
+ l = netmap_idx_n2k(&na->tx_rings[0], i);
+ addr = PNMB(na, slot + l, &paddr);
+ netmap_reload_map(sc->txq.tx_data_tag,
+ sc->txq.data[l].tx_data_map, addr);
+ slot[l].flags = 0;
+ if (sc->nfe_flags & NFE_40BIT_ADDR) {
+ desc64 = &sc->txq.desc64[l];
+ desc64->physaddr[0] = htole32(NFE_ADDR_HI(paddr));
+ desc64->physaddr[1] = htole32(NFE_ADDR_LO(paddr));
+ desc64->vtag = 0;
+ desc64->length = htole16(0);
+ desc64->flags = htole16(0);
+ } else {
+ desc32 = &sc->txq.desc32[l];
+ desc32->physaddr = htole32(NFE_ADDR_LO(paddr));
+ desc32->length = htole16(0);
+ desc32->flags = htole16(0);
+ }
+ }
+
+ slot = netmap_reset(na, NR_RX, 0, 0);
+ // XXX init the rx ring
+ /*
+ * preserve buffers still owned by the driver (and keep one empty).
+ */
+ n = NFE_RX_RING_COUNT;
+ max_avail = n - 1 - nm_kr_rxspace(&na->rx_rings[0]);
+ for (i = 0; i < n; i++) {
+ uint16_t flags;
+ l = netmap_idx_n2k(&na->rx_rings[0], i);
+ addr = PNMB(na, slot + l, &paddr);
+ flags = (i < max_avail) ? NFE_RX_READY : 0;
+ if (sc->nfe_flags & NFE_40BIT_ADDR) {
+ desc64 = &sc->rxq.desc64[l];
+ desc64->physaddr[0] = htole32(NFE_ADDR_HI(paddr));
+ desc64->physaddr[1] = htole32(NFE_ADDR_LO(paddr));
+ desc64->vtag = 0;
+ desc64->length = htole16(NETMAP_BUF_SIZE);
+ desc64->flags = htole16(NFE_RX_READY);
+ } else {
+ desc32 = &sc->rxq.desc32[l];
+ desc32->physaddr = htole32(NFE_ADDR_LO(paddr));
+ desc32->length = htole16(NETMAP_BUF_SIZE);
+ desc32->flags = htole16(NFE_RX_READY);
+ }
+
+ netmap_reload_map(sc->rxq.rx_data_tag,
+ sc->rxq.data[l].rx_data_map, addr);
+ bus_dmamap_sync(sc->rxq.rx_data_tag,
+ sc->rxq.data[l].rx_data_map, BUS_DMASYNC_PREREAD);
+ }
+
+ return 1;
+}
+
+
+/*
+ * Register/unregister. We are already under netmap lock.
+ */
+static int
+nfe_netmap_reg(struct netmap_adapter *na, int onoff)
+{
+ struct ifnet *ifp = na->ifp;
+ struct nfe_softc *sc = ifp->if_softc;
+
+ NFE_LOCK(sc);
+ nfe_stop(ifp); /* also clear IFF_DRV_RUNNING */
+ if (onoff) {
+ nm_set_native_flags(na);
+ } else {
+ nm_clear_native_flags(na);
+ }
+ nfe_init_locked(sc); /* also enable intr */
+ NFE_UNLOCK(sc);
+ return (0);
+}
+
+
+/*
+ * Reconcile kernel and user view of the transmit ring.
+ */
+static int
+nfe_netmap_txsync(struct netmap_kring *kring, int flags)
+{
+ struct netmap_adapter *na = kring->na;
+ struct ifnet *ifp = na->ifp;
+ struct netmap_ring *ring = kring->ring;
+ u_int nm_i; /* index into the netmap ring */
+ u_int nic_i; /* index into the NIC ring */
+ u_int n;
+ u_int const lim = kring->nkr_num_slots - 1;
+ u_int const head = kring->rhead;
+ /* generate an interrupt approximately every half ring */
+ u_int report_frequency = kring->nkr_num_slots >> 1;
+
+ /* device-specific */
+ struct nfe_softc *sc = ifp->if_softc;
+ struct nfe_desc32 *desc32 = NULL;
+ struct nfe_desc64 *desc64 = NULL;
+
+ bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
+ BUS_DMASYNC_POSTREAD);
+
+ /*
+ * First part: process new packets to send.
+ */
+
+ nm_i = kring->nr_hwcur;
+ if (nm_i != head) { /* we have new packets to send */
+ nic_i = netmap_idx_k2n(kring, nm_i);
+ for (n = 0; nm_i != head; n++) {
+ /* slot is the current slot in the netmap ring */
+ struct netmap_slot *slot = &ring->slot[nm_i];
+ u_int len = slot->len;
+ uint64_t paddr;
+ void *addr = PNMB(na, slot, &paddr);
+
+ NM_CHECK_ADDR_LEN(addr, len);
+
+ if (slot->flags & NS_BUF_CHANGED) {
+ /* buffer has changed, reload map */
+ netmap_reload_map(sc->txq.tx_data_tag,
+ sc->txq.data[l].tx_data_map, addr);
+ }
+ slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
+
+ if (sc->nfe_flags & NFE_40BIT_ADDR) {
+ desc64 = &sc->txq.desc64[l];
+ desc64->physaddr[0] = htole32(NFE_ADDR_HI(paddr));
+ desc64->physaddr[1] = htole32(NFE_ADDR_LO(paddr));
+ desc64->vtag = 0;
+ desc64->length = htole16(len - 1);
+ desc64->flags =
+ htole16(NFE_TX_VALID | NFE_TX_LASTFRAG_V2);
+ } else {
+ desc32 = &sc->txq.desc32[l];
+ desc32->physaddr = htole32(NFE_ADDR_LO(paddr));
+ desc32->length = htole16(len - 1);
+ desc32->flags =
+ htole16(NFE_TX_VALID | NFE_TX_LASTFRAG_V1);
+ }
+
+ bus_dmamap_sync(sc->txq.tx_data_tag,
+ sc->txq.data[l].tx_data_map, BUS_DMASYNC_PREWRITE);
+ nm_i = nm_next(nm_i, lim);
+ nic_i = nm_next(nic_i, lim);
+ }
+ kring->nr_hwcur = head;
+ sc->txq.cur = nic_i;
+
+ bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /* XXX something missing ? where is the last pkt marker ? */
+ NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
+ }
+
+ /*
+ * Second part: reclaim buffers for completed transmissions.
+ */
+ if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
+ u_int nic_cur = sc->txq.cur;
+ nic_i = sc->txq.next;
+ for (n = 0; nic_i != nic_cur; n++, NFE_INC(nic_i, NFE_TX_RING_COUNT)) {
+ uint16_t flags;
+ if (sc->nfe_flags & NFE_40BIT_ADDR) {
+ desc64 = &sc->txq.desc64[l];
+ flags = le16toh(desc64->flags);
+ } else {
+ desc32 = &sc->txq.desc32[l];
+ flags = le16toh(desc32->flags);
+ }
+ if (flags & NFE_TX_VALID)
+ break;
+ }
+ if (n > 0) {
+ sc->txq.next = nic_i;
+ kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
+ }
+ }
+
+
+ return 0;
+}
+
+
+/*
+ * Reconcile kernel and user view of the receive ring.
+ */
+static int
+nfe_netmap_rxsync(struct netmap_kring *kring, int flags)
+{
+ struct netmap_adapter *na = kring->na;
+ struct ifnet *ifp = na->ifp;
+ struct netmap_ring *ring = kring->ring;
+ u_int nm_i; /* index into the netmap ring */
+ u_int nic_i; /* index into the NIC ring */
+ u_int n;
+ u_int const lim = kring->nkr_num_slots - 1;
+ u_int const head = kring->rhead;
+ int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
+
+ /* device-specific */
+ struct nfe_softc *sc = ifp->if_softc;
+ struct nfe_desc32 *desc32;
+ struct nfe_desc64 *desc64;
+
+ if (head > lim)
+ return netmap_ring_reinit(kring);
+
+ bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ /*
+ * First part: import newly received packets.
+ */
+ if (netmap_no_pendintr || force_update) {
+ uint16_t flags, len;
+ uint16_t slot_flags = kring->nkr_slot_flags;
+
+ nic_i = sc->rxq.cur;
+ nm_i = netmap_idx_n2k(kring, nic_i);
+ for (n = 0; ; n++) {
+ if (sc->nfe_flags & NFE_40BIT_ADDR) {
+ desc64 = &sc->rxq.desc64[sc->rxq.cur];
+ flags = le16toh(desc64->flags);
+ len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
+ } else {
+ desc32 = &sc->rxq.desc32[sc->rxq.cur];
+ flags = le16toh(desc32->flags);
+ len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
+ }
+
+ if (flags & NFE_RX_READY)
+ break;
+
+ ring->slot[nm_i].len = len;
+ ring->slot[nm_i].flags = slot_flags;
+ bus_dmamap_sync(sc->rxq.rx_data_tag,
+ sc->rxq.data[nic_i].rx_data_map,
+ BUS_DMASYNC_POSTREAD);
+ nm_i = nm_next(nm_i, lim);
+ nic_i = nm_next(nic_i, lim);
+ }
+ if (n) { /* update the state variables */
+ sc->rxq.cur = nic_i;
+ kring->nr_hwtail = nm_i;
+ }
+ kring->nr_kflags &= ~NKR_PENDINTR;
+ }
+
+ /*
+ * Second part: skip past packets that userspace has released.
+ */
+ nm_i = kring->nr_hwcur;
+ if (nm_i != head) {
+ nic_i = netmap_idx_k2n(kring, nm_i);
+ for (n = 0; nm_i != head; n++) {
+ struct netmap_slot *slot = &ring->slot[nm_i];
+ uint64_t paddr;
+ void *addr = PNMB(na, slot, &paddr);
+
+ if (addr == netmap_buffer_base) /* bad buf */
+ goto ring_reset;
+
+ if (slot->flags & NS_BUF_CHANGED) {
+ /* buffer has changed, reload map */
+ netmap_reload_map(sc->rxq.rx_data_tag,
+ sc->rxq.data[l].rx_data_map, addr);
+ slot->flags &= ~NS_BUF_CHANGED;
+ }
+ if (sc->nfe_flags & NFE_40BIT_ADDR) {
+ desc64 = &sc->rxq.desc64[nic_i];
+ desc64->physaddr[0] =
+ htole32(NFE_ADDR_HI(paddr));
+ desc64->physaddr[1] =
+ htole32(NFE_ADDR_LO(paddr));
+ desc64->length = htole16(NETMAP_BUF_SIZE);
+ desc64->flags = htole16(NFE_RX_READY);
+ } else {
+ desc32 = &sc->rxq.desc32[nic_i];
+ desc32->physaddr =
+ htole32(NFE_ADDR_LO(paddr));
+ desc32->length = htole16(NETMAP_BUF_SIZE);
+ desc32->flags = htole16(NFE_RX_READY);
+ }
+
+ bus_dmamap_sync(sc->rxq.rx_data_tag,
+ sc->rxq.data[nic_i].rx_data_map,
+ BUS_DMASYNC_PREREAD);
+ nm_i = nm_next(nm_i, lim);
+ nic_i = nm_next(nic_i, lim);
+ }
+ kring->nr_hwcur = head;
+ bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ }
+
+
+ return 0;
+
+ring_reset:
+ return netmap_ring_reinit(kring);
+}
+
+
+static void
+nfe_netmap_attach(struct nfe_softc *sc)
+{
+ struct netmap_adapter na;
+
+ bzero(&na, sizeof(na));
+
+ na.ifp = sc->nfe_ifp;
+ na.na_flags = NAF_BDG_MAYSLEEP;
+ na.num_tx_desc = NFE_TX_RING_COUNT;
+ na.num_rx_desc = NFE_RX_RING_COUNT;
+ na.nm_txsync = nfe_netmap_txsync;
+ na.nm_rxsync = nfe_netmap_rxsync;
+ na.nm_register = nfe_netmap_reg;
+ na.num_tx_rings = na.num_rx_rings = 1;
+ netmap_attach(&na, 1);
+}
+
+/* end of file */
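
if_nfe_netmap.h only provides the kernel half of netmap support; once a
driver exposes these nm_register/nm_txsync/nm_rxsync hooks, applications use
the interface through net/netmap_user.h (also updated by this commit). A
minimal receive loop might look like the sketch below; "nfe0" is just an
example interface name, the helpers require NETMAP_WITH_LIBS, and error
handling is mostly omitted.

	#include <stdio.h>
	#include <poll.h>
	#define NETMAP_WITH_LIBS	/* enable the nm_open()/nm_nextpkt() helpers */
	#include <net/netmap_user.h>

	int
	main(void)
	{
		struct nm_desc *d = nm_open("netmap:nfe0", NULL, 0, NULL);
		struct pollfd pfd;
		struct nm_pkthdr h;

		if (d == NULL)
			return 1;		/* netmap not available on this interface */
		pfd.fd = NETMAP_FD(d);
		pfd.events = POLLIN;
		for (;;) {
			poll(&pfd, 1, -1);	/* sleep until the RX rings have frames */
			while (nm_nextpkt(d, &h) != NULL)
				printf("received %u bytes\n", h.len);
		}
		nm_close(d);			/* not reached in this endless sketch */
		return 0;
	}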
Modified: soc2016/vincenzo/head/sys/dev/netmap/if_vtnet_netmap.h
==============================================================================
--- soc2016/vincenzo/head/sys/dev/netmap/if_vtnet_netmap.h Wed May 11 14:59:54 2016 (r302611)
+++ soc2016/vincenzo/head/sys/dev/netmap/if_vtnet_netmap.h Wed May 11 15:23:23 2016 (r302612)
@@ -32,6 +32,15 @@
#include <vm/vm.h>
#include <vm/pmap.h> /* vtophys ? */
#include <dev/netmap/netmap_kern.h>
+#ifdef WITH_PTNETMAP_GUEST
+#include <dev/netmap/netmap_virt.h>
+#include <dev/netmap/netmap_mem2.h>
+static int vtnet_ptnetmap_txsync(struct netmap_kring *kring, int flags);
+#define VTNET_PTNETMAP_ON(_na) \
+ ((nm_netmap_on(_na)) && ((_na)->nm_txsync == vtnet_ptnetmap_txsync))
+#else /* !WITH_PTNETMAP_GUEST */
+#define VTNET_PTNETMAP_ON(_na) 0
+#endif /* WITH_PTNETMAP_GUEST */
#define SOFTC_T vtnet_softc
@@ -353,6 +362,9 @@
struct netmap_adapter* na = NA(ifp);
unsigned int r;
+ /* if ptnetmap is enabled we must not init netmap buffers */
+ if (VTNET_PTNETMAP_ON(na))
+ return 1;
if (!nm_native_on(na))
return 0;
for (r = 0; r < na->num_rx_rings; r++) {
@@ -403,6 +415,331 @@
return 0;
}
+#ifdef WITH_PTNETMAP_GUEST
+/*
+ * ptnetmap support for: virtio-net (FreeBSD version)
+ *
+ * This part of the file is meant to be a reference on how to implement
+ * ptnetmap support for a network driver.
+ * It contains code, but only static or inline functions used
+ * by a single driver.
+ */
+
+/*
+ * virtio-specific macros and functions
+ */
+/* ptnetmap virtio register BASE */
+#define PTNETMAP_VIRTIO_IO_BASE sizeof(struct virtio_net_config)
+#ifndef VIRTIO_NET_F_PTNETMAP
+#define VIRTIO_NET_F_PTNETMAP 0x2000000 /* linux/qemu 25 */
+#endif /* VIRTIO_NET_F_PTNETMAP */
+
+static void inline
+vtnet_ptnetmap_iowrite4(device_t dev, uint32_t addr, uint32_t val)
+{
+ int i;
+ /*
+ * virtio_pci config_set uses multiple iowrite8 calls, so we split
+ * the access into bytes and reverse the order
+ */
+ for (i = 3; i >= 0; i--) {
+ virtio_write_dev_config_1(dev, PTNETMAP_VIRTIO_IO_BASE + addr + i,
+ *(((uint8_t *)&val) + i));
+ }
+}
+
+static uint32_t inline
+vtnet_ptnetmap_ioread4(device_t dev, uint32_t addr)
+{
+ uint32_t val;
+ int i;
+
+ for (i = 0; i <= 3; i++) {
+ *(((uint8_t *)&val) + i) = virtio_read_dev_config_1(dev,
+ PTNETMAP_VIRTIO_IO_BASE + addr + i);
+ }
+ return val;
+}
+
+/*
+ * CSB (Communication Status Block) allocation.
+ * CSB is the shared memory used by the netmap instance running in the guest
+ * and the ptnetmap kthreads in the host.
+ * The CSBBAH/CSBBAL registers must be added to the virtio-net device.
+ *
+ * Only called after netmap_pt_guest_attach().
+ */
+static struct paravirt_csb *
+vtnet_ptnetmap_alloc_csb(struct SOFTC_T *sc)
+{
+ device_t dev = sc->vtnet_dev;
+ struct paravirt_csb *csb;
+
+ vm_paddr_t csb_phyaddr;
+
+ csb = contigmalloc(NET_PARAVIRT_CSB_SIZE, M_DEVBUF,
+ M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0);
+ if (!csb) {
+ D("Communication Status Block allocation failed!");
+ return NULL;
+ }
+
+ csb_phyaddr = vtophys(csb);
+
+ csb->guest_csb_on = 1;
+
+ /* Tell the device the CSB physical address. */
+ vtnet_ptnetmap_iowrite4(dev, PTNETMAP_VIRTIO_IO_CSBBAH,
+ (uint32_t)(csb_phyaddr >> 32));
+ vtnet_ptnetmap_iowrite4(dev, PTNETMAP_VIRTIO_IO_CSBBAL,
+ (uint32_t)(csb_phyaddr));
+
+ return csb;
+}
+
+/*
+ * CSB (Communication Status Block) deallocation.
+ */
+static void
+vtnet_ptnetmap_free_csb(struct SOFTC_T *sc)
+{
+ device_t dev = sc->vtnet_dev;
+ struct ifnet *ifp = sc->vtnet_ifp;
+ struct netmap_pt_guest_adapter* ptna =
+ (struct netmap_pt_guest_adapter *)NA(ifp);
+
+ if (ptna->csb) {
+ /* CSB deallocation protocol. */
+ vtnet_ptnetmap_iowrite4(dev, PTNETMAP_VIRTIO_IO_CSBBAH, 0x0ULL);
+ vtnet_ptnetmap_iowrite4(dev, PTNETMAP_VIRTIO_IO_CSBBAL, 0x0ULL);
+
+ contigfree(ptna->csb, NET_PARAVIRT_CSB_SIZE, M_DEVBUF);
+ ptna->csb = NULL;
+ }
+}
+
+static uint32_t vtnet_ptnetmap_ptctl(struct ifnet *, uint32_t);
+
+/*
+ * Returns device configuration from the CSB, after sending the PTCTL_CONFIG
+ * command to the host (hypervisor virtio frontend).
+ * The host reads the configuration from the netmap port (opened in the host)
+ * and stores the values in the CSB.
+ */
+static int
+vtnet_ptnetmap_config(struct netmap_adapter *na,
+ u_int *txr, u_int *txd, u_int *rxr, u_int *rxd)
+{
+ struct netmap_pt_guest_adapter *ptna =
+ (struct netmap_pt_guest_adapter *)na;
+ struct paravirt_csb *csb = ptna->csb;
+ int ret;
+
+ if (csb == NULL)
+ return EINVAL;
+
+ ret = vtnet_ptnetmap_ptctl(na->ifp, NET_PARAVIRT_PTCTL_CONFIG);
+ if (ret)
+ return ret;
+
+ *txr = 1; //*txr = csb->num_tx_rings;
+ *rxr = 1; //*rxr = csb->num_rx_rings;
+ *txd = csb->num_tx_slots;
+ *rxd = csb->num_rx_slots;
+
+ ND("txr %u rxr %u txd %u rxd %u",
+ *txr, *rxr, *txd, *rxd);
+ return 0;
+}
+
+/*
+ * Reconcile host and guest view of the transmit ring.
+ * Use generic netmap_pt_guest_txsync().
+ * Only the notification to the host is device-specific.
+ */
+static int
+vtnet_ptnetmap_txsync(struct netmap_kring *kring, int flags)
+{
+ struct netmap_adapter *na = kring->na;
+ struct netmap_pt_guest_adapter *ptna =
+ (struct netmap_pt_guest_adapter *)na;
+ struct paravirt_csb *csb = ptna->csb;
+ struct ifnet *ifp = na->ifp;
+ u_int ring_nr = kring->ring_id;
+ struct SOFTC_T *sc = ifp->if_softc;
+ struct virtqueue *vq = sc->vtnet_txqs[ring_nr].vtntx_vq;
+ bool notify;
+
+ notify = netmap_pt_guest_txsync(&csb->tx_ring, kring, flags);
+ if (notify)
+ virtqueue_notify(vq);
+
+ ND("TX - vq_index: %d", vq->index);
+
+ return 0;
+}
+
+/*
+ * Reconcile host and guest view of the receive ring.
+ * Use generic netmap_pt_guest_rxsync().
+ * Only the notification to the host is device-specific.
+ */
+static int
+vtnet_ptnetmap_rxsync(struct netmap_kring *kring, int flags)
+{
+ struct netmap_adapter *na = kring->na;
+ struct netmap_pt_guest_adapter *ptna =
+ (struct netmap_pt_guest_adapter *)na;
+ struct paravirt_csb *csb = ptna->csb;
+ struct ifnet *ifp = na->ifp;
+ u_int ring_nr = kring->ring_id;
+ struct SOFTC_T *sc = ifp->if_softc;
+ struct virtqueue *vq = sc->vtnet_rxqs[ring_nr].vtnrx_vq;
+ bool notify;
+
+ notify = netmap_pt_guest_rxsync(&csb->rx_ring, kring, flags);
+ if (notify)
+ virtqueue_notify(vq);
+
+ ND("RX - vq_index: %d", vq->index);
+
+ return 0;
+}
+
+/*
+ * Register/unregister. We are already under netmap lock.
+ * Only called on the first register or the last unregister.
+ */
+static int
+vtnet_ptnetmap_reg(struct netmap_adapter *na, int onoff)
+{
+ struct netmap_pt_guest_adapter *ptna =
+ (struct netmap_pt_guest_adapter *)na;
+
+ /* device-specific */
+ struct ifnet *ifp = na->ifp;
+ struct SOFTC_T *sc = ifp->if_softc;
+ struct paravirt_csb *csb = ptna->csb;
+ struct netmap_kring *kring;
+ int ret = 0;
+
+ if (na == NULL)
+ return EINVAL;
+
+ VTNET_CORE_LOCK(sc);
+ /* enable or disable flags and callbacks in na and ifp */
+ if (onoff) {
+ int i;
+ nm_set_native_flags(na);
+ /* push fake-elem in the tx queues to enable interrupts */
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ struct vtnet_txq *txq = &sc->vtnet_txqs[i];
+ struct mbuf *m0;
+ m0 = m_gethdr(M_NOWAIT, MT_DATA);
+ m0->m_len = 64;
+
+ if (m0) {
+ ret = vtnet_txq_encap(txq, &m0);
+ }
+ }
+ ret = vtnet_ptnetmap_ptctl(na->ifp, NET_PARAVIRT_PTCTL_REGIF);
+ if (ret) {
+ //na->na_flags &= ~NAF_NETMAP_ON;
+ nm_clear_native_flags(na);
+ goto out;
+ }
+ /*
+ * Init ring and kring pointers
+ * After PARAVIRT_PTCTL_REGIF, the csb contains a snapshot of the
+ * host kring pointers.
+ * XXX This initialization is required, because we don't close
+ * the host port on UNREGIF.
+ */
+ // Init rx ring
+ kring = na->rx_rings;
+ kring->rhead = kring->ring->head = csb->rx_ring.head;
+ kring->rcur = kring->ring->cur = csb->rx_ring.cur;
+ kring->nr_hwcur = csb->rx_ring.hwcur;
+ kring->nr_hwtail = kring->rtail = kring->ring->tail =
+ csb->rx_ring.hwtail;
+
+ // Init tx ring
+ kring = na->tx_rings;
+ kring->rhead = kring->ring->head = csb->tx_ring.head;
+ kring->rcur = kring->ring->cur = csb->tx_ring.cur;
+ kring->nr_hwcur = csb->tx_ring.hwcur;
+ kring->nr_hwtail = kring->rtail = kring->ring->tail =
+ csb->tx_ring.hwtail;
+ } else {
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ //na->na_flags &= ~NAF_NETMAP_ON;
+ nm_clear_native_flags(na);
+ ret = vtnet_ptnetmap_ptctl(na->ifp, NET_PARAVIRT_PTCTL_UNREGIF);
+ vtnet_init_locked(sc); /* also enable intr */
+ }
+out:
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***