socsvn commit: r289213 - in soc2015/stefano/ptnetmap/stable/10/sys: conf dev/netmap net
stefano at FreeBSD.org
stefano at FreeBSD.org
Tue Aug 4 15:36:52 UTC 2015
Author: stefano
Date: Tue Aug 4 15:36:49 2015
New Revision: 289213
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=289213
Log:
[ptnetmap] extend ptnetmap kthread support for FreeBSD:
- add ioctl to send notification (irq) to vm
- add tsleep() and implement nm_kthread_wakeup_worker()
for FreeBSD to receive notification.
- generalize ptnetmap_txsync() and ptnetmap_rxsync()
- cleanup
Modified:
soc2015/stefano/ptnetmap/stable/10/sys/conf/files
soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/if_lem_netmap.h
soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/if_vtnet_netmap.h
soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap.c
soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap_freebsd.c
soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap_kern.h
soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap_mem2.c
soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap_monitor.c
soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap_pipe.c
soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap_vale.c
soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap_virt.h
soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/ptnetmap.c
soc2015/stefano/ptnetmap/stable/10/sys/net/netmap.h
Modified: soc2015/stefano/ptnetmap/stable/10/sys/conf/files
==============================================================================
--- soc2015/stefano/ptnetmap/stable/10/sys/conf/files Tue Aug 4 15:22:52 2015 (r289212)
+++ soc2015/stefano/ptnetmap/stable/10/sys/conf/files Tue Aug 4 15:36:49 2015 (r289213)
@@ -1941,6 +1941,7 @@
dev/netmap/netmap_offloadings.c optional netmap
dev/netmap/netmap_pipe.c optional netmap
dev/netmap/netmap_vale.c optional netmap
+dev/netmap/ptnetmap.c optional netmap
dev/nge/if_nge.c optional nge
dev/nxge/if_nxge.c optional nxge \
compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}"
Modified: soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/if_lem_netmap.h
==============================================================================
--- soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/if_lem_netmap.h Tue Aug 4 15:22:52 2015 (r289212)
+++ soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/if_lem_netmap.h Tue Aug 4 15:36:49 2015 (r289213)
@@ -472,7 +472,7 @@
}
#if defined (NIC_PTNETMAP) && defined (WITH_PTNETMAP_GUEST)
-static uint32_t lem_netmap_ptctl(struct ifnet *, uint32_t);
+static uint32_t lem_ptnetmap_ptctl(struct ifnet *, uint32_t);
static int
lem_ptnetmap_config(struct netmap_adapter *na,
@@ -486,7 +486,7 @@
if (csb == NULL)
return EINVAL;
- ret = lem_netmap_ptctl(ifp, NET_PARAVIRT_PTCTL_CONFIG);
+ ret = lem_ptnetmap_ptctl(ifp, NET_PARAVIRT_PTCTL_CONFIG);
if (ret)
return ret;
@@ -508,54 +508,14 @@
//u_int ring_nr = kring->ring_id;
struct ifnet *ifp = na->ifp;
struct adapter *adapter = ifp->if_softc;
- struct paravirt_csb *csb = adapter->csb;
- bool send_kick = false;
-
- /* Disable notifications */
- csb->guest_need_txkick = 0;
+ int ret, notify = 0;
- /*
- * First part: process new packets to send.
- */
- kring->nr_hwcur = csb->tx_ring.hwcur;
- ptnetmap_guest_write_kring_csb(&csb->tx_ring, kring->rcur, kring->rhead);
- if (kring->rhead != kring->nr_hwcur) {
- send_kick = true;
- }
+ ret = ptnetmap_txsync(kring, flags, &notify);
- /* Send kick to the host if it needs them */
- if ((send_kick && ACCESS_ONCE(csb->host_need_txkick)) || (flags & NAF_FORCE_RECLAIM)) {
- csb->tx_ring.sync_flags = flags;
+ if (notify)
E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
- }
- /*
- * Second part: reclaim buffers for completed transmissions.
- */
- if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
- ptnetmap_guest_read_kring_csb(&csb->tx_ring, &kring->nr_hwcur, &kring->nr_hwtail, kring->nkr_num_slots);
- }
-
- /*
- * Ring full. The user thread will go to sleep and
- * we need a notification (interrupt) from the NIC,
- * whene there is free space.
- */
- if (kring->rcur == kring->nr_hwtail) {
- /* Reenable notifications. */
- csb->guest_need_txkick = 1;
- /* Double check */
- ptnetmap_guest_read_kring_csb(&csb->tx_ring, &kring->nr_hwcur, &kring->nr_hwtail, kring->nkr_num_slots);
- /* If there is new free space, disable notifications */
- if (kring->rcur != kring->nr_hwtail) {
- csb->guest_need_txkick = 0;
- }
- }
-
- ND("TX - CSB: head:%u cur:%u hwtail:%u - KRING: head:%u cur:%u",
- csb->tx_ring.head, csb->tx_ring.cur, csb->tx_ring.hwtail, kring->rhead, kring->rcur);
-
- return 0;
+ return ret;
}
static int
@@ -565,59 +525,14 @@
//u_int ring_nr = kring->ring_id;
struct ifnet *ifp = na->ifp;
struct adapter *adapter = ifp->if_softc;
- struct paravirt_csb *csb = adapter->csb;
-
- int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
- uint32_t h_hwcur = kring->nr_hwcur, h_hwtail = kring->nr_hwtail;
+ int ret, notify = 0;
- /* Disable notifications */
- csb->guest_need_rxkick = 0;
-
- ptnetmap_guest_read_kring_csb(&csb->rx_ring, &h_hwcur, &h_hwtail, kring->nkr_num_slots);
-
- /*
- * First part: import newly received packets.
- */
- if (netmap_no_pendintr || force_update) {
- kring->nr_hwtail = h_hwtail;
- kring->nr_kflags &= ~NKR_PENDINTR;
- }
-
- /*
- * Second part: skip past packets that userspace has released.
- */
- kring->nr_hwcur = h_hwcur;
- if (kring->rhead != kring->nr_hwcur) {
- ptnetmap_guest_write_kring_csb(&csb->rx_ring, kring->rcur, kring->rhead);
- /* Send kick to the host if it needs them */
- if (ACCESS_ONCE(csb->host_need_rxkick)) {
- csb->rx_ring.sync_flags = flags;
- E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), 0);
- }
- }
-
- /*
- * Ring empty. The user thread will go to sleep and
- * we need a notification (interrupt) from the NIC,
- * whene there are new packets.
- */
- if (kring->rcur == kring->nr_hwtail) {
- /* Reenable notifications. */
- csb->guest_need_rxkick = 1;
- /* Double check */
- ptnetmap_guest_read_kring_csb(&csb->rx_ring, &kring->nr_hwcur, &kring->nr_hwtail, kring->nkr_num_slots);
- /* If there are new packets, disable notifications */
- if (kring->rcur != kring->nr_hwtail) {
- csb->guest_need_rxkick = 0;
- }
- }
-
- ND("RX - CSB: head:%u cur:%u hwtail:%u - KRING: head:%u cur:%u",
- csb->rx_ring.head, csb->rx_ring.cur, csb->rx_ring.hwtail, kring->rhead, kring->rcur);
-
- return 0;
+ ret = ptnetmap_rxsync(kring, flags, &notify);
+ if (notify)
+ E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), 0);
+ return ret;
}
static int
@@ -630,7 +545,7 @@
int ret;
if (onoff) {
- ret = lem_netmap_ptctl(ifp, NET_PARAVIRT_PTCTL_REGIF);
+ ret = lem_ptnetmap_ptctl(ifp, NET_PARAVIRT_PTCTL_REGIF);
if (ret)
return ret;
@@ -660,7 +575,7 @@
} else {
na->na_flags &= ~NAF_NETMAP_ON;
adapter->ptnetmap_enabled = 0;
- ret = lem_netmap_ptctl(ifp, NET_PARAVIRT_PTCTL_UNREGIF);
+ ret = lem_ptnetmap_ptctl(ifp, NET_PARAVIRT_PTCTL_UNREGIF);
}
return lem_netmap_reg(na, onoff);
@@ -673,16 +588,17 @@
return EOPNOTSUPP;
}
-static struct paravirt_csb *
-lem_netmap_getcsb(struct ifnet *ifp)
+static void
+lem_ptnetmap_setup_csb(struct adapter *adapter)
{
- struct adapter *adapter = ifp->if_softc;
+ struct ifnet *ifp = adapter->ifp;
+ struct netmap_pt_guest_adapter* ptna = (struct netmap_pt_guest_adapter *)NA(ifp);
- return adapter->csb;
+ ptna->csb = adapter->csb;
}
static uint32_t
-lem_netmap_ptctl(struct ifnet *ifp, uint32_t val)
+lem_ptnetmap_ptctl(struct ifnet *ifp, uint32_t val)
{
struct adapter *adapter = ifp->if_softc;
uint32_t ret;
@@ -704,22 +620,21 @@
E1000_WRITE_REG(&adapter->hw, E1000_PTFEAT, NET_PTN_FEATURES_BASE);
/* get back the acknowledged features */
features = E1000_READ_REG(&adapter->hw, E1000_PTFEAT);
- device_printf(adapter->dev, "netmap passthrough: %s\n",
+ device_printf(adapter->dev, "ptnetmap support: %s\n",
(features & NET_PTN_FEATURES_BASE) ? "base" :
"none");
return features;
}
static struct netmap_pt_guest_ops lem_ptnetmap_ops = {
- .nm_getcsb = lem_netmap_getcsb,
- .nm_ptctl = lem_netmap_ptctl,
+ .nm_ptctl = lem_ptnetmap_ptctl,
};
#elif defined (NIC_PTNETMAP)
#warning "if_lem supports ptnetmap but netmap does not support it"
-#warning "(configure netmap with passthrough support)"
+#warning "(configure netmap with ptnetmap support)"
#elif defined (WITH_PTNETMAP_GUEST)
#warning "netmap supports ptnetmap but e1000 does not support it"
-#warning "(configure if_lem with passthrough support)"
+#warning "(configure if_lem with ptnetmap support)"
#endif /* NIC_PTNETMAP && WITH_PTNETMAP_GUEST */
static void
@@ -747,6 +662,7 @@
na.nm_rxsync = lem_ptnetmap_rxsync;
na.nm_bdg_attach = lem_ptnetmap_bdg_attach; /* XXX */
netmap_pt_guest_attach(&na, &lem_ptnetmap_ops);
+ lem_ptnetmap_setup_csb(adapter);
} else
#endif /* NIC_PTNETMAP && defined WITH_PTNETMAP_GUEST */
netmap_attach(&na);
Modified: soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/if_vtnet_netmap.h
==============================================================================
--- soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/if_vtnet_netmap.h Tue Aug 4 15:22:52 2015 (r289212)
+++ soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/if_vtnet_netmap.h Tue Aug 4 15:36:49 2015 (r289213)
@@ -418,7 +418,7 @@
#define PTNETMAP_VIRTIO_IO_BASE sizeof(struct virtio_net_config)
#ifndef VIRTIO_NET_F_PTNETMAP
-#define VIRTIO_NET_F_PTNETMAP 0x1000000 /* linux/qeum 24 */
+#define VIRTIO_NET_F_PTNETMAP 0x2000000 /* linux/qemu 25 */
#endif /* VIRTIO_NET_F_PTNETMAP */
static void inline
@@ -520,130 +520,40 @@
vtnet_ptnetmap_txsync(struct netmap_kring *kring, int flags)
{
struct netmap_adapter *na = kring->na;
- struct netmap_pt_guest_adapter *ptna = (struct netmap_pt_guest_adapter *)na;
struct ifnet *ifp = na->ifp;
u_int ring_nr = kring->ring_id;
-
- /* device-specific */
struct SOFTC_T *sc = ifp->if_softc;
- struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
- struct virtqueue *vq = txq->vtntx_vq;
- struct paravirt_csb *csb = ptna->csb;
- bool send_kick = false;
+ struct virtqueue *vq = sc->vtnet_txqs[ring_nr].vtntx_vq;
+ int ret, notify = 0;
- /* Disable notifications */
- csb->guest_need_txkick = 0;
+ ret = ptnetmap_txsync(kring, flags, &notify);
- /*
- * First part: process new packets to send.
- */
- kring->nr_hwcur = csb->tx_ring.hwcur;
- ptnetmap_guest_write_kring_csb(&csb->tx_ring, kring->rcur, kring->rhead);
- if (kring->rhead != kring->nr_hwcur) {
- send_kick = true;
- }
-
- /* Send kick to the host if it needs them */
- if ((send_kick && ACCESS_ONCE(csb->host_need_txkick)) || (flags & NAF_FORCE_RECLAIM)) {
- csb->tx_ring.sync_flags = flags;
+ if (notify)
virtqueue_notify(vq);
- }
-
- /*
- * Second part: reclaim buffers for completed transmissions.
- */
- if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
- ptnetmap_guest_read_kring_csb(&csb->tx_ring, &kring->nr_hwcur, &kring->nr_hwtail, kring->nkr_num_slots);
- }
- /*
- * Ring full. The user thread will go to sleep and
- * we need a notification (interrupt) from the NIC,
- * whene there is free space.
- */
- if (kring->rcur == kring->nr_hwtail) {
- /* Reenable notifications. */
- csb->guest_need_txkick = 1;
- /* Double check */
- ptnetmap_guest_read_kring_csb(&csb->tx_ring, &kring->nr_hwcur, &kring->nr_hwtail, kring->nkr_num_slots);
- /* If there is new free space, disable notifications */
- if (kring->rcur != kring->nr_hwtail) {
- csb->guest_need_txkick = 0;
- }
- }
-
-
- ND(1,"TX - CSB: head:%u cur:%u hwtail:%u - KRING: head:%u cur:%u tail: %u",
- csb->tx_ring.head, csb->tx_ring.cur, csb->tx_ring.hwtail, kring->rhead, kring->rcur, kring->nr_hwtail);
ND("TX - vq_index: %d", vq->index);
- return 0;
+ return ret;
}
static int
vtnet_ptnetmap_rxsync(struct netmap_kring *kring, int flags)
{
struct netmap_adapter *na = kring->na;
- struct netmap_pt_guest_adapter *ptna = (struct netmap_pt_guest_adapter *)na;
struct ifnet *ifp = na->ifp;
u_int ring_nr = kring->ring_id;
-
- /* device-specific */
struct SOFTC_T *sc = ifp->if_softc;
- struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
- struct virtqueue *vq = rxq->vtnrx_vq;
- struct paravirt_csb *csb = ptna->csb;
-
- int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
- uint32_t h_hwcur = kring->nr_hwcur, h_hwtail = kring->nr_hwtail;
-
- /* Disable notifications */
- csb->guest_need_rxkick = 0;
+ struct virtqueue *vq = sc->vtnet_rxqs[ring_nr].vtnrx_vq;
+ int ret, notify = 0;
- ptnetmap_guest_read_kring_csb(&csb->rx_ring, &h_hwcur, &h_hwtail, kring->nkr_num_slots);
-
- /*
- * First part: import newly received packets.
- */
- if (netmap_no_pendintr || force_update) {
- kring->nr_hwtail = h_hwtail;
- kring->nr_kflags &= ~NKR_PENDINTR;
- }
-
- /*
- * Second part: skip past packets that userspace has released.
- */
- kring->nr_hwcur = h_hwcur;
- if (kring->rhead != kring->nr_hwcur) {
- ptnetmap_guest_write_kring_csb(&csb->rx_ring, kring->rcur, kring->rhead);
- /* Send kick to the host if it needs them */
- if (ACCESS_ONCE(csb->host_need_rxkick)) {
- csb->rx_ring.sync_flags = flags;
- virtqueue_notify(vq);
- }
- }
+ ret = ptnetmap_rxsync(kring, flags, &notify);
- /*
- * Ring empty. The user thread will go to sleep and
- * we need a notification (interrupt) from the NIC,
- * whene there are new packets.
- */
- if (kring->rcur == kring->nr_hwtail) {
- /* Reenable notifications. */
- csb->guest_need_rxkick = 1;
- /* Double check */
- ptnetmap_guest_read_kring_csb(&csb->rx_ring, &kring->nr_hwcur, &kring->nr_hwtail, kring->nkr_num_slots);
- /* If there are new packets, disable notifications */
- if (kring->rcur != kring->nr_hwtail) {
- csb->guest_need_rxkick = 0;
- }
- }
+ if (notify)
+ virtqueue_notify(vq);
- ND("RX - CSB: head:%u cur:%u hwtail:%u - KRING: head:%u cur:%u",
- csb->rx_ring.head, csb->rx_ring.cur, csb->rx_ring.hwtail, kring->rhead, kring->rcur);
ND("RX - vq_index: %d", vq->index);
- return 0;
+ return ret;
}
static int
@@ -721,14 +631,6 @@
return EOPNOTSUPP;
}
-static struct paravirt_csb *
-vtnet_ptnetmap_getcsb(struct ifnet *ifp)
-{
- struct netmap_pt_guest_adapter *ptna = (struct netmap_pt_guest_adapter *)NA(ifp);
-
- return ptna->csb;
-}
-
static uint32_t
vtnet_ptnetmap_ptctl(struct ifnet *ifp, uint32_t val)
{
@@ -753,7 +655,7 @@
vtnet_ptnetmap_iowrite4(dev, PTNETMAP_VIRTIO_IO_PTFEAT, NET_PTN_FEATURES_BASE);
/* get back the acknowledged features */
features = vtnet_ptnetmap_ioread4(dev, PTNETMAP_VIRTIO_IO_PTFEAT);
- D("netmap passthrough: %s\n",
+ D("ptnetmap support: %s\n",
(features & NET_PTN_FEATURES_BASE) ? "base" :
"none");
return features;
@@ -769,7 +671,6 @@
}
static struct netmap_pt_guest_ops vtnet_ptnetmap_ops = {
- .nm_getcsb = vtnet_ptnetmap_getcsb, /* TODO: remove */
.nm_ptctl = vtnet_ptnetmap_ptctl,
};
#endif /* WITH_PTNETMAP_GUEST */
Modified: soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap.c
==============================================================================
--- soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap.c Tue Aug 4 15:22:52 2015 (r289212)
+++ soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap.c Tue Aug 4 15:36:49 2015 (r289213)
@@ -542,6 +542,7 @@
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW, &netmap_generic_rings, 0 , "");
NMG_LOCK_T netmap_global_lock;
+int netmap_use_count = 0; /* number of active netmap instances */
/*
* mark the ring as stopped, and run through the locks
@@ -726,6 +727,9 @@
return 1;
}
+static void netmap_txsync_to_host(struct netmap_adapter *na);
+static int netmap_rxsync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait);
+
/* kring->nm_sync callback for the host tx ring */
static int
netmap_txsync_to_host_compat(struct netmap_kring *kring, int flags)
@@ -959,11 +963,12 @@
}
/*
- * Destructor of the netmap_priv_d, called when the fd has
- * no active open() and mmap().
- * Undo all the things done by NIOCREGIF.
+ * Destructor of the netmap_priv_d, called when the fd is closed
+ * Action: undo all the things done by NIOCREGIF,
+ * On FreeBSD we need to track whether there are active mmap()s,
+ * and we use np_active_mmaps for that. On linux, the field is always 0.
+ * Return: 1 if we can free priv, 0 otherwise.
*
- * returns 1 if this is the last instance and we can free priv
*/
/* call with NMG_LOCK held */
int
@@ -971,17 +976,13 @@
{
struct netmap_adapter *na = priv->np_na;
-#ifdef __FreeBSD__
- /*
- * np_refcount is the number of active mmaps on
- * this file descriptor
- */
- if (--priv->np_refcount > 0) {
+ /* number of active references to this fd */
+ if (--priv->np_refs > 0) {
return 0;
}
-#endif /* __FreeBSD__ */
+ netmap_use_count--;
if (!na) {
- return 1; //XXX is it correct?
+ return 1; //XXX is it correct?
}
netmap_do_unregif(priv);
netmap_adapter_put(na);
@@ -1139,7 +1140,7 @@
* can be among multiple user threads erroneously calling
* this routine concurrently.
*/
-void
+static void
netmap_txsync_to_host(struct netmap_adapter *na)
{
struct netmap_kring *kring = &na->tx_rings[na->num_tx_rings];
@@ -1177,7 +1178,7 @@
* returns the number of packets delivered to tx queues in
* transparent mode, or a negative value if error
*/
-int
+static int
netmap_rxsync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait)
{
struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
@@ -1388,7 +1389,7 @@
* !0 !NULL impossible
*/
- /* try to see if this is a passthrough port */
+ /* try to see if this is a ptnetmap port */
error = netmap_get_pt_host_na(nmr, na, create);
if (error || *na != NULL)
return error;
@@ -1454,15 +1455,17 @@
* hwcur, rhead, rtail and hwtail are reliable
*/
u_int
-nm_txsync_prologue(struct netmap_kring *kring, uint32_t head, uint32_t cur, uint32_t *tail)
+nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
{
#define NM_ASSERT(t) if (t) { D("fail " #t); goto error; }
+ u_int head = ring->head; /* read only once */
+ u_int cur = ring->cur; /* read only once */
u_int n = kring->nkr_num_slots;
ND(5, "%s kcur %d ktail %d head %d cur %d tail %d",
kring->name,
kring->nr_hwcur, kring->nr_hwtail,
- head, cur, tail ? *tail : 0);
+ ring->head, ring->cur, ring->tail);
#if 1 /* kernel sanity checks; but we can trust the kring. */
if (kring->nr_hwcur >= n || kring->rhead >= n ||
kring->rtail >= n || kring->nr_hwtail >= n)
@@ -1495,10 +1498,10 @@
NM_ASSERT(cur > kring->rtail && cur < head);
}
}
- if (tail && *tail != kring->rtail) {
+ if (ring->tail != kring->rtail) {
RD(5, "tail overwritten was %d need %d",
- *tail, kring->rtail);
- *tail = kring->rtail;
+ ring->tail, kring->rtail);
+ ring->tail = kring->rtail;
}
kring->rhead = head;
kring->rcur = cur;
@@ -1507,7 +1510,7 @@
error:
RD(5, "%s kring error: head %d cur %d tail %d rhead %d rcur %d rtail %d hwcur %d hwtail %d",
kring->name,
- head, cur, tail ? *tail : 0,
+ head, cur, ring->tail,
kring->rhead, kring->rcur, kring->rtail,
kring->nr_hwcur, kring->nr_hwtail);
return n;
@@ -1527,14 +1530,15 @@
*
*/
u_int
-nm_rxsync_prologue(struct netmap_kring *kring, uint32_t head, uint32_t cur, uint32_t *tail)
+nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
{
uint32_t const n = kring->nkr_num_slots;
+ uint32_t head, cur;
ND(5,"%s kc %d kt %d h %d c %d t %d",
kring->name,
kring->nr_hwcur, kring->nr_hwtail,
- head, cur, tail ? *tail : 0);
+ ring->head, ring->cur, ring->tail);
/*
* Before storing the new values, we should check they do not
* move backwards. However:
@@ -1542,8 +1546,8 @@
* - cur could in principle go back, however it does not matter
* because we are processing a brand new rxsync()
*/
- kring->rcur = cur; /* read only once */
- kring->rhead = head; /* read only once */
+ cur = kring->rcur = ring->cur; /* read only once */
+ head = kring->rhead = ring->head; /* read only once */
#if 1 /* kernel sanity checks */
if (kring->nr_hwcur >= n || kring->nr_hwtail >= n)
goto error;
@@ -1571,11 +1575,11 @@
goto error;
}
}
- if (tail && *tail != kring->rtail) {
+ if (ring->tail != kring->rtail) {
RD(5, "%s tail overwritten was %d need %d",
kring->name,
- *tail, kring->rtail);
- *tail = kring->rtail;
+ ring->tail, kring->rtail);
+ ring->tail = kring->rtail;
}
return head;
@@ -1583,7 +1587,7 @@
RD(5, "kring error: hwcur %d rcur %d hwtail %d head %d cur %d tail %d",
kring->nr_hwcur,
kring->rcur, kring->nr_hwtail,
- kring->rhead, kring->rcur, tail ? *tail : 0);
+ kring->rhead, kring->rcur, ring->tail);
return n;
}
@@ -2025,6 +2029,41 @@
}
+/*
+ * update kring and ring at the end of txsync.
+ */
+static inline void
+nm_txsync_finalize(struct netmap_kring *kring)
+{
+ /* update ring tail to what the kernel knows */
+ kring->ring->tail = kring->rtail = kring->nr_hwtail;
+
+ /* note, head/rhead/hwcur might be behind cur/rcur
+ * if no carrier
+ */
+ ND(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
+ kring->name, kring->nr_hwcur, kring->nr_hwtail,
+ kring->rhead, kring->rcur, kring->rtail);
+}
+
+
+/*
+ * update kring and ring at the end of rxsync
+ */
+static inline void
+nm_rxsync_finalize(struct netmap_kring *kring)
+{
+ /* tell userspace that there might be new packets */
+ //struct netmap_ring *ring = kring->ring;
+ ND("head %d cur %d tail %d -> %d", ring->head, ring->cur, ring->tail,
+ kring->nr_hwtail);
+ kring->ring->tail = kring->rtail = kring->nr_hwtail;
+ /* make a copy of the state for next round */
+ kring->rhead = kring->ring->head;
+ kring->rcur = kring->ring->cur;
+}
+
+
/*
* ioctl(2) support for the "netmap" device.
@@ -2232,8 +2271,7 @@
D("pre txsync ring %d cur %d hwcur %d",
i, ring->cur,
kring->nr_hwcur);
- if (nm_txsync_prologue(kring, ring->head, ring->cur,
- &ring->tail) >= kring->nkr_num_slots) {
+ if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
netmap_ring_reinit(kring);
} else if (kring->nm_sync(kring, NAF_FORCE_RECLAIM) == 0) {
nm_txsync_finalize(kring);
@@ -2243,8 +2281,7 @@
i, ring->cur,
kring->nr_hwcur);
} else {
- if (nm_rxsync_prologue(kring, ring->head, ring->cur,
- &ring->tail) >= kring->nkr_num_slots) {
+ if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
netmap_ring_reinit(kring);
} else if (kring->nm_sync(kring, NAF_FORCE_READ) == 0) {
nm_rxsync_finalize(kring);
@@ -2440,8 +2477,7 @@
priv, i);
continue;
}
- if (nm_txsync_prologue(kring, ring->head, ring->cur,
- &ring->tail) >= kring->nkr_num_slots) {
+ if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
netmap_ring_reinit(kring);
revents |= POLLERR;
} else {
@@ -2494,8 +2530,7 @@
continue;
}
- if (nm_rxsync_prologue(kring, ring->head, ring->cur,
- &ring->tail) >= kring->nkr_num_slots) {
+ if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
netmap_ring_reinit(kring);
revents |= POLLERR;
}
Modified: soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap_freebsd.c
==============================================================================
--- soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap_freebsd.c Tue Aug 4 15:22:52 2015 (r289212)
+++ soc2015/stefano/ptnetmap/stable/10/sys/dev/netmap/netmap_freebsd.c Tue Aug 4 15:36:49 2015 (r289213)
@@ -35,6 +35,7 @@
#include <sys/kernel.h> /* types used in module initialization */
#include <sys/conf.h> /* DEV_MODULE */
#include <sys/endian.h>
+#include <sys/syscallsubr.h> /* kern_ioctl() */
#include <sys/rwlock.h>
@@ -823,7 +824,7 @@
goto err_unlock;
}
vmh->priv = priv;
- priv->np_refcount++;
+ priv->np_refs++;
NMG_UNLOCK();
obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
@@ -840,7 +841,7 @@
err_deref:
NMG_LOCK();
- priv->np_refcount--;
+ priv->np_refs--;
err_unlock:
NMG_UNLOCK();
// err:
@@ -849,14 +850,14 @@
}
/*
- * netmap_close() is called on every close(), but we do not need to do
- * anything at that moment, since the process may have other open file
- * descriptors for /dev/netmap. Instead, we pass netmap_dtor() to
+ * On FreeBSD the close routine is only called on the last close on
+ * the device (/dev/netmap) so we cannot do anything useful.
+ * To track close() on individual file descriptors we pass netmap_dtor() to
* devfs_set_cdevpriv() on open(). The FreeBSD kernel will call the destructor
* when the last fd pointing to the device is closed.
*
- * Unfortunately, FreeBSD does not automatically track active mmap()s on an fd,
- * so we have to track them by ourselvesi (see above). The result is that
+ * Note that FreeBSD does not even munmap() on close() so we also have
+ * to track mmap() ourselves, and postpone the call to
* netmap_dtor() is called when the process has no open fds and no active
* memory maps on /dev/netmap, as in linux.
*/
@@ -881,36 +882,29 @@
(void)devtype;
(void)td;
- // XXX wait or nowait ?
priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (priv == NULL)
return ENOMEM;
-
+ priv->np_refs = 1;
error = devfs_set_cdevpriv(priv, netmap_dtor);
- if (error)
- return error;
-
- priv->np_refcount = 1;
-
- return 0;
+ if (error) {
+ free(priv, M_DEVBUF);
+ } else {
+ NMG_LOCK();
+ netmap_use_count++;
+ NMG_UNLOCK();
+ }
+ return error;
}
/******************** kthread wrapper ****************/
+#include <sys/sysproto.h>
+
struct nm_kthread_ctx {
- void *ioevent_file;
- void *irq_file;
-#if 0 /* to be dane after eventfd implementation */
- /* files to exchange notifications */
- struct file *ioevent_file; /* notification from guest */
- struct file *irq_file; /* notification to guest (interrupt) */
- struct eventfd_ctx *irq_ctx;
-
- /* poll ioeventfd to receive notification from the guest */
- poll_table poll_table;
- wait_queue_head_t *waitq_head;
- wait_queue_t waitq;
-#endif /* 0 */
+ struct thread *user_td; /* thread user-space (kthread creator) to send ioctl */
+ struct ioctl_args irq_ioctl; /* notification to guest (interrupt) */
+ void *ioevent_file; /* notification from guest */
/* worker function and parameter */
nm_kthread_worker_fn_t worker_fn;
@@ -923,46 +917,45 @@
};
struct nm_kthread {
- //struct mm_struct *mm;
struct thread *worker;
struct mtx worker_lock;
- uint64_t scheduled; /* currently not used */
+ uint64_t scheduled; /* pending wake_up request */
struct nm_kthread_ctx worker_ctx;
+ int run; /* used to stop kthread */
int affinity;
};
void inline
nm_kthread_wakeup_worker(struct nm_kthread *nmk)
{
- (void)nmk;
+ /*
+ * There may be a race between FE and BE,
+ * which call both this function, and worker kthread,
+ * that reads nmk->scheduled.
+ *
+ * For us it is not important the counter value,
+ * but simply that it has changed since the last
+ * time the kthread saw it.
+ */
+ nmk->scheduled++;
+ if (nmk->worker_ctx.ioevent_file) {
+ wakeup(nmk->worker_ctx.ioevent_file);
+ }
}
void inline
nm_kthread_send_irq(struct nm_kthread *nmk)
{
- (void)nmk;
-}
-
-static
-int is_suspended(void)
-{
- struct proc *p;
- struct thread *td;
- int ret = 0;
-
- td = curthread;
- p = td->td_proc;
+ struct nm_kthread_ctx *ctx = &nmk->worker_ctx;
+ int err;
- if ((td->td_pflags & TDP_KTHREAD) == 0)
- panic("%s: curthread is not a valid kthread", __func__);
- PROC_LOCK(p);
- if (td->td_flags & TDF_KTH_SUSP) {
- wakeup(&td->td_flags);
- //msleep(&td->td_flags, &p->p_mtx, PPAUSE, "ktsusp", 0);
- ret = 1;
+ if (ctx->irq_ioctl.fd > 0) {
+ err = kern_ioctl(ctx->user_td, ctx->irq_ioctl.fd, ctx->irq_ioctl.com, ctx->irq_ioctl.data);
+ if (err) {
+ D("kern_ioctl error: %d ioctl parameters: fd %d com %lu data %p",
+ err, ctx->irq_ioctl.fd, ctx->irq_ioctl.com, ctx->irq_ioctl.data);
+ }
}
- PROC_UNLOCK(p);
- return ret;
}
static void
@@ -970,53 +963,57 @@
{
struct nm_kthread *nmk = data;
struct nm_kthread_ctx *ctx = &nmk->worker_ctx;
+ uint64_t old_scheduled = 0, new_scheduled = 0;
thread_lock(curthread);
- if (nmk->affinity >= 0)
+ if (nmk->affinity >= 0) {
sched_bind(curthread, nmk->affinity);
- thread_unlock(curthread);
- for (; !is_suspended();) {
- if (nmk->worker == NULL)
- break;
- ctx->worker_fn(ctx->worker_private); /* worker_body */
}
- kthread_exit();
-}
+ thread_unlock(curthread);
-static int
-nm_kthread_open_files(struct nm_kthread *nmk, struct nm_eventfd_cfg_ring *ring_cfg)
-{
- (void)nmk;
- (void)ring_cfg;
- return 0;
-}
+ while (nmk->run) {
+ kthread_suspend_check();
-static void
-nm_kthread_close_files(struct nm_kthread *nmk)
-{
- (void)nmk;
-}
+ new_scheduled = nmk->scheduled;
-static void
-nm_kthread_init_poll(struct nm_kthread *nmk, struct nm_kthread_ctx *ctx)
-{
- (void)nmk;
- (void)ctx;
- return;
+ /* checks if there is a pending notification */
+ if (likely(new_scheduled != old_scheduled)) {
+ old_scheduled = new_scheduled;
+ ctx->worker_fn(ctx->worker_private); /* worker_body */
+ } else if (nmk->run) {
+ if (ctx->ioevent_file) {
+ /* wait on event with timetout 1 second */
+ tsleep_sbt(ctx->ioevent_file, PPAUSE, "nmk_event", SBT_1S, SBT_1MS, C_ABSOLUTE);
+ nmk->scheduled++;
+ }
+ }
+ }
+
+ kthread_exit();
}
static int
-nm_kthread_start_poll(struct nm_kthread_ctx *ctx, void *file)
+nm_kthread_open_files(struct nm_kthread *nmk, struct nm_kthread_cfg *cfg)
{
- (void)ctx;
- (void)file;
+ /* send irq through ioctl to bhyve (vmm.ko) */
+ if (cfg->ring.irqfd) {
+ nmk->worker_ctx.irq_ioctl.fd = cfg->ring.irqfd;
+ nmk->worker_ctx.irq_ioctl.com = cfg->ioctl.com;
+ nmk->worker_ctx.irq_ioctl.data = (caddr_t)cfg->ioctl.data;
+ }
+ /* ring.ioeventfd contains the channel on which we tsleep() to wait for events */
+ if (cfg->ring.ioeventfd) {
+ nmk->worker_ctx.ioevent_file = (void *)cfg->ring.ioeventfd;
+ }
+
return 0;
}
static void
-nm_kthread_stop_poll(struct nm_kthread_ctx *ctx)
+nm_kthread_close_files(struct nm_kthread *nmk)
{
- (void)ctx;
+ nmk->worker_ctx.irq_ioctl.fd = 0;
+ nmk->worker_ctx.ioevent_file = NULL;
}
void
@@ -1036,16 +1033,16 @@
return NULL;
mtx_init(&nmk->worker_lock, "nm_kthread lock", NULL, MTX_DEF);
+ nmk->worker_ctx.user_td = curthread;
nmk->worker_ctx.worker_fn = cfg->worker_fn;
nmk->worker_ctx.worker_private = cfg->worker_private;
nmk->worker_ctx.type = cfg->type;
nmk->affinity = -1;
/* open event fd */
- error = nm_kthread_open_files(nmk, &cfg->ring);
+ error = nm_kthread_open_files(nmk, cfg);
if (error)
goto err;
- nm_kthread_init_poll(nmk, &nmk->worker_ctx);
return nmk;
err:
@@ -1061,21 +1058,20 @@
if (nmk->worker) {
return EBUSY;
}
-
- error = kthread_add(nm_kthread_worker, nmk, curproc,
+ /* enable kthread main loop */
+ nmk->run = 1;
+ /* create kthread */
+ if((error = kthread_add(nm_kthread_worker, nmk, curproc,
&nmk->worker, RFNOWAIT /* to be checked */, 0, "nm-kthread-%ld",
- nmk->worker_ctx.type);
- if (error)
+ nmk->worker_ctx.type))) {
goto err;
- D("started td 0x%p", nmk->worker);
+ }
+
+ D("nm_kthread started td 0x%p", nmk->worker);
- error = nm_kthread_start_poll(&nmk->worker_ctx, nmk->worker_ctx.ioevent_file);
- if (error)
- goto err_kstop;
return 0;
-err_kstop:
- kthread_suspend(nmk->worker, 0);
err:
+ D("nm_kthread start failed err %d", error);
nmk->worker = NULL;
return error;
}
@@ -1086,8 +1082,13 @@
if (!nmk->worker) {
return;
}
- nm_kthread_stop_poll(&nmk->worker_ctx);
- kthread_suspend(nmk->worker, 100);
+ /* tell to kthread to exit from main loop */
+ nmk->run = 0;
+
+ /* wake up kthread if it sleeps */
+ kthread_resume(nmk->worker);
+ nm_kthread_wakeup_worker(nmk);
+
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
More information about the svn-soc-all
mailing list