svn commit: r332319 - in head/sys: dev/netmap net

Vincenzo Maffione vmaffione at FreeBSD.org
Mon Apr 9 09:24:28 UTC 2018


Author: vmaffione
Date: Mon Apr  9 09:24:26 2018
New Revision: 332319
URL: https://svnweb.freebsd.org/changeset/base/332319

Log:
  netmap: align codebase to upstream version v11.4
  
  Changelist:
    - remove unused nkr_slot_flags
    - new nm_intr adapter callback to enable/disable interrupts
      (a driver-side sketch follows this log message)
    - remove unused sysctls and document the remaining ones
    - new infrastructure to support NS_MOREFRAG for NIC ports
    - support for an external memory allocator (for now Linux-only),
      including Linux-specific changes in common headers
    - optimizations in the netmap pipes datapath
    - improvements to the VALE control API
    - new nm_parse() helper function in netmap_user.h
    - various bug fixes and code cleanups
  
  Approved by:	hrs (mentor)
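
The new nm_intr callback mentioned in the changelist gives the netmap core a
uniform way to turn a NIC's interrupts on or off: the nma_intr_enable() helper
added to netmap.c below sets or clears NKR_NOINTR on every kring and, if
anything changed, invokes the callback. The fragment that follows is an
illustrative sketch only, not part of this commit; the mydrv_* names and the
per-queue helpers are hypothetical placeholders, while the real implementations
added here are ptnet_nm_intr() (if_ptnet.c) and vtnet_netmap_intr()
(if_vtnet_netmap.h).

static void
mydrv_netmap_intr(struct netmap_adapter *na, int onoff)
{
        struct mydrv_softc *sc = na->ifp->if_softc;   /* hypothetical softc */
        int i;

        for (i = 0; i < sc->num_queues; i++) {
                /* Only touch the hardware interrupt state here; the core
                 * has already updated NKR_NOINTR on the krings. */
                if (onoff)
                        mydrv_queue_intr_enable(sc, i);   /* placeholder */
                else
                        mydrv_queue_intr_disable(sc, i);  /* placeholder */
        }
}

The callback is hooked up at attach time next to the other adapter callbacks,
e.g. na.nm_intr = mydrv_netmap_intr, exactly as the diff below does with
na_arg.nm_intr = ptnet_nm_intr and na.nm_intr = vtnet_netmap_intr.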

Modified:
  head/sys/dev/netmap/if_em_netmap.h
  head/sys/dev/netmap/if_igb_netmap.h
  head/sys/dev/netmap/if_ixl_netmap.h
  head/sys/dev/netmap/if_lem_netmap.h
  head/sys/dev/netmap/if_ptnet.c
  head/sys/dev/netmap/if_re_netmap.h
  head/sys/dev/netmap/if_vtnet_netmap.h
  head/sys/dev/netmap/ixgbe_netmap.h
  head/sys/dev/netmap/netmap.c
  head/sys/dev/netmap/netmap_freebsd.c
  head/sys/dev/netmap/netmap_generic.c
  head/sys/dev/netmap/netmap_kern.h
  head/sys/dev/netmap/netmap_mem2.c
  head/sys/dev/netmap/netmap_mem2.h
  head/sys/dev/netmap/netmap_monitor.c
  head/sys/dev/netmap/netmap_offloadings.c
  head/sys/dev/netmap/netmap_pipe.c
  head/sys/dev/netmap/netmap_pt.c
  head/sys/dev/netmap/netmap_vale.c
  head/sys/net/netmap.h
  head/sys/net/netmap_user.h
  head/sys/net/netmap_virt.h

Modified: head/sys/dev/netmap/if_em_netmap.h
==============================================================================
--- head/sys/dev/netmap/if_em_netmap.h	Mon Apr  9 08:25:29 2018	(r332318)
+++ head/sys/dev/netmap/if_em_netmap.h	Mon Apr  9 09:24:26 2018	(r332319)
@@ -235,8 +235,6 @@ em_netmap_rxsync(struct netmap_kring *kring, int flags
 	 * First part: import newly received packets.
 	 */
 	if (netmap_no_pendintr || force_update) {
-		uint16_t slot_flags = kring->nkr_slot_flags;
-
 		nic_i = rxr->next_to_check;
 		nm_i = netmap_idx_n2k(kring, nic_i);
 
@@ -247,7 +245,7 @@ em_netmap_rxsync(struct netmap_kring *kring, int flags
 			if ((staterr & E1000_RXD_STAT_DD) == 0)
 				break;
 			ring->slot[nm_i].len = le16toh(curr->wb.upper.length);
-			ring->slot[nm_i].flags = slot_flags;
+			ring->slot[nm_i].flags = 0;
 			bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[nic_i].map,
 				BUS_DMASYNC_POSTREAD);
 			nm_i = nm_next(nm_i, lim);

Modified: head/sys/dev/netmap/if_igb_netmap.h
==============================================================================
--- head/sys/dev/netmap/if_igb_netmap.h	Mon Apr  9 08:25:29 2018	(r332318)
+++ head/sys/dev/netmap/if_igb_netmap.h	Mon Apr  9 09:24:26 2018	(r332319)
@@ -217,8 +217,6 @@ igb_netmap_rxsync(struct netmap_kring *kring, int flag
 	 * First part: import newly received packets.
 	 */
 	if (netmap_no_pendintr || force_update) {
-		uint16_t slot_flags = kring->nkr_slot_flags;
-
 		nic_i = rxr->next_to_check;
 		nm_i = netmap_idx_n2k(kring, nic_i);
 
@@ -229,7 +227,7 @@ igb_netmap_rxsync(struct netmap_kring *kring, int flag
 			if ((staterr & E1000_RXD_STAT_DD) == 0)
 				break;
 			ring->slot[nm_i].len = le16toh(curr->wb.upper.length);
-			ring->slot[nm_i].flags = slot_flags;
+			ring->slot[nm_i].flags = 0;
 			bus_dmamap_sync(rxr->ptag,
 			    rxr->rx_buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
 			nm_i = nm_next(nm_i, lim);

Modified: head/sys/dev/netmap/if_ixl_netmap.h
==============================================================================
--- head/sys/dev/netmap/if_ixl_netmap.h	Mon Apr  9 08:25:29 2018	(r332318)
+++ head/sys/dev/netmap/if_ixl_netmap.h	Mon Apr  9 09:24:26 2018	(r332319)
@@ -331,7 +331,6 @@ ixl_netmap_rxsync(struct netmap_kring *kring, int flag
 	 */
 	if (netmap_no_pendintr || force_update) {
 		int crclen = ixl_crcstrip ? 0 : 4;
-		uint16_t slot_flags = kring->nkr_slot_flags;
 
 		nic_i = rxr->next_check; // or also k2n(kring->nr_hwtail)
 		nm_i = netmap_idx_n2k(kring, nic_i);
@@ -346,7 +345,7 @@ ixl_netmap_rxsync(struct netmap_kring *kring, int flag
 				break;
 			ring->slot[nm_i].len = ((qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
 			    >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - crclen;
-			ring->slot[nm_i].flags = slot_flags;
+			ring->slot[nm_i].flags = 0;
 			bus_dmamap_sync(rxr->ptag,
 			    rxr->buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
 			nm_i = nm_next(nm_i, lim);

Modified: head/sys/dev/netmap/if_lem_netmap.h
==============================================================================
--- head/sys/dev/netmap/if_lem_netmap.h	Mon Apr  9 08:25:29 2018	(r332318)
+++ head/sys/dev/netmap/if_lem_netmap.h	Mon Apr  9 09:24:26 2018	(r332319)
@@ -216,8 +216,6 @@ lem_netmap_rxsync(struct netmap_kring *kring, int flag
 	 * First part: import newly received packets.
 	 */
 	if (netmap_no_pendintr || force_update) {
-		uint16_t slot_flags = kring->nkr_slot_flags;
-
 		nic_i = adapter->next_rx_desc_to_check;
 		nm_i = netmap_idx_n2k(kring, nic_i);
 
@@ -234,7 +232,7 @@ lem_netmap_rxsync(struct netmap_kring *kring, int flag
 				len = 0;
 			}
 			ring->slot[nm_i].len = len;
-			ring->slot[nm_i].flags = slot_flags;
+			ring->slot[nm_i].flags = 0;
 			bus_dmamap_sync(adapter->rxtag,
 				adapter->rx_buffer_area[nic_i].map,
 				BUS_DMASYNC_POSTREAD);

Modified: head/sys/dev/netmap/if_ptnet.c
==============================================================================
--- head/sys/dev/netmap/if_ptnet.c	Mon Apr  9 08:25:29 2018	(r332318)
+++ head/sys/dev/netmap/if_ptnet.c	Mon Apr  9 09:24:26 2018	(r332319)
@@ -216,6 +216,7 @@ static void	ptnet_update_vnet_hdr(struct ptnet_softc *
 static int	ptnet_nm_register(struct netmap_adapter *na, int onoff);
 static int	ptnet_nm_txsync(struct netmap_kring *kring, int flags);
 static int	ptnet_nm_rxsync(struct netmap_kring *kring, int flags);
+static void	ptnet_nm_intr(struct netmap_adapter *na, int onoff);
 
 static void	ptnet_tx_intr(void *opaque);
 static void	ptnet_rx_intr(void *opaque);
@@ -477,6 +478,7 @@ ptnet_attach(device_t dev)
 	na_arg.nm_krings_create = ptnet_nm_krings_create;
 	na_arg.nm_krings_delete = ptnet_nm_krings_delete;
 	na_arg.nm_dtor = ptnet_nm_dtor;
+	na_arg.nm_intr = ptnet_nm_intr;
 	na_arg.nm_register = ptnet_nm_register;
 	na_arg.nm_txsync = ptnet_nm_txsync;
 	na_arg.nm_rxsync = ptnet_nm_rxsync;
@@ -1296,6 +1298,18 @@ ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
 	}
 
 	return 0;
+}
+
+static void
+ptnet_nm_intr(struct netmap_adapter *na, int onoff)
+{
+	struct ptnet_softc *sc = if_getsoftc(na->ifp);
+	int i;
+
+	for (i = 0; i < sc->num_rings; i++) {
+		struct ptnet_queue *pq = sc->queues + i;
+		pq->ptgh->guest_need_kick = onoff;
+	}
 }
 
 static void

Modified: head/sys/dev/netmap/if_re_netmap.h
==============================================================================
--- head/sys/dev/netmap/if_re_netmap.h	Mon Apr  9 08:25:29 2018	(r332318)
+++ head/sys/dev/netmap/if_re_netmap.h	Mon Apr  9 09:24:26 2018	(r332319)
@@ -201,7 +201,6 @@ re_netmap_rxsync(struct netmap_kring *kring, int flags
 	 * is to stop right before nm_hwcur.
 	 */
 	if (netmap_no_pendintr || force_update) {
-		uint16_t slot_flags = kring->nkr_slot_flags;
 		uint32_t stop_i = nm_prev(kring->nr_hwcur, lim);
 
 		nic_i = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
@@ -218,7 +217,7 @@ re_netmap_rxsync(struct netmap_kring *kring, int flags
 			/* XXX subtract crc */
 			total_len = (total_len < 4) ? 0 : total_len - 4;
 			ring->slot[nm_i].len = total_len;
-			ring->slot[nm_i].flags = slot_flags;
+			ring->slot[nm_i].flags = 0;
 			/*  sync was in re_newbuf() */
 			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
 			    rxd[nic_i].rx_dmamap, BUS_DMASYNC_POSTREAD);

Modified: head/sys/dev/netmap/if_vtnet_netmap.h
==============================================================================
--- head/sys/dev/netmap/if_vtnet_netmap.h	Mon Apr  9 08:25:29 2018	(r332318)
+++ head/sys/dev/netmap/if_vtnet_netmap.h	Mon Apr  9 09:24:26 2018	(r332319)
@@ -122,6 +122,7 @@ vtnet_netmap_txsync(struct netmap_kring *kring, int fl
 	struct SOFTC_T *sc = ifp->if_softc;
 	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
 	struct virtqueue *vq = txq->vtntx_vq;
+	int interrupts = !(kring->nr_kflags & NKR_NOINTR);
 
 	/*
 	 * First part: process new packets to send.
@@ -179,7 +180,9 @@ vtnet_netmap_txsync(struct netmap_kring *kring, int fl
 			ring->head, ring->tail, virtqueue_nused(vq),
 			(virtqueue_dump(vq), 1));
 		virtqueue_notify(vq);
-		virtqueue_enable_intr(vq); // like postpone with 0
+		if (interrupts) {
+			virtqueue_enable_intr(vq); // like postpone with 0
+		}
 	}
 
 
@@ -209,7 +212,7 @@ vtnet_netmap_txsync(struct netmap_kring *kring, int fl
 	if (nm_i != kring->nr_hwtail /* && vtnet_txq_below_threshold(txq) == 0*/) {
 		ND(3, "disable intr, hwcur %d", nm_i);
 		virtqueue_disable_intr(vq);
-	} else {
+	} else if (interrupts) {
 		ND(3, "enable intr, hwcur %d", nm_i);
 		virtqueue_postpone_intr(vq, VQ_POSTPONE_SHORT);
 	}
@@ -277,6 +280,7 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int fl
 	u_int const lim = kring->nkr_num_slots - 1;
 	u_int const head = kring->rhead;
 	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
+	int interrupts = !(kring->nr_kflags & NKR_NOINTR);
 
 	/* device-specific */
 	struct SOFTC_T *sc = ifp->if_softc;
@@ -297,7 +301,6 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int fl
 	 * and vtnet_netmap_init_buffers().
 	 */
 	if (netmap_no_pendintr || force_update) {
-		uint16_t slot_flags = kring->nkr_slot_flags;
                 struct netmap_adapter *token;
 
                 nm_i = kring->nr_hwtail;
@@ -309,7 +312,7 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int fl
                                 break;
                         if (likely(token == (void *)rxq)) {
                             ring->slot[nm_i].len = len;
-                            ring->slot[nm_i].flags = slot_flags;
+                            ring->slot[nm_i].flags = 0;
                             nm_i = nm_next(nm_i, lim);
                             n++;
                         } else {
@@ -334,7 +337,9 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int fl
 		kring->nr_hwcur = err;
 		virtqueue_notify(vq);
 		/* After draining the queue may need an intr from the hypervisor */
-        	vtnet_rxq_enable_intr(rxq);
+		if (interrupts) {
+			vtnet_rxq_enable_intr(rxq);
+		}
 	}
 
         ND("[C] h %d c %d t %d hwcur %d hwtail %d",
@@ -345,6 +350,28 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int fl
 }
 
 
+/* Enable/disable interrupts on all virtqueues. */
+static void
+vtnet_netmap_intr(struct netmap_adapter *na, int onoff)
+{
+	struct SOFTC_T *sc = na->ifp->if_softc;
+	int i;
+
+	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
+		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
+		struct virtqueue *txvq = txq->vtntx_vq;
+
+		if (onoff) {
+			vtnet_rxq_enable_intr(rxq);
+			virtqueue_enable_intr(txvq);
+		} else {
+			vtnet_rxq_disable_intr(rxq);
+			virtqueue_disable_intr(txvq);
+		}
+	}
+}
+
 /* Make RX virtqueues buffers pointing to netmap buffers. */
 static int
 vtnet_netmap_init_rx_buffers(struct SOFTC_T *sc)
@@ -417,6 +444,7 @@ vtnet_netmap_attach(struct SOFTC_T *sc)
 	na.nm_txsync = vtnet_netmap_txsync;
 	na.nm_rxsync = vtnet_netmap_rxsync;
 	na.nm_config = vtnet_netmap_config;
+	na.nm_intr = vtnet_netmap_intr;
 	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
 	D("max rings %d", sc->vtnet_max_vq_pairs);
 	netmap_attach(&na);

Modified: head/sys/dev/netmap/ixgbe_netmap.h
==============================================================================
--- head/sys/dev/netmap/ixgbe_netmap.h	Mon Apr  9 08:25:29 2018	(r332318)
+++ head/sys/dev/netmap/ixgbe_netmap.h	Mon Apr  9 09:24:26 2018	(r332319)
@@ -397,7 +397,6 @@ ixgbe_netmap_rxsync(struct netmap_kring *kring, int fl
 	 */
 	if (netmap_no_pendintr || force_update) {
 		int crclen = (ix_crcstrip || IXGBE_IS_VF(adapter) ) ? 0 : 4;
-		uint16_t slot_flags = kring->nkr_slot_flags;
 
 		nic_i = rxr->next_to_check; // or also k2n(kring->nr_hwtail)
 		nm_i = netmap_idx_n2k(kring, nic_i);
@@ -409,7 +408,7 @@ ixgbe_netmap_rxsync(struct netmap_kring *kring, int fl
 			if ((staterr & IXGBE_RXD_STAT_DD) == 0)
 				break;
 			ring->slot[nm_i].len = le16toh(curr->wb.upper.length) - crclen;
-			ring->slot[nm_i].flags = slot_flags;
+			ring->slot[nm_i].flags = 0;
 			bus_dmamap_sync(rxr->ptag,
 			    rxr->rx_buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
 			nm_i = nm_next(nm_i, lim);

Modified: head/sys/dev/netmap/netmap.c
==============================================================================
--- head/sys/dev/netmap/netmap.c	Mon Apr  9 08:25:29 2018	(r332318)
+++ head/sys/dev/netmap/netmap.c	Mon Apr  9 09:24:26 2018	(r332319)
@@ -482,10 +482,8 @@ ports attached to the switch)
 int netmap_verbose;
 
 static int netmap_no_timestamp; /* don't timestamp on rxsync */
-int netmap_mitigate = 1;
 int netmap_no_pendintr = 1;
 int netmap_txsync_retry = 2;
-int netmap_flags = 0;	/* debug flags */
 static int netmap_fwd = 0;	/* force transparent forwarding */
 
 /*
@@ -515,7 +513,9 @@ int netmap_generic_mit = 100*1000;
  * Anyway users looking for the best performance should
  * use native adapters.
  */
+#ifdef linux
 int netmap_generic_txqdisc = 1;
+#endif
 
 /* Default number of slots and queues for generic adapters. */
 int netmap_generic_ringsize = 1024;
@@ -539,21 +539,32 @@ SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
     CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
 SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
     CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
-SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, "");
-SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr,
-    CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets.");
+SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
+    0, "Always look for new received packets.");
 SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
-    &netmap_txsync_retry, 0 , "Number of txsync loops in bridge's flush.");
+    &netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");
 
-SYSCTL_INT(_dev_netmap, OID_AUTO, flags, CTLFLAG_RW, &netmap_flags, 0 , "");
-SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0 , "");
-SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0 , "");
-SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit, 0 , "");
-SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW, &netmap_generic_ringsize, 0 , "");
-SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW, &netmap_generic_rings, 0 , "");
-SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW, &netmap_generic_txqdisc, 0 , "");
-SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr, 0 , "");
-SYSCTL_INT(_dev_netmap, OID_AUTO, ptnetmap_tx_workers, CTLFLAG_RW, &ptnetmap_tx_workers, 0 , "");
+SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
+    "Force NR_FORWARD mode");
+SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
+    "Adapter mode. 0 selects the best option available,"
+    "1 forces native adapter, 2 forces emulated adapter");
+SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
+    0, "RX notification interval in nanoseconds");
+SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
+    &netmap_generic_ringsize, 0,
+    "Number of per-ring slots for emulated netmap mode");
+SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
+    &netmap_generic_rings, 0,
+    "Number of TX/RX queues for emulated netmap adapters");
+#ifdef linux
+SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
+    &netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
+#endif
+SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
+    0, "Allow ptnet devices to use virtio-net headers");
+SYSCTL_INT(_dev_netmap, OID_AUTO, ptnetmap_tx_workers, CTLFLAG_RW,
+    &ptnetmap_tx_workers, 0, "Use worker threads for pnetmap TX processing");
 
 SYSEND;
 
@@ -912,8 +923,20 @@ netmap_hw_krings_delete(struct netmap_adapter *na)
 	netmap_krings_delete(na);
 }
 
+static void
+netmap_mem_drop(struct netmap_adapter *na)
+{
+	int last = netmap_mem_deref(na->nm_mem, na);
+	/* if the native allocator had been overrided on regif,
+	 * restore it now and drop the temporary one
+	 */
+	if (last && na->nm_mem_prev) {
+		netmap_mem_put(na->nm_mem);
+		na->nm_mem = na->nm_mem_prev;
+		na->nm_mem_prev = NULL;
+	}
+}
 
-
 /*
  * Undo everything that was done in netmap_do_regif(). In particular,
  * call nm_register(ifp,0) to stop netmap mode on the interface and
@@ -980,7 +1003,7 @@ netmap_do_unregif(struct netmap_priv_d *priv)
 	/* delete the nifp */
 	netmap_mem_if_delete(na, priv->np_nifp);
 	/* drop the allocator */
-	netmap_mem_deref(na->nm_mem, na);
+	netmap_mem_drop(na);
 	/* mark the priv as unregistered */
 	priv->np_na = NULL;
 	priv->np_nifp = NULL;
@@ -1289,7 +1312,7 @@ netmap_rxsync_from_host(struct netmap_kring *kring, in
                                 D("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));
 
 			slot->len = len;
-			slot->flags = kring->nkr_slot_flags;
+			slot->flags = 0;
 			nm_i = nm_next(nm_i, lim);
 			mbq_enqueue(&fq, m);
 		}
@@ -1409,7 +1432,7 @@ netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_
 assign_mem:
 	if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
 	    (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
-		netmap_mem_put((*na)->nm_mem);
+		(*na)->nm_mem_prev = (*na)->nm_mem;
 		(*na)->nm_mem = netmap_mem_get(nmd);
 	}
 
@@ -1896,7 +1919,8 @@ netmap_krings_get(struct netmap_priv_d *priv)
 	int excl = (priv->np_flags & NR_EXCLUSIVE);
 	enum txrx t;
 
-	ND("%s: grabbing tx [%d, %d) rx [%d, %d)",
+	if (netmap_verbose)
+		D("%s: grabbing tx [%d, %d) rx [%d, %d)",
 			na->name,
 			priv->np_qfirst[NR_TX],
 			priv->np_qlast[NR_TX],
@@ -2059,10 +2083,58 @@ netmap_do_regif(struct netmap_priv_d *priv, struct net
 	if (na->active_fds == 0) {
 		/*
 		 * If this is the first registration of the adapter,
-		 * create the  in-kernel view of the netmap rings,
-		 * the netmap krings.
+		 * perform sanity checks and create the in-kernel view
+		 * of the netmap rings (the netmap krings).
 		 */
+		if (na->ifp) {
+			/* This netmap adapter is attached to an ifnet. */
+			unsigned nbs = netmap_mem_bufsize(na->nm_mem);
+			unsigned mtu = nm_os_ifnet_mtu(na->ifp);
+			/* The maximum amount of bytes that a single
+			 * receive or transmit NIC descriptor can hold. */
+			unsigned hw_max_slot_len = 4096;
 
+			if (mtu <= hw_max_slot_len) {
+				/* The MTU fits a single NIC slot. We only
+				 * Need to check that netmap buffers are
+				 * large enough to hold an MTU. NS_MOREFRAG
+				 * cannot be used in this case. */
+				if (nbs < mtu) {
+					nm_prerr("error: netmap buf size (%u) "
+						"< device MTU (%u)", nbs, mtu);
+					error = EINVAL;
+					goto err_drop_mem;
+				}
+			} else {
+				/* More NIC slots may be needed to receive
+				 * or transmit a single packet. Check that
+				 * the adapter supports NS_MOREFRAG and that
+				 * netmap buffers are large enough to hold
+				 * the maximum per-slot size. */
+				if (!(na->na_flags & NAF_MOREFRAG)) {
+					nm_prerr("error: large MTU (%d) needed "
+						"but %s does not support "
+						"NS_MOREFRAG", mtu,
+						na->ifp->if_xname);
+					error = EINVAL;
+					goto err_drop_mem;
+				} else if (nbs < hw_max_slot_len) {
+					nm_prerr("error: using NS_MOREFRAG on "
+						"%s requires netmap buf size "
+						">= %u", na->ifp->if_xname,
+						hw_max_slot_len);
+					error = EINVAL;
+					goto err_drop_mem;
+				} else {
+					nm_prinf("info: netmap application on "
+						"%s needs to support "
+						"NS_MOREFRAG "
+						"(MTU=%u,netmap_buf_size=%u)",
+						na->ifp->if_xname, mtu, nbs);
+				}
+			}
+		}
+
 		/*
 		 * Depending on the adapter, this may also create
 		 * the netmap rings themselves
@@ -2128,15 +2200,15 @@ err_put_lut:
 		memset(&na->na_lut, 0, sizeof(na->na_lut));
 err_del_if:
 	netmap_mem_if_delete(na, nifp);
-err_rel_excl:
-	netmap_krings_put(priv);
 err_del_rings:
 	netmap_mem_rings_delete(na);
+err_rel_excl:
+	netmap_krings_put(priv);
 err_del_krings:
 	if (na->active_fds == 0)
 		na->nm_krings_delete(na);
 err_drop_mem:
-	netmap_mem_deref(na->nm_mem, na);
+	netmap_mem_drop(na);
 err:
 	priv->np_na = NULL;
 	return error;
@@ -2224,6 +2296,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, c
 		do {
 			/* memsize is always valid */
 			u_int memflags;
+			uint64_t memsize;
 
 			if (nmr->nr_name[0] != '\0') {
 
@@ -2243,10 +2316,11 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, c
 				}
 			}
 
-			error = netmap_mem_get_info(nmd, &nmr->nr_memsize, &memflags,
+			error = netmap_mem_get_info(nmd, &memsize, &memflags,
 				&nmr->nr_arg2);
 			if (error)
 				break;
+			nmr->nr_memsize = (uint32_t)memsize;
 			if (na == NULL) /* only memory info */
 				break;
 			nmr->nr_offset = 0;
@@ -2304,6 +2378,17 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, c
 			}
 			NMG_UNLOCK();
 			break;
+		} else if (i == NETMAP_POOLS_CREATE) {
+			nmd = netmap_mem_ext_create(nmr, &error);
+			if (nmd == NULL)
+				break;
+			/* reset the fields used by POOLS_CREATE to
+			 * avoid confusing the rest of the code
+			 */
+			nmr->nr_cmd = 0;
+			nmr->nr_arg1 = 0;
+			nmr->nr_arg2 = 0;
+			nmr->nr_arg3 = 0;
 		} else if (i != 0) {
 			D("nr_cmd must be 0 not %d", i);
 			error = EINVAL;
@@ -2314,7 +2399,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, c
 		NMG_LOCK();
 		do {
 			u_int memflags;
-			struct ifnet *ifp;
+			uint64_t memsize;
 
 			if (priv->np_nifp != NULL) {	/* thread already registered */
 				error = EBUSY;
@@ -2356,12 +2441,13 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, c
 			nmr->nr_tx_rings = na->num_tx_rings;
 			nmr->nr_rx_slots = na->num_rx_desc;
 			nmr->nr_tx_slots = na->num_tx_desc;
-			error = netmap_mem_get_info(na->nm_mem, &nmr->nr_memsize, &memflags,
+			error = netmap_mem_get_info(na->nm_mem, &memsize, &memflags,
 				&nmr->nr_arg2);
 			if (error) {
 				netmap_do_unregif(priv);
 				break;
 			}
+			nmr->nr_memsize = (uint32_t)memsize;
 			if (memflags & NETMAP_MEM_PRIVATE) {
 				*(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM;
 			}
@@ -2533,7 +2619,6 @@ netmap_poll(struct netmap_priv_d *priv, int events, NM
 #define want_tx want[NR_TX]
 #define want_rx want[NR_RX]
 	struct mbq q;	/* packets from RX hw queues to host stack */
-	enum txrx t;
 
 	/*
 	 * In order to avoid nested locks, we need to "double check"
@@ -2585,14 +2670,15 @@ netmap_poll(struct netmap_priv_d *priv, int events, NM
 	check_all_tx = nm_si_user(priv, NR_TX);
 	check_all_rx = nm_si_user(priv, NR_RX);
 
+#ifdef __FreeBSD__
 	/*
 	 * We start with a lock free round which is cheap if we have
 	 * slots available. If this fails, then lock and call the sync
-	 * routines.
+	 * routines. We can't do this on Linux, as the contract says
+	 * that we must call nm_os_selrecord() unconditionally.
 	 */
-#if 1 /* new code- call rx if any of the ring needs to release or read buffers */
 	if (want_tx) {
-		t = NR_TX;
+		enum txrx t = NR_TX;
 		for (i = priv->np_qfirst[t]; want[t] && i < priv->np_qlast[t]; i++) {
 			kring = &NMR(na, t)[i];
 			/* XXX compare ring->cur and kring->tail */
@@ -2603,8 +2689,8 @@ netmap_poll(struct netmap_priv_d *priv, int events, NM
 		}
 	}
 	if (want_rx) {
+		enum txrx t = NR_RX;
 		want_rx = 0; /* look for a reason to run the handlers */
-		t = NR_RX;
 		for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
 			kring = &NMR(na, t)[i];
 			if (kring->ring->cur == kring->ring->tail /* try fetch new buffers */
@@ -2615,24 +2701,20 @@ netmap_poll(struct netmap_priv_d *priv, int events, NM
 		if (!want_rx)
 			revents |= events & (POLLIN | POLLRDNORM); /* we have data */
 	}
-#else /* old code */
-	for_rx_tx(t) {
-		for (i = priv->np_qfirst[t]; want[t] && i < priv->np_qlast[t]; i++) {
-			kring = &NMR(na, t)[i];
-			/* XXX compare ring->cur and kring->tail */
-			if (!nm_ring_empty(kring->ring)) {
-				revents |= want[t];
-				want[t] = 0;	/* also breaks the loop */
-			}
-		}
-	}
-#endif /* old code */
+#endif
 
+#ifdef linux
+	/* The selrecord must be unconditional on linux. */
+	nm_os_selrecord(sr, check_all_tx ?
+	    &na->si[NR_TX] : &na->tx_rings[priv->np_qfirst[NR_TX]].si);
+	nm_os_selrecord(sr, check_all_rx ?
+		&na->si[NR_RX] : &na->rx_rings[priv->np_qfirst[NR_RX]].si);
+#endif /* linux */
+
 	/*
 	 * If we want to push packets out (priv->np_txpoll) or
 	 * want_tx is still set, we must issue txsync calls
 	 * (on all rings, to avoid that the tx rings stall).
-	 * XXX should also check cur != hwcur on the tx rings.
 	 * Fortunately, normal tx mode has np_txpoll set.
 	 */
 	if (priv->np_txpoll || want_tx) {
@@ -2649,6 +2731,12 @@ flush_tx:
 			kring = &na->tx_rings[i];
 			ring = kring->ring;
 
+			/*
+			 * Don't try to txsync this TX ring if we already found some
+			 * space in some of the TX rings (want_tx == 0) and there are no
+			 * TX slots in this ring that need to be flushed to the NIC
+			 * (cur == hwcur).
+			 */
 			if (!send_down && !want_tx && ring->cur == kring->nr_hwcur)
 				continue;
 
@@ -2676,14 +2764,18 @@ flush_tx:
 			if (found) { /* notify other listeners */
 				revents |= want_tx;
 				want_tx = 0;
+#ifndef linux
 				kring->nm_notify(kring, 0);
+#endif /* linux */
 			}
 		}
 		/* if there were any packet to forward we must have handled them by now */
 		send_down = 0;
 		if (want_tx && retry_tx && sr) {
+#ifndef linux
 			nm_os_selrecord(sr, check_all_tx ?
 			    &na->si[NR_TX] : &na->tx_rings[priv->np_qfirst[NR_TX]].si);
+#endif /* !linux */
 			retry_tx = 0;
 			goto flush_tx;
 		}
@@ -2734,14 +2826,18 @@ do_retry_rx:
 			if (found) {
 				revents |= want_rx;
 				retry_rx = 0;
+#ifndef linux
 				kring->nm_notify(kring, 0);
+#endif /* linux */
 			}
 		}
 
+#ifndef linux
 		if (retry_rx && sr) {
 			nm_os_selrecord(sr, check_all_rx ?
 			    &na->si[NR_RX] : &na->rx_rings[priv->np_qfirst[NR_RX]].si);
 		}
+#endif /* !linux */
 		if (send_down || retry_rx) {
 			retry_rx = 0;
 			if (send_down)
@@ -2766,7 +2862,45 @@ do_retry_rx:
 #undef want_rx
 }
 
+int
+nma_intr_enable(struct netmap_adapter *na, int onoff)
+{
+	bool changed = false;
+	enum txrx t;
+	int i;
 
+	for_rx_tx(t) {
+		for (i = 0; i < nma_get_nrings(na, t); i++) {
+			struct netmap_kring *kring = &NMR(na, t)[i];
+			int on = !(kring->nr_kflags & NKR_NOINTR);
+
+			if (!!onoff != !!on) {
+				changed = true;
+			}
+			if (onoff) {
+				kring->nr_kflags &= ~NKR_NOINTR;
+			} else {
+				kring->nr_kflags |= NKR_NOINTR;
+			}
+		}
+	}
+
+	if (!changed) {
+		return 0; /* nothing to do */
+	}
+
+	if (!na->nm_intr) {
+		D("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
+		  na->name);
+		return -1;
+	}
+
+	na->nm_intr(na, onoff);
+
+	return 0;
+}
+
+
 /*-------------------- driver support routines -------------------*/
 
 /* default notify callback */
@@ -2804,6 +2938,7 @@ netmap_attach_common(struct netmap_adapter *na)
 	if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
 		na->if_input = na->ifp->if_input; /* for netmap_send_up */
 	}
+	na->pdev = na; /* make sure netmap_mem_map() is called */
 #endif /* __FreeBSD__ */
 	if (na->nm_krings_create == NULL) {
 		/* we assume that we have been called by a driver,
@@ -2832,22 +2967,6 @@ netmap_attach_common(struct netmap_adapter *na)
 	return 0;
 }
 
-
-/* standard cleanup, called by all destructors */
-void
-netmap_detach_common(struct netmap_adapter *na)
-{
-	if (na->tx_rings) { /* XXX should not happen */
-		D("freeing leftover tx_rings");
-		na->nm_krings_delete(na);
-	}
-	netmap_pipe_dealloc(na);
-	if (na->nm_mem)
-		netmap_mem_put(na->nm_mem);
-	bzero(na, sizeof(*na));
-	nm_os_free(na);
-}
-
 /* Wrapper for the register callback provided netmap-enabled
  * hardware drivers.
  * nm_iszombie(na) means that the driver module has been
@@ -2900,7 +3019,7 @@ netmap_hw_dtor(struct netmap_adapter *na)
  * Return 0 on success, ENOMEM otherwise.
  */
 int
-netmap_attach_ext(struct netmap_adapter *arg, size_t size)
+netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
 {
 	struct netmap_hw_adapter *hwna = NULL;
 	struct ifnet *ifp = NULL;
@@ -2912,15 +3031,27 @@ netmap_attach_ext(struct netmap_adapter *arg, size_t s
 
 	if (arg == NULL || arg->ifp == NULL)
 		goto fail;
+
 	ifp = arg->ifp;
+	if (NA(ifp) && !NM_NA_VALID(ifp)) {
+		/* If NA(ifp) is not null but there is no valid netmap
+		 * adapter it means that someone else is using the same
+		 * pointer (e.g. ax25_ptr on linux). This happens for
+		 * instance when also PF_RING is in use. */
+		D("Error: netmap adapter hook is busy");
+		return EBUSY;
+	}
+
 	hwna = nm_os_malloc(size);
 	if (hwna == NULL)
 		goto fail;
 	hwna->up = *arg;
 	hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE;
 	strncpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name));
-	hwna->nm_hw_register = hwna->up.nm_register;
-	hwna->up.nm_register = netmap_hw_reg;
+	if (override_reg) {
+		hwna->nm_hw_register = hwna->up.nm_register;
+		hwna->up.nm_register = netmap_hw_reg;
+	}
 	if (netmap_attach_common(&hwna->up)) {
 		nm_os_free(hwna);
 		goto fail;
@@ -2939,6 +3070,7 @@ netmap_attach_ext(struct netmap_adapter *arg, size_t s
 #endif /* NETMAP_LINUX_HAVE_NETDEV_OPS */
 	}
 	hwna->nm_ndo.ndo_start_xmit = linux_netmap_start_xmit;
+	hwna->nm_ndo.ndo_change_mtu = linux_netmap_change_mtu;
 	if (ifp->ethtool_ops) {
 		hwna->nm_eto = *ifp->ethtool_ops;
 	}
@@ -2968,7 +3100,8 @@ fail:
 int
 netmap_attach(struct netmap_adapter *arg)
 {
-	return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter));
+	return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter),
+			1 /* override nm_reg */);
 }
 
 
@@ -2996,7 +3129,15 @@ NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
 	if (na->nm_dtor)
 		na->nm_dtor(na);
 
-	netmap_detach_common(na);
+	if (na->tx_rings) { /* XXX should not happen */
+		D("freeing leftover tx_rings");
+		na->nm_krings_delete(na);
+	}
+	netmap_pipe_dealloc(na);
+	if (na->nm_mem)
+		netmap_mem_put(na->nm_mem);
+	bzero(na, sizeof(*na));
+	nm_os_free(na);
 
 	return 1;
 }
@@ -3029,15 +3170,14 @@ netmap_detach(struct ifnet *ifp)
 
 	NMG_LOCK();
 	netmap_set_all_rings(na, NM_KR_LOCKED);
-	na->na_flags |= NAF_ZOMBIE;
 	/*
 	 * if the netmap adapter is not native, somebody
 	 * changed it, so we can not release it here.
 	 * The NAF_ZOMBIE flag will notify the new owner that
 	 * the driver is gone.
 	 */
-	if (na->na_flags & NAF_NATIVE) {
-	        netmap_adapter_put(na);
+	if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
+		na->na_flags |= NAF_ZOMBIE;
 	}
 	/* give active users a chance to notice that NAF_ZOMBIE has been
 	 * turned on, so that they can stop and return an error to userspace.
@@ -3116,9 +3256,9 @@ netmap_transmit(struct ifnet *ifp, struct mbuf *m)
 	 */
 	mbq_lock(q);
 
-	busy = kring->nr_hwtail - kring->nr_hwcur;
-	if (busy < 0)
-		busy += kring->nkr_num_slots;
+        busy = kring->nr_hwtail - kring->nr_hwcur;
+        if (busy < 0)
+                busy += kring->nkr_num_slots;
 	if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
 		RD(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
 			kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
@@ -3216,16 +3356,6 @@ netmap_reset(struct netmap_adapter *na, enum txrx tx, 
 			kring->nr_hwtail -= lim + 1;
 	}
 
-#if 0 // def linux
-	/* XXX check that the mappings are correct */
-	/* need ring_nr, adapter->pdev, direction */
-	buffer_info->dma = dma_map_single(&pdev->dev, addr, adapter->rx_buffer_len, DMA_FROM_DEVICE);
-	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
-		D("error mapping rx netmap buffer %d", i);
-		// XXX fix error handling
-	}
-
-#endif /* linux */
 	/*
 	 * Wakeup on the individual and global selwait
 	 * We do the wakeup here, but the ring is not yet reconfigured.
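
The netmap_do_regif() hunk above only allows a registration whose MTU exceeds
a single NIC slot when the adapter advertises NAF_MOREFRAG, which means that
applications on such ports must expect packets split across consecutive slots,
with NS_MOREFRAG set on every slot but the last. The receive loop below is an
illustrative sketch only, not part of this commit; it relies on the standard
net/netmap_user.h macros, while handle_packet() and the fixed bounce buffer
are assumptions made for brevity.

#include <stdint.h>
#include <string.h>
#include <net/netmap_user.h>

void handle_packet(const char *pkt, size_t len);   /* hypothetical consumer */

static void
drain_rx_ring(struct netmap_ring *ring)
{
        char pkt[65536];        /* bounce buffer for reassembled packets */
        size_t off = 0;
        uint32_t i, tail = ring->tail;

        for (i = ring->head; i != tail; i = nm_ring_next(ring, i)) {
                struct netmap_slot *slot = &ring->slot[i];
                char *buf = NETMAP_BUF(ring, slot->buf_idx);

                if (off + slot->len <= sizeof(pkt)) {
                        memcpy(pkt + off, buf, slot->len);
                        off += slot->len;
                } /* else: oversized packet, silently truncated in this sketch */

                if (!(slot->flags & NS_MOREFRAG)) {
                        /* Last (or only) fragment: deliver and reset. */
                        handle_packet(pkt, off);
                        off = 0;
                }
        }
        ring->head = ring->cur = i;     /* release the consumed slots */
}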

Modified: head/sys/dev/netmap/netmap_freebsd.c
==============================================================================
--- head/sys/dev/netmap/netmap_freebsd.c	Mon Apr  9 08:25:29 2018	(r332318)
+++ head/sys/dev/netmap/netmap_freebsd.c	Mon Apr  9 09:24:26 2018	(r332319)
@@ -173,6 +173,16 @@ nm_os_ifnet_fini(void)
                 nm_ifnet_dh_tag);
 }
 
+unsigned
+nm_os_ifnet_mtu(struct ifnet *ifp)
+{
+#if __FreeBSD_version < 1100030
+       return ifp->if_data.ifi_mtu;
+#else /* __FreeBSD_version >= 1100030 */
+       return ifp->if_mtu;
+#endif
+}
+
 rawsum_t
 nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum)
 {
@@ -294,24 +304,30 @@ nm_os_catch_rx(struct netmap_generic_adapter *gna, int
 {
 	struct netmap_adapter *na = &gna->up.up;
 	struct ifnet *ifp = na->ifp;
+	int ret = 0;
 
+	nm_os_ifnet_lock();
 	if (intercept) {
 		if (gna->save_if_input) {
 			D("cannot intercept again");
-			return EINVAL; /* already set */
+			ret = EINVAL; /* already set */
+			goto out;
 		}
 		gna->save_if_input = ifp->if_input;
 		ifp->if_input = freebsd_generic_rx_handler;
 	} else {
 		if (!gna->save_if_input){
 			D("cannot restore");
-			return EINVAL;  /* not saved */
+			ret = EINVAL;  /* not saved */
+			goto out;
 		}
 		ifp->if_input = gna->save_if_input;
 		gna->save_if_input = NULL;
 	}
+out:
+	nm_os_ifnet_unlock();
 
-	return 0;
+	return ret;
 }
 
 
@@ -327,12 +343,14 @@ nm_os_catch_tx(struct netmap_generic_adapter *gna, int
 	struct netmap_adapter *na = &gna->up.up;
 	struct ifnet *ifp = netmap_generic_getifp(gna);
 
+	nm_os_ifnet_lock();
 	if (intercept) {
 		na->if_transmit = ifp->if_transmit;
 		ifp->if_transmit = netmap_transmit;
 	} else {
 		ifp->if_transmit = na->if_transmit;
 	}
+	nm_os_ifnet_unlock();
 
 	return 0;
 }

Modified: head/sys/dev/netmap/netmap_generic.c
==============================================================================
--- head/sys/dev/netmap/netmap_generic.c	Mon Apr  9 08:25:29 2018	(r332318)
+++ head/sys/dev/netmap/netmap_generic.c	Mon Apr  9 09:24:26 2018	(r332319)
@@ -86,8 +86,6 @@ __FBSDID("$FreeBSD$");
 #include <dev/netmap/netmap_kern.h>
 #include <dev/netmap/netmap_mem2.h>
 
-#define rtnl_lock()	ND("rtnl_lock called")
-#define rtnl_unlock()	ND("rtnl_unlock called")
 #define MBUF_RXQ(m)	((m)->m_pkthdr.flowid)
 #define smp_mb()
 
@@ -168,7 +166,13 @@ nm_os_get_mbuf(struct ifnet *ifp, int len)
  * has a KASSERT(), checking that the mbuf dtor function is not NULL.
  */
 
+#if __FreeBSD_version <= 1200050
+static void void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { }
+#else  /* __FreeBSD_version >= 1200051 */
+/* The arg1 and arg2 pointers argument were removed by r324446, which
+ * in included since version 1200051. */
 static void void_mbuf_dtor(struct mbuf *m) { }
+#endif /* __FreeBSD_version >= 1200051 */
 
 #define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
 	(m)->m_ext.ext_free = (fn != NULL) ?		\
@@ -200,8 +204,6 @@ nm_os_get_mbuf(struct ifnet *ifp, int len)
 
 #include "win_glue.h"
 
-#define rtnl_lock()	ND("rtnl_lock called")
-#define rtnl_unlock()	ND("rtnl_unlock called")
 #define MBUF_TXQ(m) 	0//((m)->m_pkthdr.flowid)
 #define MBUF_RXQ(m)	    0//((m)->m_pkthdr.flowid)
 #define smp_mb()		//XXX: to be correctly defined
@@ -210,7 +212,6 @@ nm_os_get_mbuf(struct ifnet *ifp, int len)
 
 #include "bsd_glue.h"
 
-#include <linux/rtnetlink.h>    /* rtnl_[un]lock() */
 #include <linux/ethtool.h>      /* struct ethtool_ops, get_ringparam */
 #include <linux/hrtimer.h>
 
@@ -339,17 +340,13 @@ generic_netmap_unregister(struct netmap_adapter *na)
 	int i, r;
 
 	if (na->active_fds == 0) {
-		rtnl_lock();
-
 		na->na_flags &= ~NAF_NETMAP_ON;
 
-		/* Release packet steering control. */
-		nm_os_catch_tx(gna, 0);
-
 		/* Stop intercepting packets on the RX path. */
 		nm_os_catch_rx(gna, 0);
 
-		rtnl_unlock();
+		/* Release packet steering control. */
+		nm_os_catch_tx(gna, 0);
 	}
 
 	for_each_rx_kring_h(r, kring, na) {
@@ -510,24 +507,20 @@ generic_netmap_register(struct netmap_adapter *na, int
 	}
 
 	if (na->active_fds == 0) {
-		rtnl_lock();
-
 		/* Prepare to intercept incoming traffic. */
 		error = nm_os_catch_rx(gna, 1);
 		if (error) {
 			D("nm_os_catch_rx(1) failed (%d)", error);
-			goto register_handler;
+			goto free_tx_pools;
 		}
 
-		/* Make netmap control the packet steering. */
+		/* Let netmap control the packet steering. */
 		error = nm_os_catch_tx(gna, 1);
 		if (error) {
 			D("nm_os_catch_tx(1) failed (%d)", error);
 			goto catch_rx;
 		}

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
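
As a closing note on the "document the other sysctls" item: the knobs declared
in the netmap.c SYSCTL_INT() block above live under dev.netmap and can also be
read or tuned programmatically. The program below is a minimal sketch, not part
of this commit; dev.netmap.admode and the meaning of value 1 ("force native
adapter") are taken from the new description strings, everything else is
illustration.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int admode, native = 1;         /* 1 forces the native adapter */
        size_t len = sizeof(admode);

        if (sysctlbyname("dev.netmap.admode", &admode, &len, NULL, 0) == -1) {
                perror("read dev.netmap.admode");
                return (1);
        }
        printf("dev.netmap.admode = %d\n", admode);

        /* Writing the knob needs root; equivalent to
         * `sysctl dev.netmap.admode=1` from the shell. */
        if (sysctlbyname("dev.netmap.admode", NULL, NULL,
            &native, sizeof(native)) == -1)
                perror("write dev.netmap.admode");

        return (0);
}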

