svn commit: r296031 - head/sys/dev/vnic

Zbigniew Bodek zbb at FreeBSD.org
Thu Feb 25 14:14:48 UTC 2016


Author: zbb
Date: Thu Feb 25 14:14:46 2016
New Revision: 296031
URL: https://svnweb.freebsd.org/changeset/base/296031

Log:
  Enable LRO support for VNIC driver
  
  Add software LRO support, used when IFCAP_LRO is enabled in the
  interface capabilities.
  
  Reviewed by:   wma
  Obtained from: Semihalf
  Sponsored by:  Cavium
  Differential Revision: https://reviews.freebsd.org/D5321

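[Editor's note: for context, the change below follows the generic in-kernel
tcp_lro consumer pattern of this period (<netinet/tcp_lro.h>, per-queue
struct lro_ctrl with an SLIST-based lro_active list): initialize LRO state
on queue bring-up, feed eligible mbufs to tcp_lro_rx(), flush at the end of
completion processing, and free on teardown. A minimal sketch under those
assumptions follows; my_rxq and the my_* functions are placeholders, not
vnic driver symbols, and the driver additionally checks the L3/L4 type and
checksum flags from the CQE before handing an mbuf to LRO.]

/*
 * Sketch of the tcp_lro consumer pattern this change follows
 * (FreeBSD head around r296031, SLIST-based lro_active list).
 */
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>
#include <netinet/tcp_lro.h>

struct my_rxq {
	boolean_t	lro_enabled;
	struct lro_ctrl	lro;
};

/* Queue bring-up: allocate LRO state if the capability is enabled. */
static int
my_rxq_lro_init(struct my_rxq *rxq, struct ifnet *ifp)
{

	rxq->lro_enabled = FALSE;
	if ((if_getcapenable(ifp) & IFCAP_LRO) == 0)
		return (0);
	if (tcp_lro_init(&rxq->lro) != 0)
		return (ENOMEM);
	rxq->lro.ifp = ifp;
	rxq->lro_enabled = TRUE;
	return (0);
}

/* Per received packet: try to aggregate, fall back to direct input. */
static void
my_rxq_input(struct my_rxq *rxq, struct ifnet *ifp, struct mbuf *m)
{

	if (rxq->lro_enabled && rxq->lro.lro_cnt != 0 &&
	    tcp_lro_rx(&rxq->lro, m, 0) == 0)
		return;
	(*ifp->if_input)(ifp, m);
}

/* End of completion processing: flush whatever is still aggregated. */
static void
my_rxq_lro_flush(struct my_rxq *rxq)
{
	struct lro_entry *queued;

	while ((queued = SLIST_FIRST(&rxq->lro.lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&rxq->lro.lro_active, next);
		tcp_lro_flush(&rxq->lro, queued);
	}
}

/* Queue teardown: release LRO resources. */
static void
my_rxq_lro_free(struct my_rxq *rxq)
{

	rxq->lro_enabled = FALSE;
	tcp_lro_free(&rxq->lro);
}
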
Modified:
  head/sys/dev/vnic/nicvf_main.c
  head/sys/dev/vnic/nicvf_queues.c
  head/sys/dev/vnic/nicvf_queues.h

Modified: head/sys/dev/vnic/nicvf_main.c
==============================================================================
--- head/sys/dev/vnic/nicvf_main.c	Thu Feb 25 14:12:51 2016	(r296030)
+++ head/sys/dev/vnic/nicvf_main.c	Thu Feb 25 14:14:46 2016	(r296031)
@@ -67,6 +67,7 @@ __FBSDID("$FreeBSD$");
 
 #include <netinet/in.h>
 #include <netinet/if_ether.h>
+#include <netinet/tcp_lro.h>
 
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
@@ -353,6 +354,7 @@ nicvf_setup_ifnet(struct nicvf *nic)
 	if_setmtu(ifp, ETHERMTU);
 
 	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
+	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
 	/*
 	 * HW offload capabilities
 	 */
@@ -404,9 +406,11 @@ static int
 nicvf_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 {
 	struct nicvf *nic;
+	struct rcv_queue *rq;
 	struct ifreq *ifr;
 	uint32_t flags;
 	int mask, err;
+	int rq_idx;
 #if defined(INET) || defined(INET6)
 	struct ifaddr *ifa;
 	boolean_t avoid_reset = FALSE;
@@ -511,6 +515,30 @@ nicvf_if_ioctl(struct ifnet *ifp, u_long
 			ifp->if_capenable ^= IFCAP_TXCSUM;
 		if (mask & IFCAP_RXCSUM)
 			ifp->if_capenable ^= IFCAP_RXCSUM;
+		if (mask & IFCAP_LRO) {
+			/*
+			 * Lock the driver for a moment to avoid
+			 * mismatch in per-queue settings.
+			 */
+			NICVF_CORE_LOCK(nic);
+			ifp->if_capenable ^= IFCAP_LRO;
+			if ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0) {
+				/*
+				 * Now disable LRO for subsequent packets.
+				 * Atomicity of this change is not necessary
+				 * as we don't need precise toggle of this
+				 * feature for all threads processing the
+				 * completion queue.
+				 */
+				for (rq_idx = 0;
+				    rq_idx < nic->qs->rq_cnt; rq_idx++) {
+					rq = &nic->qs->rq[rq_idx];
+					rq->lro_enabled = !rq->lro_enabled;
+				}
+			}
+			NICVF_CORE_UNLOCK(nic);
+		}
+
 		break;
 
 	default:

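[Editor's note: the SIOCSIFCAP branch added above is what a userland
capability toggle ends up calling. A hypothetical snippet, not part of this
commit, is shown below; the interface name "vnic0" is an assumption, and
ifconfig(8) "lro"/"-lro" issues the same ioctl.]

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "vnic0", sizeof(ifr.ifr_name));
	if (ioctl(s, SIOCGIFCAP, &ifr) == -1)
		err(1, "SIOCGIFCAP");
	/* Toggle LRO relative to the currently enabled capabilities. */
	ifr.ifr_reqcap = ifr.ifr_curcap ^ IFCAP_LRO;
	if (ioctl(s, SIOCSIFCAP, &ifr) == -1)
		err(1, "SIOCSIFCAP");
	close(s);
	return (0);
}
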
Modified: head/sys/dev/vnic/nicvf_queues.c
==============================================================================
--- head/sys/dev/vnic/nicvf_queues.c	Thu Feb 25 14:12:51 2016	(r296030)
+++ head/sys/dev/vnic/nicvf_queues.c	Thu Feb 25 14:14:46 2016	(r296031)
@@ -637,10 +637,12 @@ nicvf_rcv_pkt_handler(struct nicvf *nic,
     struct cqe_rx_t *cqe_rx, int cqe_type)
 {
 	struct mbuf *mbuf;
+	struct rcv_queue *rq;
 	int rq_idx;
 	int err = 0;
 
 	rq_idx = cqe_rx->rq_idx;
+	rq = &nic->qs->rq[rq_idx];
 
 	/* Check for errors */
 	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
@@ -659,6 +661,19 @@ nicvf_rcv_pkt_handler(struct nicvf *nic,
 		return (0);
 	}
 
+	if (rq->lro_enabled &&
+	    ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
+	    (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
+		/*
+		 * At this point it is known that there are no errors in the
+		 * packet. Attempt to LRO enqueue. Send to stack if no resources
+		 * or enqueue error.
+		 */
+		if ((rq->lro.lro_cnt != 0) &&
+		    (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
+			return (0);
+	}
 	/*
 	 * Push this packet to the stack later to avoid
 	 * unlocking completion task in the middle of work.
@@ -726,7 +741,11 @@ nicvf_cq_intr_handler(struct nicvf *nic,
 	int cqe_count, cqe_head;
 	struct queue_set *qs = nic->qs;
 	struct cmp_queue *cq = &qs->cq[cq_idx];
+	struct rcv_queue *rq;
 	struct cqe_rx_t *cq_desc;
+	struct lro_ctrl	*lro;
+	struct lro_entry *queued;
+	int rq_idx;
 	int cmp_err;
 
 	NICVF_CMP_LOCK(cq);
@@ -801,6 +820,17 @@ done:
 		if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
 	}
 out:
+	/*
+	 * Flush any outstanding LRO work
+	 */
+	rq_idx = cq_idx;
+	rq = &nic->qs->rq[rq_idx];
+	lro = &rq->lro;
+	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+		SLIST_REMOVE_HEAD(&lro->lro_active, next);
+		tcp_lro_flush(lro, queued);
+	}
+
 	NICVF_CMP_UNLOCK(cq);
 
 	ifp = nic->ifp;
@@ -1241,18 +1271,39 @@ nicvf_rcv_queue_config(struct nicvf *nic
 	union nic_mbx mbx = {};
 	struct rcv_queue *rq;
 	struct rq_cfg rq_cfg;
+	struct ifnet *ifp;
+	struct lro_ctrl	*lro;
+
+	ifp = nic->ifp;
 
 	rq = &qs->rq[qidx];
 	rq->enable = enable;
 
+	lro = &rq->lro;
+
 	/* Disable receive queue */
 	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
 
 	if (!rq->enable) {
 		nicvf_reclaim_rcv_queue(nic, qs, qidx);
+		/* Free LRO memory */
+		tcp_lro_free(lro);
+		rq->lro_enabled = FALSE;
 		return;
 	}
 
+	/* Configure LRO if enabled */
+	rq->lro_enabled = FALSE;
+	if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
+		if (tcp_lro_init(lro) != 0) {
+			device_printf(nic->dev,
+			    "Failed to initialize LRO for RXQ%d\n", qidx);
+		} else {
+			rq->lro_enabled = TRUE;
+			lro->ifp = nic->ifp;
+		}
+	}
+
 	rq->cq_qs = qs->vnic_id;
 	rq->cq_idx = qidx;
 	rq->start_rbdr_qs = qs->vnic_id;

Modified: head/sys/dev/vnic/nicvf_queues.h
==============================================================================
--- head/sys/dev/vnic/nicvf_queues.h	Thu Feb 25 14:12:51 2016	(r296030)
+++ head/sys/dev/vnic/nicvf_queues.h	Thu Feb 25 14:14:46 2016	(r296031)
@@ -275,6 +275,9 @@ struct rcv_queue {
 	uint8_t		start_qs_rbdr_idx; /* RBDR idx in the above QS */
 	uint8_t		caching;
 	struct		rx_tx_queue_stats stats;
+
+	boolean_t	lro_enabled;
+	struct lro_ctrl	lro;
 } __aligned(CACHE_LINE_SIZE);
 
 struct cmp_queue {

