svn commit: r307973 - stable/10/sys/dev/bxe

David C Somayajulu davidcs at FreeBSD.org
Wed Oct 26 18:13:31 UTC 2016


Author: davidcs
Date: Wed Oct 26 18:13:30 2016
New Revision: 307973
URL: https://svnweb.freebsd.org/changeset/base/307973

Log:
  MFC r307578
      1. Use taskqueue_create() instead of taskqueue_create_fast() for both
         fastpath and slowpath taskqueues.
      2. Service all transmits in taskqueue threads.
      3. Additional stats counters for keeping track of
      	- bd availability
      	- tx buf ring not emptied in the fp task queue.
      	  These are drained via timeout taskqueue.
      	- tx attempts during link down.

Modified:
  stable/10/sys/dev/bxe/bxe.c
  stable/10/sys/dev/bxe/bxe.h
  stable/10/sys/dev/bxe/bxe_stats.h
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/dev/bxe/bxe.c
==============================================================================
--- stable/10/sys/dev/bxe/bxe.c	Wed Oct 26 17:53:12 2016	(r307972)
+++ stable/10/sys/dev/bxe/bxe.c	Wed Oct 26 18:13:30 2016	(r307973)
@@ -27,7 +27,7 @@
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
-#define BXE_DRIVER_VERSION "1.78.81"
+#define BXE_DRIVER_VERSION "1.78.89"
 
 #include "bxe.h"
 #include "ecore_sp.h"
@@ -500,7 +500,14 @@ static const struct {
     { STATS_OFFSET32(mbuf_alloc_tpa),
                 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
     { STATS_OFFSET32(tx_queue_full_return),
-                4, STATS_FLAGS_FUNC, "tx_queue_full_return"}
+                4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
+    { STATS_OFFSET32(tx_request_link_down_failures),
+                4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
+    { STATS_OFFSET32(bd_avail_too_less_failures),
+                4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
+    { STATS_OFFSET32(tx_mq_not_empty),
+                4, STATS_FLAGS_FUNC, "tx_mq_not_empty"}
+
 };
 
 static const struct {
@@ -613,7 +620,14 @@ static const struct {
     { Q_STATS_OFFSET32(mbuf_alloc_tpa),
                 4, "mbuf_alloc_tpa"},
     { Q_STATS_OFFSET32(tx_queue_full_return),
-                4, "tx_queue_full_return"}
+                4, "tx_queue_full_return"},
+    { Q_STATS_OFFSET32(tx_request_link_down_failures),
+                4, "tx_request_link_down_failures"},
+    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
+                4, "bd_avail_too_less_failures"},
+    { Q_STATS_OFFSET32(tx_mq_not_empty),
+                4, "tx_mq_not_empty"}
+
 };
 
 #define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
@@ -5612,7 +5626,7 @@ bxe_tx_start(struct ifnet *ifp)
     BXE_FP_TX_UNLOCK(fp);
 }
 
-#if __FreeBSD_version >= 800000
+#if __FreeBSD_version >= 901504
 
 static int
 bxe_tx_mq_start_locked(struct bxe_softc    *sc,
@@ -5634,10 +5648,16 @@ bxe_tx_mq_start_locked(struct bxe_softc 
         return (EINVAL);
     }
 
-    if (!sc->link_vars.link_up ||
-        (ifp->if_drv_flags &
-        (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
+    if (m != NULL) {
         rc = drbr_enqueue(ifp, tx_br, m);
+        if (rc != 0) {
+            fp->eth_q_stats.tx_soft_errors++;
+            goto bxe_tx_mq_start_locked_exit;
+        }
+    }
+
+    if (!sc->link_vars.link_up || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+        fp->eth_q_stats.tx_request_link_down_failures++;
         goto bxe_tx_mq_start_locked_exit;
     }
 
@@ -5647,24 +5667,22 @@ bxe_tx_mq_start_locked(struct bxe_softc 
         fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
     }
 
-    if (m == NULL) {
-        /* no new work, check for pending frames */
-        next = drbr_dequeue(ifp, tx_br);
-    } else if (drbr_needs_enqueue(ifp, tx_br)) {
-        /* have both new and pending work, maintain packet order */
-        rc = drbr_enqueue(ifp, tx_br, m);
-        if (rc != 0) {
-            fp->eth_q_stats.tx_soft_errors++;
-            goto bxe_tx_mq_start_locked_exit;
-        }
-        next = drbr_dequeue(ifp, tx_br);
-    } else {
-        /* new work only and nothing pending */
-        next = m;
-    }
-
     /* keep adding entries while there are frames to send */
-    while (next != NULL) {
+    while ((next = drbr_peek(ifp, tx_br)) != NULL) {
+        /* handle any completions if we're running low */
+        tx_bd_avail = bxe_tx_avail(sc, fp);
+        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
+            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
+            bxe_txeof(sc, fp);
+            tx_bd_avail = bxe_tx_avail(sc, fp);
+            if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
+                fp->eth_q_stats.bd_avail_too_less_failures++;
+                m_freem(next);
+                drbr_advance(ifp, tx_br);
+                rc = ENOBUFS;
+                break;
+            }
+        }
 
         /* the mbuf now belongs to us */
         fp->eth_q_stats.mbuf_alloc_tx++;
@@ -5680,11 +5698,11 @@ bxe_tx_mq_start_locked(struct bxe_softc 
             if (next != NULL) {
                 /* mark the TX queue as full and save the frame */
                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-                /* XXX this may reorder the frame */
-                rc = drbr_enqueue(ifp, tx_br, next);
+                drbr_putback(ifp, tx_br, next);
                 fp->eth_q_stats.mbuf_alloc_tx--;
                 fp->eth_q_stats.tx_frames_deferred++;
-            }
+            } else
+                drbr_advance(ifp, tx_br);
 
             /* stop looking for more work */
             break;
@@ -5696,18 +5714,7 @@ bxe_tx_mq_start_locked(struct bxe_softc 
         /* send a copy of the frame to any BPF listeners */
         BPF_MTAP(ifp, next);
 
-        tx_bd_avail = bxe_tx_avail(sc, fp);
-
-        /* handle any completions if we're running low */
-        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
-            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
-            bxe_txeof(sc, fp);
-            if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
-                break;
-            }
-        }
-
-        next = drbr_dequeue(ifp, tx_br);
+        drbr_advance(ifp, tx_br);
     }
 
     /* all TX packets were dequeued and/or the tx ring is full */
@@ -5717,10 +5724,28 @@ bxe_tx_mq_start_locked(struct bxe_softc 
     }
 
 bxe_tx_mq_start_locked_exit:
+    /* If we didn't drain the drbr, enqueue a task in the future to do it. */
+    if (!drbr_empty(ifp, tx_br)) {
+        fp->eth_q_stats.tx_mq_not_empty++;
+        taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
+    }
 
     return (rc);
 }
 
+static void
+bxe_tx_mq_start_deferred(void *arg,
+                         int pending)
+{
+    struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
+    struct bxe_softc *sc = fp->sc;
+    struct ifnet *ifp = sc->ifnet;
+
+    BXE_FP_TX_LOCK(fp);
+    bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
+    BXE_FP_TX_UNLOCK(fp);
+}
+
 /* Multiqueue (TSS) dispatch routine. */
 static int
 bxe_tx_mq_start(struct ifnet *ifp,
@@ -5742,8 +5767,10 @@ bxe_tx_mq_start(struct ifnet *ifp,
     if (BXE_FP_TX_TRYLOCK(fp)) {
         rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
         BXE_FP_TX_UNLOCK(fp);
-    } else
+    } else {
         rc = drbr_enqueue(ifp, fp->tx_br, m);
+        taskqueue_enqueue(fp->tq, &fp->tx_task);
+    }
 
     return (rc);
 }
@@ -5778,7 +5805,7 @@ bxe_mq_flush(struct ifnet *ifp)
     if_qflush(ifp);
 }
 
-#endif /* FreeBSD_version >= 800000 */
+#endif /* FreeBSD_version >= 901504 */
 
 static uint16_t
 bxe_cid_ilt_lines(struct bxe_softc *sc)
@@ -6138,7 +6165,7 @@ bxe_free_fp_buffers(struct bxe_softc *sc
     for (i = 0; i < sc->num_queues; i++) {
         fp = &sc->fp[i];
 
-#if __FreeBSD_version >= 800000
+#if __FreeBSD_version >= 901504
         if (fp->tx_br != NULL) {
             /* just in case bxe_mq_flush() wasn't called */
             if (mtx_initialized(&fp->tx_mtx)) {
@@ -6965,6 +6992,8 @@ bxe_link_attn(struct bxe_softc *sc)
     uint32_t pause_enabled = 0;
     struct host_port_stats *pstats;
     int cmng_fns;
+    struct bxe_fastpath *fp;
+    int i;
 
     /* Make sure that we are synced with the current statistics */
     bxe_stats_handle(sc, STATS_EVENT_STOP);
@@ -6996,6 +7025,12 @@ bxe_link_attn(struct bxe_softc *sc)
         if (sc->state == BXE_STATE_OPEN) {
             bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
         }
+
+	/* Restart tx when the link comes back. */
+        FOR_EACH_ETH_QUEUE(sc, i) {
+            fp = &sc->fp[i];
+            taskqueue_enqueue(fp->tq, &fp->tx_task);
+	}
     }
 
     if (sc->link_vars.link_up && sc->link_vars.line_speed) {
@@ -9047,6 +9082,10 @@ bxe_interrupt_detach(struct bxe_softc *s
         fp = &sc->fp[i];
         if (fp->tq) {
             taskqueue_drain(fp->tq, &fp->tq_task);
+            taskqueue_drain(fp->tq, &fp->tx_task);
+            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
+                NULL))
+                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
             taskqueue_free(fp->tq);
             fp->tq = NULL;
         }
@@ -9079,9 +9118,9 @@ bxe_interrupt_attach(struct bxe_softc *s
     snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
              "bxe%d_sp_tq", sc->unit);
     TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
-    sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT,
-                                      taskqueue_thread_enqueue,
-                                      &sc->sp_tq);
+    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
+                                 taskqueue_thread_enqueue,
+                                 &sc->sp_tq);
     taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
                             "%s", sc->sp_tq_name);
 
@@ -9091,9 +9130,12 @@ bxe_interrupt_attach(struct bxe_softc *s
         snprintf(fp->tq_name, sizeof(fp->tq_name),
                  "bxe%d_fp%d_tq", sc->unit, i);
         TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
-        fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT,
-                                       taskqueue_thread_enqueue,
-                                       &fp->tq);
+        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
+        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
+                                  taskqueue_thread_enqueue,
+                                  &fp->tq);
+        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
+                          bxe_tx_mq_start_deferred, fp);
         taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
                                 "%s", fp->tq_name);
     }
@@ -12150,8 +12192,6 @@ static void
 bxe_periodic_callout_func(void *xsc)
 {
     struct bxe_softc *sc = (struct bxe_softc *)xsc;
-    struct bxe_fastpath *fp;
-    uint16_t tx_bd_avail;
     int i;
 
     if (!BXE_CORE_TRYLOCK(sc)) {
@@ -12174,47 +12214,6 @@ bxe_periodic_callout_func(void *xsc)
         return;
     }
 
-#if __FreeBSD_version >= 800000
-
-    FOR_EACH_QUEUE(sc, i) {
-        fp = &sc->fp[i];
-
-        if (BXE_FP_TX_TRYLOCK(fp)) {
-            struct ifnet *ifp = sc->ifnet;
-            /*
-             * If interface was stopped due to unavailable
-             * bds, try to process some tx completions
-             */
-            (void) bxe_txeof(sc, fp);
-           
-            tx_bd_avail = bxe_tx_avail(sc, fp);
-            if (tx_bd_avail >= BXE_TX_CLEANUP_THRESHOLD) {
-                bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
-            }
-            BXE_FP_TX_UNLOCK(fp);
-        }
-    }
-
-#else
-
-    fp = &sc->fp[0];
-    if (BXE_FP_TX_TRYLOCK(fp)) {
-        struct ifnet *ifp = sc->ifnet;
-        /*
-         * If interface was stopped due to unavailable
-         * bds, try to process some tx completions
-         */
-        (void) bxe_txeof(sc, fp);
-           
-        tx_bd_avail = bxe_tx_avail(sc, fp);
-        if (tx_bd_avail >= BXE_TX_CLEANUP_THRESHOLD) {
-            bxe_tx_start_locked(sc, ifp, fp);
-        }
- 
-        BXE_FP_TX_UNLOCK(fp);
-    }
-
-#endif /* #if __FreeBSD_version >= 800000 */
 
     /* Check for TX timeouts on any fastpath. */
     FOR_EACH_QUEUE(sc, i) {
@@ -12690,7 +12689,7 @@ bxe_init_ifnet(struct bxe_softc *sc)
     ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
     ifp->if_ioctl = bxe_ioctl;
     ifp->if_start = bxe_tx_start;
-#if __FreeBSD_version >= 800000
+#if __FreeBSD_version >= 901504
     ifp->if_transmit = bxe_tx_mq_start;
     ifp->if_qflush = bxe_mq_flush;
 #endif
@@ -15737,7 +15736,7 @@ bxe_add_sysctls(struct bxe_softc *sc)
 static int
 bxe_alloc_buf_rings(struct bxe_softc *sc)
 {
-#if __FreeBSD_version >= 800000
+#if __FreeBSD_version >= 901504
 
     int i;
     struct bxe_fastpath *fp;
@@ -15758,7 +15757,7 @@ bxe_alloc_buf_rings(struct bxe_softc *sc
 static void
 bxe_free_buf_rings(struct bxe_softc *sc)
 {
-#if __FreeBSD_version >= 800000
+#if __FreeBSD_version >= 901504
 
     int i;
     struct bxe_fastpath *fp;

Modified: stable/10/sys/dev/bxe/bxe.h
==============================================================================
--- stable/10/sys/dev/bxe/bxe.h	Wed Oct 26 17:53:12 2016	(r307972)
+++ stable/10/sys/dev/bxe/bxe.h	Wed Oct 26 18:13:30 2016	(r307973)
@@ -644,6 +644,9 @@ struct bxe_fastpath {
     struct taskqueue *tq;
     char             tq_name[32];
 
+    struct task tx_task;
+    struct timeout_task tx_timeout_task;
+
     /* ethernet client ID (each fastpath set of RX/TX/CQE is a client) */
     uint8_t cl_id;
 #define FP_CL_ID(fp) (fp->cl_id)
@@ -2300,7 +2303,8 @@ void bxe_dump_mbuf_data(struct bxe_softc
 extern int bxe_grc_dump(struct bxe_softc *sc);
 
 #if __FreeBSD_version >= 800000
-#if __FreeBSD_version >= 1000000
+#if (__FreeBSD_version >= 1001513 && __FreeBSD_version < 1100000) ||\
+    __FreeBSD_version >= 1100048
 #define BXE_SET_FLOWID(m) M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE)
 #define BXE_VALID_FLOWID(m) (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
 #else

Modified: stable/10/sys/dev/bxe/bxe_stats.h
==============================================================================
--- stable/10/sys/dev/bxe/bxe_stats.h	Wed Oct 26 17:53:12 2016	(r307972)
+++ stable/10/sys/dev/bxe/bxe_stats.h	Wed Oct 26 18:13:30 2016	(r307973)
@@ -266,6 +266,10 @@ struct bxe_eth_stats {
 
     /* num. of times tx queue full occured */
     uint32_t tx_queue_full_return;
+    /* debug stats */
+    uint32_t tx_request_link_down_failures;
+    uint32_t bd_avail_too_less_failures;
+    uint32_t tx_mq_not_empty;
 };
 
 
@@ -372,6 +376,11 @@ struct bxe_eth_q_stats {
 
     /* num. of times tx queue full occured */
     uint32_t tx_queue_full_return;
+
+    /* debug stats */
+    uint32_t tx_request_link_down_failures;
+    uint32_t bd_avail_too_less_failures;
+    uint32_t tx_mq_not_empty;
 };
 
 struct bxe_eth_stats_old {


More information about the svn-src-all mailing list