svn commit: r193848 - in head/sys: dev/cxgb net sys
Kip Macy
kmacy at FreeBSD.org
Tue Jun 9 19:19:17 UTC 2009
Author: kmacy
Date: Tue Jun 9 19:19:16 2009
New Revision: 193848
URL: http://svn.freebsd.org/changeset/base/193848
Log:
- add drbr routines for accessing #qentries and conditionally dequeueing
- track bytes enqueued in buf_ring
Modified:
head/sys/dev/cxgb/cxgb_multiq.c
head/sys/net/if_var.h
head/sys/sys/buf_ring.h
Modified: head/sys/dev/cxgb/cxgb_multiq.c
==============================================================================
--- head/sys/dev/cxgb/cxgb_multiq.c Tue Jun 9 18:18:41 2009 (r193847)
+++ head/sys/dev/cxgb/cxgb_multiq.c Tue Jun 9 19:19:16 2009 (r193848)
@@ -123,7 +123,7 @@ cxgb_pcpu_enqueue_packet_(struct sge_qse
return (ENETDOWN);
}
txq = &qs->txq[TXQ_ETH];
- err = buf_ring_enqueue(txq->txq_mr, m);
+ err = drbr_enqueue(qs->port->ifp, txq->txq_mr, m);
if (err) {
txq->txq_drops++;
m_freem(m);
Modified: head/sys/net/if_var.h
==============================================================================
--- head/sys/net/if_var.h Tue Jun 9 18:18:41 2009 (r193847)
+++ head/sys/net/if_var.h Tue Jun 9 19:19:16 2009 (r193848)
@@ -556,10 +556,11 @@ do { \
static __inline void
drbr_stats_update(struct ifnet *ifp, int len, int mflags)
{
-
+#ifndef NO_SLOW_STATS
ifp->if_obytes += len;
if (mflags & M_MCAST)
ifp->if_omcasts++;
+#endif
}
static __inline int
@@ -575,9 +576,8 @@ drbr_enqueue(struct ifnet *ifp, struct b
return (error);
}
#endif
- if ((error = buf_ring_enqueue(br, m)) == ENOBUFS) {
+ if ((error = buf_ring_enqueue_bytes(br, m, len)) == ENOBUFS) {
br->br_drops++;
- _IF_DROP(&ifp->if_snd);
m_freem(m);
} else
drbr_stats_update(ifp, len, mflags);
@@ -610,6 +610,27 @@ drbr_dequeue(struct ifnet *ifp, struct b
return (buf_ring_dequeue_sc(br));
}
+static __inline struct mbuf *
+drbr_dequeue_cond(struct ifnet *ifp, struct buf_ring *br,
+ int (*func) (struct mbuf *, void *), void *arg)
+{
+ struct mbuf *m;
+#ifdef ALTQ
+ /*
+ * XXX need to evaluate / requeue
+ */
+ if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
+ return (m);
+ }
+#endif
+ m = buf_ring_peek(br);
+ if (m == NULL || func(m, arg) == 0)
+ return (NULL);
+
+ return (buf_ring_dequeue_sc(br));
+}
+
static __inline int
drbr_empty(struct ifnet *ifp, struct buf_ring *br)
{
@@ -619,6 +640,16 @@ drbr_empty(struct ifnet *ifp, struct buf
#endif
return (buf_ring_empty(br));
}
+
+static __inline int
+drbr_inuse(struct ifnet *ifp, struct buf_ring *br)
+{
+#ifdef ALTQ
+ if (ALTQ_IS_ENABLED(&ifp->if_snd))
+ return (ifp->if_snd.ifq_len);
+#endif
+ return (buf_ring_count(br));
+}
#endif
/*
* 72 was chosen below because it is the size of a TCP/IP
Modified: head/sys/sys/buf_ring.h
==============================================================================
--- head/sys/sys/buf_ring.h Tue Jun 9 18:18:41 2009 (r193847)
+++ head/sys/sys/buf_ring.h Tue Jun 9 19:19:16 2009 (r193848)
@@ -49,10 +49,12 @@ struct buf_ring {
int br_prod_size;
int br_prod_mask;
uint64_t br_drops;
+ uint64_t br_prod_bufs;
+ uint64_t br_prod_bytes;
/*
* Pad out to next L2 cache line
*/
- uint64_t _pad0[13];
+ uint64_t _pad0[11];
volatile uint32_t br_cons_head;
volatile uint32_t br_cons_tail;
@@ -74,7 +76,7 @@ struct buf_ring {
*
*/
static __inline int
-buf_ring_enqueue(struct buf_ring *br, void *buf)
+buf_ring_enqueue_bytes(struct buf_ring *br, void *buf, int nbytes)
{
uint32_t prod_head, prod_next;
uint32_t cons_tail;
@@ -116,12 +118,20 @@ buf_ring_enqueue(struct buf_ring *br, vo
*/
while (br->br_prod_tail != prod_head)
cpu_spinwait();
+ br->br_prod_bufs++;
+ br->br_prod_bytes += nbytes;
br->br_prod_tail = prod_next;
- mb();
critical_exit();
return (0);
}
+static __inline int
+buf_ring_enqueue(struct buf_ring *br, void *buf)
+{
+
+ return (buf_ring_enqueue_bytes(br, buf, 0));
+}
+
/*
* multi-consumer safe dequeue
*
@@ -154,7 +164,7 @@ buf_ring_dequeue_mc(struct buf_ring *br)
#ifdef DEBUG_BUFRING
br->br_ring[cons_head] = NULL;
#endif
- mb();
+ rmb();
/*
* If there are other dequeues in progress
@@ -165,7 +175,6 @@ buf_ring_dequeue_mc(struct buf_ring *br)
cpu_spinwait();
br->br_cons_tail = cons_next;
- mb();
critical_exit();
return (buf);
@@ -179,25 +188,29 @@ buf_ring_dequeue_mc(struct buf_ring *br)
static __inline void *
buf_ring_dequeue_sc(struct buf_ring *br)
{
- uint32_t cons_head, cons_next;
+ uint32_t cons_head, cons_next, cons_next_next;
uint32_t prod_tail;
void *buf;
- critical_enter();
cons_head = br->br_cons_head;
prod_tail = br->br_prod_tail;
cons_next = (cons_head + 1) & br->br_cons_mask;
-
- if (cons_head == prod_tail) {
- critical_exit();
+ cons_next_next = (cons_head + 2) & br->br_cons_mask;
+
+ if (cons_head == prod_tail)
return (NULL);
+
+#ifdef PREFETCH_DEFINED
+ if (cons_next != prod_tail) {
+ prefetch(br->br_ring[cons_next]);
+ if (cons_next_next != prod_tail)
+ prefetch(br->br_ring[cons_next_next]);
}
-
+#endif
br->br_cons_head = cons_next;
buf = br->br_ring[cons_head];
- mb();
-
+
#ifdef DEBUG_BUFRING
br->br_ring[cons_head] = NULL;
if (!mtx_owned(br->br_lock))
@@ -207,8 +220,6 @@ buf_ring_dequeue_sc(struct buf_ring *br)
br->br_cons_tail, cons_head);
#endif
br->br_cons_tail = cons_next;
- mb();
- critical_exit();
return (buf);
}
@@ -225,7 +236,12 @@ buf_ring_peek(struct buf_ring *br)
if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
panic("lock not held on single consumer dequeue");
#endif
- mb();
+ /*
+ * I believe it is safe to not have a memory barrier
+ * here because we control cons, and tail is at worst
+ * a lagging indicator, so in the worst case we might
+ * return NULL immediately after a buffer has been enqueued
+ */
if (br->br_cons_head == br->br_prod_tail)
return (NULL);
More information about the svn-src-head
mailing list