svn commit: r202124 - user/luigi/ipfw3-head/sys/netinet/ipfw

Luigi Rizzo luigi at FreeBSD.org
Mon Jan 11 22:35:19 UTC 2010


Author: luigi
Date: Mon Jan 11 22:35:19 2010
New Revision: 202124
URL: http://svn.freebsd.org/changeset/base/202124

Log:
  remove some debugging messages, start implementing the new sockopt

Modified:
  user/luigi/ipfw3-head/sys/netinet/ipfw/dn_sched.h
  user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_io.c
  user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_private.h
  user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dummynet.c

Modified: user/luigi/ipfw3-head/sys/netinet/ipfw/dn_sched.h
==============================================================================
--- user/luigi/ipfw3-head/sys/netinet/ipfw/dn_sched.h	Mon Jan 11 22:34:25 2010	(r202123)
+++ user/luigi/ipfw3-head/sys/netinet/ipfw/dn_sched.h	Mon Jan 11 22:35:19 2010	(r202124)
@@ -32,8 +32,6 @@
 #ifndef _DN_SCHED_H
 #define _DN_SCHED_H
 
-// MALLOC_DECLARE(M_DUMMYNET);
-
 /*
  * Descriptor for the scheduler.
  * Contains all function pointers for a given scheduler
@@ -144,14 +142,6 @@ SLIST_HEAD(dn_sched_head, dn_sched);
  */
 
 /*
- * You must call dn_pkt_done() when extracting packets from a queue.
- * The function is used to update packet and queue statistics.
- * - pkt:   packet to return;
- * - q:     packet belongs to this queue
- */
-struct mbuf* dn_pkt_done(struct mbuf *pkt, struct new_queue *q);
-
-/*
  * delete a queue, which we assume nobody references
  */
 int dn_delete_queue(struct new_queue *q);
@@ -199,7 +189,7 @@ struct new_queue * dn_q_hash_id(struct i
 
 /*
  * Extract the head of a queue, update stats. Must be the very last
- * thing done on a queue as it may go away.
+ * thing done on a queue as the queue itself may go away.
  */
 static __inline struct mbuf*
 dn_return_packet(struct new_queue *q)
@@ -207,8 +197,9 @@ dn_return_packet(struct new_queue *q)
     struct mbuf *m = q->mq.head;
     KASSERT(m != NULL, ("empty queue to dn_return_packet"));
     q->mq.head = m->m_nextpkt;
-    q->lenght--;
-    q->len_bytes -= m->m_pkthdr.len;
+    q->ni.length--;
+    q->ni.len_bytes -= m->m_pkthdr.len;
+    q->si->ni.len_bytes -= m->m_pkthdr.len;
     if (q->mq.head == NULL && q->fs && q->fs->kflags & DN_DELETE)
 	dn_delete_queue(q);
 

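As a reading aid, a minimal userland sketch of the accounting that
dn_return_packet() now performs: per-queue counters live in q->ni and the
parent scheduler instance is reached through q->si. This is not part of
the commit; the stand-in types below are reconstructed only from the
fields the hunk touches, and the real definitions are in ip_dn_private.h.

#include <assert.h>
#include <stdio.h>

struct pkt { struct pkt *next; unsigned len; };	/* stand-in for struct mbuf */

struct inst_stats {		/* stand-in for the counters in new_inst */
	unsigned length;	/* packets queued */
	unsigned len_bytes;	/* bytes queued */
};

struct sch_inst { struct inst_stats ni; };	/* stand-in for new_sch_inst */

struct queue {			/* stand-in for new_queue */
	struct pkt *head;
	struct inst_stats ni;	/* per-queue stats */
	struct sch_inst *si;	/* parent scheduler instance */
};

/* Loosely mirrors dn_return_packet(): pop the head and keep the queue
 * and scheduler-instance byte counters consistent. */
static struct pkt *
return_packet(struct queue *q)
{
	struct pkt *p = q->head;

	assert(p != NULL);
	q->head = p->next;
	q->ni.length--;
	q->ni.len_bytes -= p->len;
	q->si->ni.len_bytes -= p->len;
	return p;
}

int
main(void)
{
	struct pkt p = { NULL, 100 };
	struct sch_inst si = { { 1, 100 } };
	struct queue q = { &p, { 1, 100 }, &si };

	return_packet(&q);
	printf("queue: %u pkts, %u bytes; si: %u bytes\n",
	    q.ni.length, q.ni.len_bytes, q.si->ni.len_bytes);
	return 0;
}
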
Modified: user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_io.c
==============================================================================
--- user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_io.c	Mon Jan 11 22:34:25 2010	(r202123)
+++ user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_io.c	Mon Jan 11 22:35:19 2010	(r202124)
@@ -46,7 +46,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/proc.h>
 #include <sys/rwlock.h>
 #include <sys/socket.h>
-//#include <sys/socketvar.h>
 #include <sys/time.h>
 #include <sys/sysctl.h>
 #include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
@@ -205,10 +204,8 @@ struct dn_pkt_tag {
 };
 
 /*
- * Return the mbuf tag holding the dummynet state.  As an optimization
- * this is assumed to be the first tag on the list.  If this turns out
- * wrong we'll need to search the list.
- * XXX OK
+ * Return the mbuf tag holding the dummynet state (it should
+ * be the first one on the list).
  */
 static struct dn_pkt_tag *
 dn_tag_get(struct mbuf *m)
@@ -233,12 +230,9 @@ mq_append(struct mq *q, struct mbuf *m)
 }
 
 /*
- * Check if the packet should be drop depending on the queue management
- * or queue size overflow.
- * Put the packet m into the queue q.
- * Update statistics for the queue.
- *   
- * The function returns 1 if the packet is dropped.
+ * Enqueue a packet in q, subject to space and queue management policy.
+ * Update stats for the queue and the scheduler.
+ * Return 0 on success, 1 on drop. The packet is consumed in either case.
  */
 int
 dn_queue_packet(struct new_queue *q, struct mbuf* m)
@@ -248,28 +242,28 @@ dn_queue_packet(struct new_queue *q, str
 	uint64_t len = m->m_pkthdr.len;
 
 	/* Update statistics, then check reasons to drop pkt. */
-	q->tot_bytes += len;
-	q->tot_pkts++;
+	q->ni.tot_bytes += len;
+	q->ni.tot_pkts++;
 	ni->tot_bytes += len;
 	ni->tot_pkts++;
 	if (f->plr && random() < f->plr)
 		goto drop;
 	if (f->flags & DN_QSIZE_BYTES) {
-		if (q->len_bytes > f->qsize)
+		if (q->ni.len_bytes > f->qsize)
 			goto drop;
-	} else if (q->lenght >= f->qsize) {
+	} else if (q->ni.length >= f->qsize) {
 		goto drop;
 	}
 	mq_append(&q->mq, m);
-	q->lenght++;
-	q->len_bytes += len;
+	q->ni.length++;
+	q->ni.len_bytes += len;
 	ni->length++;
 	ni->len_bytes += len;
 	return 0;
 
 drop:
 	io_pkt_drop++;
-	q->drops++;
+	q->ni.drops++;
 	ni->drops++;
 	FREE_PKT(m);
 	return 1;
@@ -278,14 +272,14 @@ drop:
 /*
  * Fetch packets from the delay line which are due now. If there are
  * leftover packets, reinsert the delay line in the heap.
+ * Runs under scheduler lock.
  */
 static void
 transmit_event(struct mq *q, struct delay_line *dline, dn_key now)
 {
-	struct mbuf *m = NULL;
+	struct mbuf *m;
 	struct dn_pkt_tag *pkt = NULL;
 
-	/* XXX we are under scheduler lock */
 	while ((m = dline->mq.head) != NULL) {
 		pkt = dn_tag_get(m);
 		if (!DN_KEY_LEQ(pkt->output_time, now))
@@ -301,8 +295,8 @@ transmit_event(struct mq *q, struct dela
 
 /*
  * Convert the additional MAC overheads/delays into an equivalent
- * number of bits for the given data rate. The samples are in milliseconds
- * so we need to divide by 1000.
+ * number of bits for the given data rate. The samples are
+ * in milliseconds so we need to divide by 1000.
  */
 static uint64_t
 extra_bits(struct mbuf *m, struct new_pipe *p)
@@ -323,19 +317,6 @@ extra_bits(struct mbuf *m, struct new_pi
 	return bits;
 }
 
-#if 0
-/* Insert packet pkt into delay line, adding the delay.
- * dt->output_time was already set */
-static void
-move_pkt(struct mbuf *m, struct new_pipe *p, struct delay_line *d)
-{
-	struct dn_pkt_tag *dt = dn_tag_get(m);
-
-	dt->output_time += p->delay ;
-	mq_append(&d->mq, m);
-}
-#endif
-
 /* Do masking depending of flow id */
 static struct ipfw_flow_id *
 do_mask(struct ipfw_flow_id *mask, struct ipfw_flow_id *id)
@@ -406,11 +387,10 @@ do_hash(struct ipfw_flow_id *id)
 }
 
 /*
- * returns 0 masks match,
- * returns 1 otherwise
+ * Like bcmp, returns 0 if ids match, 1 otherwise.
  */
 static int
-mask_are_equals (struct ipfw_flow_id *id1, struct ipfw_flow_id *id2)
+flow_id_cmp(struct ipfw_flow_id *id1, struct ipfw_flow_id *id2)
 {
 	int is_v6 = IS_IP6_FLOW_ID(id1);
 	if (is_v6 != IS_IP6_FLOW_ID(id2))
@@ -450,7 +430,6 @@ create_si(struct new_schk *s, int slot)
 	int l = sizeof(*si) + s->fp->sch_inst_len;
 
 	si = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO);
-
 	if (si == NULL)
 		goto error;
 	/* XXX note we set the length only for the initial part which
@@ -473,7 +452,6 @@ create_si(struct new_schk *s, int slot)
 	/* Initialize list of queues attached here */
 	SLIST_INIT(&si->ql_list);
 
-	si->idle_time = 0;
         /* Put entry in front of the hash list of the parent. */
         SLIST_INSERT_HEAD(&s->ht[slot], si, next);
 	si->ni.hash_slot = slot;
@@ -486,50 +464,60 @@ error:
         return NULL;
 }
 
-/* find the scheduler instance for this packet */
+/*
+ * Find the scheduler instance for this packet. If we need to apply
+ * a mask, do it on a local copy of the flow_id to preserve the original.
+ */
 static struct new_sch_inst *
 find_sch_inst(struct new_schk *s, struct ipfw_flow_id *id)
 {
-    struct new_sch_inst *si;
-    int i;
-    struct ipfw_flow_id id_t;
-    
-    if ( 0 == (s->sch.flags & DN_HAVE_MASK) ) {
-	i = 0;
-        si = SLIST_FIRST(&s->ht[0]);
-    } else {
- 	id_t = *id;
-	do_mask(&s->sch.sched_mask, &id_t);
-        i = do_hash(&id_t);
-        i = i % s->ht_slots;
-        /* finally, scan the current hash bucket for a match */
-	searches++;
-	SLIST_FOREACH(si, &s->ht[i], next) {
-            search_steps++;
-            if (!mask_are_equals(&id_t, &si->ni.id))
-                break; /* found */
-        }
-    }
-   
-    if (si == NULL) { /* no match, need to allocate a new entry */
-        si = create_si(s, i);
-        if (si && s->sch.flags & DN_HAVE_MASK)
-	    si->ni.id = id_t;
-    }
-    return si;
+	struct new_sch_inst *si;
+	struct ipfw_flow_id id_t;
+	int i;
+
+	if ( 0 == (s->sch.flags & DN_HAVE_MASK) ) {
+		i = 0;
+		si = SLIST_FIRST(&s->ht[0]);
+	} else {
+		id_t = *id;
+		do_mask(&s->sch.sched_mask, &id_t);
+		i = do_hash(&id_t);
+		i = i % s->ht_slots;
+		/* finally, scan the current hash bucket for a match */
+		searches++;
+		SLIST_FOREACH(si, &s->ht[i], next) {
+			search_steps++;
+			if (!flow_id_cmp(&id_t, &si->ni.id))
+				break; /* found */
+		}
+	}
+
+	if (si == NULL) { /* no match, allocate a new entry */
+		si = create_si(s, i);
+		if (si && s->sch.flags & DN_HAVE_MASK)
+		    si->ni.id = id_t;
+	}
+	return si;
 }
 
 /*
- * Send traffic from a scheduler instance due by 'now'
+ * Send traffic from a scheduler instance due by 'now'.
+ * Return the head of the list of packets that are ready to be delivered.
  */
-static void
+static struct mbuf *
 serve_sched(struct mq *q, struct new_sch_inst *si, dn_key now)
 {
+	struct mq def_q;
 	struct new_schk *s = si->sched;
 	struct mbuf *m = NULL;
 	int delay_line_idle = (si->dline.mq.head == NULL);
 	int done, bw;
 
+	if (q == NULL) {
+		q = &def_q;
+		q->head = NULL;
+	}
+
 	bw = s->pipe.bandwidth;
 	si->kflags &= ~DN_ACTIVE;
 
@@ -543,35 +531,31 @@ serve_sched(struct mq *q, struct new_sch
 		uint64_t len_scaled;
 		done++;
 		len_scaled = bw == 0 ? 0 : hz *
-			(m->m_pkthdr.len * 8 + extra_bits(m, &s->pipe));
+		    (m->m_pkthdr.len * 8 + extra_bits(m, &s->pipe));
 		si->credit -= len_scaled;
 		/* Move packet in the delay line */
 		dn_tag_get(m)->output_time += s->pipe.delay ;
 		mq_append(&si->dline.mq, m);
 	}
+	/*
+	 * If credit >= 0 the instance is idle; record the time.
+	 * Otherwise put it back in the heap and adjust the output
+	 * time of the last inserted packet, m, which was set too early.
+	 */
 	if (si->credit >= 0) {
-		/* Instance is idle, because it did not return
-		 * packets while credit was available.
-		 */
-		si->idle_time = curr_time;
+		si->idle_time = now;
 	} else {
-		/* Credit has become negative, so reinsert the
-		 * instance in the heap for when credit will be
-		 * positive again. Also update the output time
-		 * of the last packet, which is 'tosend'
-		 */
 		dn_key t;
 		KASSERT (bw > 0, ("bw=0 and credit<0 ?"));
 		t = (bw - 1 - si->credit) / bw;
-		/* Delay output time because under credit */
 		if (m)
 			dn_tag_get(m)->output_time += t;
 		si->kflags |= DN_ACTIVE;
-		heap_insert(&dn_cfg.system_heap, curr_time + t, si);
+		heap_insert(&dn_cfg.system_heap, now + t, si);
 	}
-
 	if (delay_line_idle && done)
 		transmit_event(q, &si->dline, now);
+	return q->head;
 }
 
 /*
@@ -623,7 +607,7 @@ dummynet_task(void *context, int pending
 	tick_adjustment++;
     }
 
-    /* serve all pending events */
+    /* serve pending events, accumulate in q */
     for (;;) {
 	struct dn_id *p;    /* generic parameter to handler */
 
@@ -658,20 +642,21 @@ dummynet_send(struct mbuf *m)
 
 	for (; m != NULL; m = n) {
 		struct ifnet *ifp;
-		int dst;
         	struct m_tag *tag;
+		int dst;
 
 		n = m->m_nextpkt;
 		m->m_nextpkt = NULL;
 		tag = m_tag_first(m);
-		if (tag == NULL) {
+		if (tag == NULL) { /* should not happen */
 			dst = DIR_DROP;
 		} else {
 			struct dn_pkt_tag *pkt = dn_tag_get(m);
-			/* extract the dummynet info, rename the tag */
+			/* extract the dummynet info, rename the tag
+			 * to carry reinject info.
+			 */
 			dst = pkt->dn_dir;
 			ifp = pkt->ifp;
-			/* rename the tag so it carries reinject info */
 			tag->m_tag_cookie = MTAG_IPFW_RULE;
 			tag->m_tag_id = 0;
 		}
@@ -681,11 +666,13 @@ dummynet_send(struct mbuf *m)
 			SET_HOST_IPLEN(mtod(m, struct ip *));
 			ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
 			break ;
+
 		case DIR_IN :
 			/* put header in network format for ip_input() */
 			//SET_NET_IPLEN(mtod(m, struct ip *));
 			netisr_dispatch(NETISR_IP, m);
 			break;
+
 #ifdef INET6
 		case DIR_IN | PROTO_IPV6:
 			netisr_dispatch(NETISR_IPV6, m);
@@ -696,6 +683,7 @@ dummynet_send(struct mbuf *m)
 			ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
 			break;
 #endif
+
 		case DIR_FWD | PROTO_IFB: /* DN_TO_IFB_FWD: */
 			if (bridge_dn_p != NULL)
 				((*bridge_dn_p)(m, ifp));
@@ -703,6 +691,7 @@ dummynet_send(struct mbuf *m)
 				printf("dummynet: if_bridge not loaded\n");
 
 			break;
+
 		case DIR_IN | PROTO_LAYER2: /* DN_TO_ETH_DEMUX: */
 			/*
 			 * The Ethernet code assumes the Ethernet header is
@@ -717,6 +706,7 @@ dummynet_send(struct mbuf *m)
 			}
 			ether_demux(m->m_pkthdr.rcvif, m);
 			break;
+
 		case DIR_OUT | PROTO_LAYER2: /* N_TO_ETH_OUT: */
 			ether_output_frame(ifp, m);
 			break;
@@ -760,13 +750,8 @@ int
 dummynet_io(struct mbuf **m0, int dir, struct ip_fw_args *fwa)
 {
 	struct mbuf *m = *m0;
-	struct dn_pkt_tag *pkt;
-	struct m_tag *mtag;
 	struct new_fsk *fs = NULL;
-	struct new_pipe *pipe = NULL;
-	struct new_queue *q = NULL;
-	struct new_schk *sch;
-	struct new_sch_inst *sch_inst;
+	struct new_sch_inst *si;
 	dn_key now; /* save a copy of curr_time */
 
 	int fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
@@ -777,57 +762,58 @@ dummynet_io(struct mbuf **m0, int dir, s
 	fs = ipdn_locate_flowset(fs_id);
 	if (fs == NULL)
 		goto dropit;	/* This queue/pipe does not exist! */
-	sch = fs->sched;
-	if (sch == NULL)	/* should not happen */
+	if (fs->sched == NULL)	/* should not happen */
 		goto dropit;
 	/* find_sch_inst can be fast */
-	sch_inst = find_sch_inst(sch, &(fwa->f_id));
-	if (sch_inst == NULL)
+	si = find_sch_inst(fs->sched, &(fwa->f_id));
+	if (si == NULL)
 		goto dropit;
 
 	/* tag the mbuf */
+    {
+	struct dn_pkt_tag *dt;
+	struct m_tag *mtag;
 	mtag = m_tag_get(PACKET_TAG_DUMMYNET,
-	    sizeof(struct dn_pkt_tag), M_NOWAIT | M_ZERO);
+		    sizeof(*dt), M_NOWAIT | M_ZERO);
 	if (mtag == NULL)
 		goto dropit;		/* Cannot allocate packet header. */
 	m_tag_prepend(m, mtag);		/* Attach to mbuf chain. */
-	pkt = (struct dn_pkt_tag *)(mtag + 1);
-	pkt->rule = fwa->rule;
-	pkt->rule.info &= IPFW_ONEPASS;	/* only keep this info */
-	pkt->dn_dir = dir;
-	pkt->ifp = fwa->oif;
-	now = curr_time; /* in case it changes, use the same value */
-	pkt->output_time = now; /* XXX rewritten when reaches head */
-
+	dt = (struct dn_pkt_tag *)(mtag + 1);
+	dt->rule = fwa->rule;
+	dt->rule.info &= IPFW_ONEPASS;	/* only keep this info */
+	dt->dn_dir = dir;
+	dt->ifp = fwa->oif;
+	/* dt->output_time is updated as the packet moves through */
+	dt->output_time = now = curr_time;
+    }
 	if (fs->kflags & DN_HAVE_MASK)
 		do_mask(&fs->fs.flow_mask, &(fwa->f_id));
-	if (sch->fp->enqueue(sch_inst, fs, m, &(fwa->f_id))) {
+	if (fs->sched->fp->enqueue(si, fs, m, &(fwa->f_id))) {
 		printf("%s dropped by enqueue\n", __FUNCTION__);
 		/* packet was dropped by enqueue() */
 		*m0 = NULL;
 		goto dropit;
 	}
 
-	if (sch_inst->kflags & DN_ACTIVE) {
+	if (si->kflags & DN_ACTIVE) {
 		m = *m0 = NULL; /* consumed */
 		goto done; /* already active, nothing to do */
 	}
 
 	/* compute the initial allowance */
-	pipe = &sch->pipe;
-        sch_inst->credit = dn_cfg.io_fast ? pipe->bandwidth : 0;
+    {
+	struct new_pipe *pipe = &fs->sched->pipe;
+        si->credit = dn_cfg.io_fast ? pipe->bandwidth : 0;
         if (pipe->burst) {
-		uint64_t burst = (now - sch_inst->idle_time) *
+		uint64_t burst = (now - si->idle_time) *
                                     pipe->bandwidth;
 		if (burst > pipe->burst)
 			burst = pipe->burst;
-		sch_inst->credit += burst;
+		si->credit += burst;
         }
-	{	/* pass through scheduler and delay line */
-		struct mq q = { NULL, NULL };
-		serve_sched(&q, sch_inst, now);
-		m = q.head;
-	}
+    }
+	/* pass through scheduler and delay line */
+	m = serve_sched(NULL, si, now);
 
 	/* optimization -- pass it back to ipfw for immediate send */
 	if (dn_cfg.io_fast && m == *m0 && (dir & PROTO_LAYER2) == 0 ) {
@@ -849,10 +835,8 @@ done:
 
 dropit:
 	io_pkt_drop++;
-	if (q)
-		q->drops++;
 	DUMMYNET_UNLOCK();
 	FREE_PKT(m);
 	*m0 = NULL;
-	return ((fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS);
+	return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS;
 }

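Two changes in this file are worth a short illustration: serve_sched() now
returns the head of the list of packets that became due (so dummynet_io()
can pass a NULL queue and feed the result straight to the io_fast path),
and a newly activated scheduler instance gets an initial allowance that may
include a burst credit based on how long it sat idle. The sketch below
models only that credit computation; the field meanings and units are
assumptions read off the hunk above, not the authoritative kernel code.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the "initial allowance" logic in dummynet_io(); the arguments
 * stand in for pipe->bandwidth, pipe->burst, curr_time and si->idle_time. */
static int64_t
initial_credit(int io_fast, uint64_t bandwidth, uint64_t burst_limit,
    uint64_t now, uint64_t idle_time)
{
	int64_t credit = io_fast ? (int64_t)bandwidth : 0;

	if (burst_limit != 0) {
		uint64_t burst = (now - idle_time) * bandwidth;

		if (burst > burst_limit)
			burst = burst_limit;	/* never exceed pipe->burst */
		credit += (int64_t)burst;
	}
	return credit;
}

int
main(void)
{
	/* e.g. io_fast on, bandwidth 1000, burst cap 3000, idle 5 ticks */
	printf("credit = %lld\n",
	    (long long)initial_credit(1, 1000, 3000, 105, 100));
	return 0;
}
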
Modified: user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_private.h
==============================================================================
--- user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_private.h	Mon Jan 11 22:34:25 2010	(r202123)
+++ user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_private.h	Mon Jan 11 22:35:19 2010	(r202124)
@@ -169,10 +169,9 @@ struct new_schk {
 	struct dn_sched *fp;	/* Pointer to scheduler functions */
 };
 
-/* Implementation of the packets queue associated with a scheduler instance */
+/* The packets queue associated with a scheduler instance */
 struct new_queue {
-    struct dn_id oid;
-    struct ipfw_flow_id id;
+    struct new_inst ni;	/* oid, flow_id, stats */
     struct mq mq;	/* packets queue */
 
     SLIST_ENTRY(new_queue) ql_next; /* linked list to sch_inst */
@@ -180,13 +179,6 @@ struct new_queue {
 
     struct new_fsk *fs; /* parent flowset. */
     /* If fs->kflags & DN_DELETE, remove the queue when empty. */
-
-    /* stats */
-    u_int lenght; /* Queue lenght, in packets */
-    u_int len_bytes; /* Queue lenght, in bytes */
-    uint64_t tot_pkts; /* statistics counters  */
-    uint64_t tot_bytes;
-    uint32_t drops;
 };
 SLIST_HEAD(new_queue_head, new_queue);
 

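The per-queue stats removed here move into the embedded struct new_inst.
For orientation, an approximate picture of what that structure must carry,
reconstructed purely from the fields referenced elsewhere in this commit
(ni.oid, ni.id, ni.hash_slot, ni.length, ni.len_bytes, ni.tot_pkts,
ni.tot_bytes, ni.drops); the real definition lives in the dummynet headers
and may differ in names, types and ordering.

#include <stdint.h>
#include <stdio.h>

struct dn_id_stub { uint16_t len, type, subtype; uint32_t id; };   /* guess */
struct flow_id_stub { uint32_t dst, src; uint16_t dport, sport; }; /* guess */

struct new_inst_sketch {
	struct dn_id_stub oid;		/* object header, set by set_oid() */
	struct flow_id_stub id;		/* masked flow id for hashed lookups */
	int hash_slot;			/* bucket in the parent hash table */
	unsigned length;		/* packets currently queued */
	unsigned len_bytes;		/* bytes currently queued */
	uint64_t tot_pkts;		/* lifetime counters */
	uint64_t tot_bytes;
	uint32_t drops;
};

int
main(void)
{
	printf("sketch size: %zu bytes\n", sizeof(struct new_inst_sketch));
	return 0;
}
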
Modified: user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dummynet.c
==============================================================================
--- user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dummynet.c	Mon Jan 11 22:34:25 2010	(r202123)
+++ user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dummynet.c	Mon Jan 11 22:35:19 2010	(r202124)
@@ -50,7 +50,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/taskqueue.h>
 #include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
 #include <netinet/in.h>
-//#include <netinet/ip.h>		/* ip_len, ip_off */
 #include <netinet/ip_var.h>	/* ip_output(), IP_FORWARDING */
 #include <netinet/ip_fw.h>
 #include <netinet/ipfw/ip_fw_private.h>
@@ -61,7 +60,7 @@ __FBSDID("$FreeBSD$");
 
 static int	ip_dn_ctl(struct sockopt *sopt);
 
-	/* which objects to copy */
+/* which objects to copy */
 #define DN_C_PIPE 	0x01
 #define DN_C_SCH	0x02
 #define DN_C_SCH_INST	0x04
@@ -203,7 +202,7 @@ dn_create_queue(struct new_sch_inst *si,
 		return NULL;
 	}
 
-	set_oid(&q->oid, DN_QUEUE, 0, size);
+	set_oid(&q->ni.oid, DN_QUEUE, 0, size);
 	q->fs = fs;
 	q->si = si;
 	fs->refcnt++;
@@ -400,29 +399,33 @@ static int
 do_config(void *p, int l)
 {
 	struct dn_id *next, *o;
-	int err = 0, cmd = 0;
+	int err = 0;
 	struct dn_id *arg = NULL;
 
 	for (o = p; l >= sizeof(*o); o = next) {
 		struct dn_id *prev = arg;
-		err = EINVAL;
 		if (o->len < sizeof(*o) || l < o->len) {
 			printf("bad len o->len %d len %d\n", o->len, l);
+			err = EINVAL;
 			break;
 		}
 		l -= o->len;
 		printf("%s cmd %d len %d left %d\n",
 			__FUNCTION__, o->type, o->len, l);
 		next = (struct dn_id *)((char *)o + o->len);
+		err = 0;
 		switch (o->type) {
+		default:
+			printf("cmd %d not implemented\n", o->type);
+			break;
+
 		case DN_CMD_CONFIGURE:
-		case DN_CMD_GET:
-		case DN_CMD_DELETE:
+			break;
+
 		case DN_CMD_FLUSH:
-			cmd = o->type;
-			err = 0;
+			dummynet_flush();
 			break;
-		case DN_TEXT:	/* store for the next block */
+		case DN_TEXT:	/* store argument for the next block */
 			prev = NULL;
 			arg = o;
 			break;
@@ -450,13 +453,12 @@ do_config(void *p, int l)
 static struct new_schk *
 locate_scheduler(int sch_nr)
 {
-    struct new_schk *sch;
-
-    SLIST_FOREACH(sch, &dn_cfg.schedhash[HASH(sch_nr)], next)
-        if (sch->sch.sched_nr == sch_nr)
-            return (sch);
+	struct new_schk *sch;
 
-    return (NULL);
+	SLIST_FOREACH(sch, &dn_cfg.schedhash[HASH(sch_nr)], next)
+		if (sch->sch.sched_nr == sch_nr)
+			return (sch);
+	return (NULL);
 }
 
 /* update all flowsets which may refer to this scheduler */
@@ -634,7 +636,6 @@ config_fs(struct new_fs *nfs, struct dn_
 	DUMMYNET_LOCK();
 	fs = ipdn_locate_flowset(i);
 
-	printf("%s %d old %p\n", __FUNCTION__, i, fs);
 	if (fs == NULL) {
 		fs = create_fs();
 		if (fs == NULL) {
@@ -646,27 +647,21 @@ config_fs(struct new_fs *nfs, struct dn_
 	/* copy values, check if scheduler exists and mark active */
 	s = locate_scheduler(nfs->sched_nr);
 	s_changed = s == NULL || fs->sched == NULL || fs->sched->fp != s->fp;
-	if (fs->sched) {
-		printf("%s remove from fsk_list\n", __FUNCTION__);
+	if (fs->sched) { // XXX and not DN_DELETE ?
 		SLIST_REMOVE(&fs->sched->fsk_list, fs, new_fsk, sch_chain);
-		printf("%s remove from fshash\n", __FUNCTION__);
 		SLIST_REMOVE(&dn_cfg.fshash[HASH(i)], fs, new_fsk, next);
 		if (s_changed && fs->sched->fp->free_fs)
 			fs->sched->fp->free_fs(fs);
 	} else {
-		printf("%s remove from fsunlinked\n", __FUNCTION__);
 		SLIST_REMOVE(&dn_cfg.fsunlinked, fs, new_fsk, next);
 	}
 	fs->sched = s;
 	if (s) {
-		printf("%s insert in fsk_list\n", __FUNCTION__);
 		SLIST_INSERT_HEAD(&s->fsk_list, fs, sch_chain);
-		printf("%s insert in fshash\n", __FUNCTION__);
 		SLIST_INSERT_HEAD(&dn_cfg.fshash[HASH(i)], fs, next);
 		if (s_changed && s->fp->new_fs)
 			s->fp->new_fs(fs, arg, 1);
 	} else {
-		printf("%s insert in fsunlinked\n", __FUNCTION__);
 		SLIST_INSERT_HEAD(&dn_cfg.fsunlinked, fs, next);
 	}
 	dn_cfg.id++;
@@ -737,69 +732,84 @@ config_profile(struct new_profile *pf, s
 }
 
 static int
-dummynet_get(struct sockopt *sopt)
+compute_space(struct dn_id *cmd, int *to_copy)
 {
-    int have = 0, i, need, error;
-    char *start = NULL, *buf, *end;
-    size_t sopt_valsize;
-    struct dn_id cmd;
-    int to_copy = 0;
-
-    /* save original values */
-    sopt_valsize = sopt->sopt_valsize;
-    printf("%s have %d bytes\n", __FUNCTION__, sopt_valsize);
-
-    error = sooptcopyin(sopt, &cmd, sizeof(cmd), sizeof(cmd));
-    sopt->sopt_valsize = sopt_valsize;
-    if (error)
-	return error;
-    printf("%s cmd %d len %d\n", __FUNCTION__, cmd.type, cmd.len);
-    for (have = 0, i = 0; i < 10; i++) {
-	DUMMYNET_LOCK();
-	switch (cmd.subtype) {
+	int need;
+
+	*to_copy = 0;
+	switch (cmd->subtype) {
 	default:
-	    return EINVAL;
+		return -1;
 	case DN_SCH:	/* pipe show */
-	    to_copy = DN_C_SCH | DN_C_PIPE | DN_C_SCH_INST;
-	    need = dn_cfg.schk_count *
-		(sizeof(struct new_sch) + sizeof(struct new_pipe));
-	    need += dn_cfg.si_count * sizeof(struct new_inst);
-	    break;
+		*to_copy = DN_C_SCH | DN_C_PIPE | DN_C_SCH_INST;
+		need = dn_cfg.schk_count *
+			(sizeof(struct new_sch) + sizeof(struct new_pipe));
+		need += dn_cfg.si_count * sizeof(struct new_inst);
+		break;
 	case DN_FS:	/* queue show */
-	    to_copy = DN_C_FS;
-	    need = dn_cfg.fsk_count *
-		(sizeof(struct new_fs));
-	    break;
-	}
-	need += sizeof(cmd);
-	cmd.id = need;
-	printf("pass %d have %d need %d len %d\n",
-		i, have, need, sopt_valsize);
-	if (have >= need)
-	    break;
-	DUMMYNET_UNLOCK();
-	if (start)
-	    free(start, M_DUMMYNET);
-	buf = NULL;
-	if (need > sopt_valsize)
-	    break;
-	have = need;
-	start = malloc(have, M_DUMMYNET, M_WAITOK | M_ZERO);
+		*to_copy = DN_C_FS;
+		need = dn_cfg.fsk_count * (sizeof(struct new_fs));
+		break;
+	}
+	return need;
+}
+
+static int
+dummynet_get(struct sockopt *sopt)
+{
+	int have, i, need, error, to_copy = 0;
+	char *start = NULL, *buf, *end;
+	size_t sopt_valsize;
+	struct dn_id cmd;
+
+	/* save and restore original sopt_valsize around copyin */
+	sopt_valsize = sopt->sopt_valsize;
+	printf("%s have %d bytes\n", __FUNCTION__, sopt_valsize);
+	error = sooptcopyin(sopt, &cmd, sizeof(cmd), sizeof(cmd));
+	sopt->sopt_valsize = sopt_valsize;
+	if (error)
+		return error;
+	printf("%s cmd %d len %d\n", __FUNCTION__, cmd.type, cmd.len);
+	/* Count space (under lock) and allocate (outside lock).
+	 * Exit with lock held if we manage to get enough buffer.
+	 * Try a few times then give up.
+	 */
+	for (have = 0, i = 0; i < 10; i++) {
+		DUMMYNET_LOCK();
+		need = compute_space(&cmd, &to_copy);
+		if (need < 0) {
+			DUMMYNET_UNLOCK();
+			return EINVAL;
+		}
+		need += sizeof(cmd);
+		cmd.id = need;
+		printf("pass %d have %d need %d len %d\n",
+				i, have, need, sopt_valsize);
+		if (have >= need)
+			break;
+		DUMMYNET_UNLOCK();
+		if (start)
+			free(start, M_DUMMYNET);
+		buf = NULL;
+		if (need > sopt_valsize)
+			break;
+		have = need;
+		start = malloc(have, M_DUMMYNET, M_WAITOK | M_ZERO);
+		if (start == NULL)
+			return ENOMEM;
+	}
 	if (start == NULL)
-	    return ENOMEM;
-    }
-    if (start == NULL)
-        return sooptcopyout(sopt, &cmd, sizeof(cmd));
-    end = start + have;
-    sopt->sopt_valsize = sopt_valsize;
-    bcopy(&cmd, start, sizeof(cmd));
-    buf = start + sizeof(cmd);
-    /* start copying other objects */
-    copy_data(&buf, end, to_copy);
-    DUMMYNET_UNLOCK();
-    error = sooptcopyout(sopt, start, buf - start);
-    free(start, M_DUMMYNET);
-    return error;
+		return sooptcopyout(sopt, &cmd, sizeof(cmd));
+	end = start + have;
+	sopt->sopt_valsize = sopt_valsize;
+	bcopy(&cmd, start, sizeof(cmd));
+	buf = start + sizeof(cmd);
+	/* start copying other objects */
+	copy_data(&buf, end, to_copy);
+	DUMMYNET_UNLOCK();
+	error = sooptcopyout(sopt, start, buf - start);
+	free(start, M_DUMMYNET);
+	return error;
 }
 
 /*
@@ -828,16 +838,19 @@ ip_dn_ctl(struct sockopt *sopt)
 		error = EINVAL;
 		break;
 
-	case IP_DUMMYNET_GET :
-		error = dummynet_get(sopt);
-		break;
-
 	case IP_DUMMYNET_FLUSH :
-		dummynet_flush();
-		break;
-
 	case IP_DUMMYNET_CONFIGURE :
 	case IP_DUMMYNET_DEL :	/* remove a pipe or queue */
+	case IP_DUMMYNET_GET :
+		printf("dummynet: -- compat option %d\n", sopt->sopt_name);
+		error = EINVAL;
+		break;
+
+	case IP_DUMMYNET3 :	/* the new generic interface */
+		if (sopt->sopt_dir == SOPT_GET) {
+			error = dummynet_get(sopt);
+			break;
+		}
 		l = sopt->sopt_valsize;
 		if (l < 0 || l > 12000) {
 			printf("argument too large, %d\n", l);
@@ -964,18 +977,9 @@ load_descriptor(struct dn_sched *d)
 {
 	struct dn_sched *s;
 
-	ip_dn_init();
 	if (d == NULL)
 		return 1; /* error */
-
-	printf("%s %s start\n", __FUNCTION__, d->name);
-	/* Search if scheduler already exists */
-	SLIST_FOREACH(s, &list_of_scheduler, next) {
-		if (strcmp(s->name, d->name) == 0) {
-			printf("%s %s already there\n", __FUNCTION__, d->name);
-			return 1; /* scheduler already exists */
-		}
-	}
+	ip_dn_init();	/* just in case */
 
 	/* Check that mandatory funcs exists */
 	if (d->enqueue == NULL || d->dequeue == NULL) {
@@ -983,12 +987,20 @@ load_descriptor(struct dn_sched *d)
 		return 1;
 	}
 
+	/* Search if scheduler already exists */
 	DUMMYNET_LOCK();
-	SLIST_INSERT_HEAD(&list_of_scheduler, d, next);
-	printf("%s %s loaded\n", __FUNCTION__, d->name);
+	SLIST_FOREACH(s, &list_of_scheduler, next) {
+		if (strcmp(s->name, d->name) == 0) {
+			printf("%s %s already loaded\n", __FUNCTION__, d->name);
+			break; /* scheduler already exists */
+		}
+	}
+	if (s == NULL)
+		SLIST_INSERT_HEAD(&list_of_scheduler, d, next);
 	DUMMYNET_UNLOCK();
+	printf("dn_sched %s %sloaded\n", d->name, s ? "not ":"");
 
-	return 0; /* ok */
+	return s ? 1 : 0;
 }
 
 static int
@@ -1017,27 +1029,18 @@ unload_descriptor(struct dn_sched *s)
 int
 dn_sched_modevent(module_t mod, int cmd, void *arg)
 {
-    struct dn_sched *sch = arg;
-    int error = EOPNOTSUPP;
-    printf("%s called\n", __FUNCTION__);
-
-    switch(cmd) {
-    case MOD_LOAD:
-        error = load_descriptor(sch);
-        break;
-
-    case MOD_UNLOAD:
-        error = unload_descriptor(sch);
-        break;
-    }
-  
-    return error;
+	struct dn_sched *sch = arg;
+
+	if (cmd == MOD_LOAD)
+		return load_descriptor(sch);
+	else if (cmd == MOD_UNLOAD)
+		return unload_descriptor(sch);
+	else
+		return EINVAL;
 }
 
 static moduledata_t dummynet_mod = {
-	"dummynet",
-	dummynet_modevent,
-	NULL
+	"dummynet", dummynet_modevent, NULL
 };
 
 DECLARE_MODULE(dummynet, dummynet_mod,

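The sockopt side is only started here: the old IP_DUMMYNET_* options now
return EINVAL and everything is funnelled through IP_DUMMYNET3, with
dummynet_get() answering GET requests and do_config() walking a chain of
struct dn_id blocks for SET. The sketch below shows how a userland tool
might eventually probe the GET path (when the supplied buffer is too small
the kernel copies back just the command header with cmd.id set to the space
it needs). It is speculative, since the interface is explicitly unfinished:
the raw-socket transport matches how ipfw(8) already talks to the kernel,
but the header exporting IP_DUMMYNET3, the dn_id layout and the
DN_CMD_GET/DN_SCH values are assumptions (placeholders below), not a
confirmed API.

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <netinet/ip_fw.h>	/* assumed to provide IP_DUMMYNET3 */

/* Approximation of struct dn_id; the real layout is in the dummynet headers. */
struct dn_id_sketch {
	uint16_t len, type, subtype;
	uint32_t id;		/* on return, space the kernel needs */
};

int
main(void)
{
	struct dn_id_sketch cmd;
	socklen_t l = sizeof(cmd);
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);	/* needs root */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&cmd, 0, sizeof(cmd));
	cmd.len = sizeof(cmd);
	cmd.type = 0;		/* placeholder for DN_CMD_GET */
	cmd.subtype = 0;	/* placeholder for DN_SCH ("pipe show") */
	if (getsockopt(fd, IPPROTO_IP, IP_DUMMYNET3, &cmd, &l) < 0)
		perror("getsockopt(IP_DUMMYNET3)");
	else
		printf("kernel wants %u bytes for the full reply\n",
		    (unsigned)cmd.id);
	close(fd);
	return 0;
}
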
