PERFORCE change 168953 for review

Ana Kukec anchie at FreeBSD.org
Mon Sep 28 06:25:24 UTC 2009


http://perforce.freebsd.org/chv.cgi?CH=168953

Change 168953 by anchie at p4imunes on 2009/09/28 06:25:10

	Virtualize the ip_dummynet variables: redefine the file-scope globals as per-vnet instances.
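
	The pattern applied throughout the diff, sketched for one variable: a
	file-scope global becomes a per-vnet instance declared with VNET_DEFINE(),
	and call sites go through a V_-prefixed accessor macro that resolves the
	instance for the current vnet (curvnet).  The snippet below is only an
	illustration of that convention, not part of the change itself.

		#include <net/vnet.h>

		/* Before: one kernel-wide global. */
		/* static int dn_hash_size = 64; */

		/* After: one instance per vnet, plus the usual accessor macro. */
		static VNET_DEFINE(int, dn_hash_size) = 64;
		#define	V_dn_hash_size	VNET(dn_hash_size)

		/* Call sites keep their shape; only the name changes, e.g. */
		/*     l = dn_hash_size;   becomes   l = V_dn_hash_size;    */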

Affected files ...

.. //depot/projects/vimage/src/sys/netinet/ipfw/ip_dummynet.c#5 edit

Differences ...

==== //depot/projects/vimage/src/sys/netinet/ipfw/ip_dummynet.c#5 (text+ko) ====

@@ -73,6 +73,7 @@
 #include <sys/taskqueue.h>
 #include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
 #include <net/netisr.h>
+#include <net/vnet.h>
 #include <netinet/in.h>
 #include <netinet/ip.h>		/* ip_len, ip_off */
 #include <netinet/ip_fw.h>
@@ -88,35 +89,60 @@
  * We keep a private variable for the simulation time, but we could
  * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
  */
-static dn_key curr_time = 0 ; /* current simulation time */
+static VNET_DEFINE(dn_key, curr_time) = 0 ; /* current simulation time */
+#define	V_curr_time		VNET(curr_time)
 
-static int dn_hash_size = 64 ;	/* default hash size */
+static VNET_DEFINE(int, dn_hash_size) = 64 ;	/* default hash size */
+#define	V_dn_hash_size		VNET(dn_hash_size)
 
 /* statistics on number of queue searches and search steps */
-static long searches, search_steps ;
-static int pipe_expire = 1 ;   /* expire queue if empty */
-static int dn_max_ratio = 16 ; /* max queues/buckets ratio */
+static VNET_DEFINE(long, searches);
+static VNET_DEFINE(long, search_steps);
+static VNET_DEFINE(int, pipe_expire) = 1 ;   /* expire queue if empty */
+static VNET_DEFINE(int, dn_max_ratio) = 16 ; /* max queues/buckets ratio */
+#define	V_searches		VNET(searches)
+#define	V_search_steps		VNET(search_steps)
+#define	V_pipe_expire		VNET(pipe_expire)
+#define	V_dn_max_ratio		VNET(dn_max_ratio)
 
-static long pipe_slot_limit = 100; /* Foot shooting limit for pipe queues. */
-static long pipe_byte_limit = 1024 * 1024;
+static VNET_DEFINE(long, pipe_slot_limit) = 100; /* Foot shooting limit for pipe queues. */
+static VNET_DEFINE(long, pipe_byte_limit) = 1024 * 1024;
+#define V_pipe_slot_limit	VNET(pipe_slot_limit)
+#define V_pipe_byte_limit	VNET(pipe_byte_limit)
 
-static int red_lookup_depth = 256;	/* RED - default lookup table depth */
-static int red_avg_pkt_size = 512;      /* RED - default medium packet size */
-static int red_max_pkt_size = 1500;     /* RED - default max packet size */
+static VNET_DEFINE(int, red_lookup_depth) = 256;	/* RED - default lookup table depth */
+static VNET_DEFINE(int, red_avg_pkt_size) = 512;      /* RED - default medium packet size */
+static VNET_DEFINE(int, red_max_pkt_size) = 1500;     /* RED - default max packet size */
+#define	V_red_lookup_depth	VNET(red_lookup_depth)
+#define V_red_avg_pkt_size	VNET(red_avg_pkt_size)
+#define V_red_max_pkt_size	VNET(red_max_pkt_size)
 
-static struct timeval prev_t, t;
-static long tick_last;			/* Last tick duration (usec). */
-static long tick_delta;			/* Last vs standard tick diff (usec). */
-static long tick_delta_sum;		/* Accumulated tick difference (usec).*/
-static long tick_adjustment;		/* Tick adjustments done. */
-static long tick_lost;			/* Lost(coalesced) ticks number. */
+static VNET_DEFINE(struct timeval, prev_t);
+static VNET_DEFINE(struct timeval, t);
+static VNET_DEFINE(long, tick_last);			/* Last tick duration (usec). */
+static VNET_DEFINE(long, tick_delta);			/* Last vs standard tick diff (usec). */
+static VNET_DEFINE(long, tick_delta_sum);		/* Accumulated tick difference (usec).*/
+static VNET_DEFINE(long, tick_adjustment);		/* Tick adjustments done. */
+static VNET_DEFINE(long, tick_lost);			/* Lost(coalesced) ticks number. */
 /* Adjusted vs non-adjusted curr_time difference (ticks). */
-static long tick_diff;
+static VNET_DEFINE(long, tick_diff);
+#define	V_prev_t		VNET(prev_t)
+#define	V_t			VNET(t)
+#define	V_tick_last		VNET(tick_last)
+#define	V_tick_delta		VNET(tick_delta)
+#define	V_tick_delta_sum	VNET(tick_delta_sum)
+#define	V_tick_adjustment	VNET(tick_adjustment)
+#define	V_tick_lost		VNET(tick_lost)
+#define	V_tick_diff		VNET(tick_diff)
 
-static int		io_fast;
-static unsigned long	io_pkt;
-static unsigned long	io_pkt_fast;
-static unsigned long	io_pkt_drop;
+static VNET_DEFINE(int, io_fast);
+static VNET_DEFINE(unsigned long, io_pkt);
+static VNET_DEFINE(unsigned long, io_pkt_fast);
+static VNET_DEFINE(unsigned long, io_pkt_drop);
+#define V_io_fast		VNET(io_fast)
+#define	V_io_pkt		VNET(io_pkt)
+#define	V_io_pkt_fast		VNET(io_pkt_fast)
+#define	V_io_pkt_drop		VNET(io_pkt_drop)
 
 /*
  * Three heaps contain queues and pipes that the scheduler handles:
@@ -131,7 +157,9 @@
 
 MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");
 
-static struct dn_heap ready_heap, extract_heap, wfq_ready_heap ;
+static VNET_DEFINE(struct dn_heap, ready_heap);
+static VNET_DEFINE(struct dn_heap, extract_heap);
+static VNET_DEFINE(struct dn_heap, wfq_ready_heap);
+#define	V_ready_heap		VNET(ready_heap)
+#define	V_extract_heap		VNET(extract_heap)
+#define	V_wfq_ready_heap	VNET(wfq_ready_heap)
 
 static int	heap_init(struct dn_heap *h, int size);
 static int	heap_insert (struct dn_heap *h, dn_key key1, void *p);
@@ -145,10 +173,10 @@
 
 #define	HASHSIZE	16
 #define	HASH(num)	((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f)
-static struct dn_pipe_head	pipehash[HASHSIZE];	/* all pipes */
-static struct dn_flow_set_head	flowsethash[HASHSIZE];	/* all flowsets */
+static VNET_DEFINE(struct dn_pipe_head, pipehash[HASHSIZE]);	/* all pipes */
+static VNET_DEFINE(struct dn_flow_set_head, flowsethash[HASHSIZE]);	/* all flowsets */
+#define	V_pipehash		VNET(pipehash)
+#define	V_flowsethash		VNET(flowsethash)
 
-static struct callout dn_timeout;
+static VNET_DEFINE(struct callout, dn_timeout);
+#define	V_dn_timeout		VNET(dn_timeout)
 
 extern	void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
 
@@ -157,73 +185,73 @@
 SYSCTL_DECL(_net_inet_ip);
 
 SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");
-SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
-    CTLFLAG_RW, &dn_hash_size, 0, "Default hash table size");
+SYSCTL_VNET_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
+    CTLFLAG_RW, &VNET_NAME(dn_hash_size), 0, "Default hash table size");
 #if 0	/* curr_time is 64 bit */
-SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, curr_time,
-    CTLFLAG_RD, &curr_time, 0, "Current tick");
+SYSCTL_VNET_LONG(_net_inet_ip_dummynet, OID_AUTO, curr_time,
+    CTLFLAG_RD, &VNET_NAME(curr_time), 0, "Current tick");
 #endif
-SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
-    CTLFLAG_RD, &ready_heap.size, 0, "Size of ready heap");
-SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
-    CTLFLAG_RD, &extract_heap.size, 0, "Size of extract heap");
-SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, searches,
-    CTLFLAG_RD, &searches, 0, "Number of queue searches");
-SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, search_steps,
-    CTLFLAG_RD, &search_steps, 0, "Number of queue search steps");
-SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
-    CTLFLAG_RW, &pipe_expire, 0, "Expire queue if empty");
-SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
-    CTLFLAG_RW, &dn_max_ratio, 0,
+SYSCTL_VNET_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
+    CTLFLAG_RD, &VNET_NAME(ready_heap).size, 0, "Size of ready heap");
+SYSCTL_VNET_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
+    CTLFLAG_RD, &VNET_NAME(extract_heap).size, 0, "Size of extract heap");
+SYSCTL_VNET_LONG(_net_inet_ip_dummynet, OID_AUTO, searches,
+    CTLFLAG_RD, &VNET_NAME(searches), 0, "Number of queue searches");
+SYSCTL_VNET_LONG(_net_inet_ip_dummynet, OID_AUTO, search_steps,
+    CTLFLAG_RD, &VNET_NAME(search_steps), 0, "Number of queue search steps");
+SYSCTL_VNET_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
+    CTLFLAG_RW, &VNET_NAME(pipe_expire), 0, "Expire queue if empty");
+SYSCTL_VNET_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
+    CTLFLAG_RW, &VNET_NAME(dn_max_ratio), 0,
     "Max ratio between dynamic queues and buckets");
-SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
-    CTLFLAG_RD, &red_lookup_depth, 0, "Depth of RED lookup table");
-SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
-    CTLFLAG_RD, &red_avg_pkt_size, 0, "RED Medium packet size");
-SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
-    CTLFLAG_RD, &red_max_pkt_size, 0, "RED Max packet size");
-SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
-    CTLFLAG_RD, &tick_delta, 0, "Last vs standard tick difference (usec).");
-SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
-    CTLFLAG_RD, &tick_delta_sum, 0, "Accumulated tick difference (usec).");
-SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
-    CTLFLAG_RD, &tick_adjustment, 0, "Tick adjustments done.");
-SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
-    CTLFLAG_RD, &tick_diff, 0,
+SYSCTL_VNET_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
+    CTLFLAG_RD, &VNET_NAME(red_lookup_depth), 0, "Depth of RED lookup table");
+SYSCTL_VNET_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
+    CTLFLAG_RD, &VNET_NAME(red_avg_pkt_size), 0, "RED Medium packet size");
+SYSCTL_VNET_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
+    CTLFLAG_RD, &VNET_NAME(red_max_pkt_size), 0, "RED Max packet size");
+SYSCTL_VNET_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
+    CTLFLAG_RD, &VNET_NAME(tick_delta), 0, "Last vs standard tick difference (usec).");
+SYSCTL_VNET_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
+    CTLFLAG_RD, &VNET_NAME(tick_delta_sum), 0, "Accumulated tick difference (usec).");
+SYSCTL_VNET_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
+    CTLFLAG_RD, &VNET_NAME(tick_adjustment), 0, "Tick adjustments done.");
+SYSCTL_VNET_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
+    CTLFLAG_RD, &VNET_NAME(tick_diff), 0,
     "Adjusted vs non-adjusted curr_time difference (ticks).");
-SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
-    CTLFLAG_RD, &tick_lost, 0,
+SYSCTL_VNET_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
+    CTLFLAG_RD, &VNET_NAME(tick_lost), 0,
     "Number of ticks coalesced by dummynet taskqueue.");
-SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
-    CTLFLAG_RW, &io_fast, 0, "Enable fast dummynet io.");
-SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
-    CTLFLAG_RD, &io_pkt, 0,
+SYSCTL_VNET_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
+    CTLFLAG_RW, &VNET_NAME(io_fast), 0, "Enable fast dummynet io.");
+SYSCTL_VNET_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
+    CTLFLAG_RD, &VNET_NAME(io_pkt), 0,
     "Number of packets passed to dummynet.");
-SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
-    CTLFLAG_RD, &io_pkt_fast, 0,
+SYSCTL_VNET_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
+    CTLFLAG_RD, &VNET_NAME(io_pkt_fast), 0,
     "Number of packets bypassed dummynet scheduler.");
-SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
-    CTLFLAG_RD, &io_pkt_drop, 0,
+SYSCTL_VNET_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
+    CTLFLAG_RD, &VNET_NAME(io_pkt_drop), 0,
     "Number of packets dropped by dummynet.");
-SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
-    CTLFLAG_RW, &pipe_slot_limit, 0, "Upper limit in slots for pipe queue.");
-SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
-    CTLFLAG_RW, &pipe_byte_limit, 0, "Upper limit in bytes for pipe queue.");
+SYSCTL_VNET_LONG(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
+    CTLFLAG_RW, &VNET_NAME(pipe_slot_limit), 0, "Upper limit in slots for pipe queue.");
+SYSCTL_VNET_LONG(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
+    CTLFLAG_RW, &VNET_NAME(pipe_byte_limit), 0, "Upper limit in bytes for pipe queue.");
 #endif
 
 #ifdef DUMMYNET_DEBUG
-int	dummynet_debug = 0;
+VNET_DEFINE(int, dummynet_debug) = 0;
+#define	V_dummynet_debug	VNET(dummynet_debug)
 #ifdef SYSCTL_NODE
-SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW, &dummynet_debug,
+SYSCTL_VNET_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW, &VNET_NAME(dummynet_debug),
 	    0, "control debugging printfs");
 #endif
-#define	DPRINTF(X)	if (dummynet_debug) printf X
+#define	DPRINTF(X)	if (V_dummynet_debug) printf X
 #else
 #define	DPRINTF(X)
 #endif
 
-static struct task	dn_task;
-static struct taskqueue	*dn_tq = NULL;
+static VNET_DEFINE(struct task, dn_task);
+static VNET_DEFINE(struct taskqueue *, dn_tq) = NULL;
+#define	V_dn_task		VNET(dn_task)
+#define	V_dn_tq			VNET(dn_tq)
 static void dummynet_task(void *, int);
 
 static struct mtx dummynet_mtx;
@@ -493,7 +521,7 @@
 
 	while ((m = pipe->head) != NULL) {
 		pkt = dn_tag_get(m);
-		if (!DN_KEY_LEQ(pkt->output_time, curr_time))
+		if (!DN_KEY_LEQ(pkt->output_time, V_curr_time))
 			break;
 
 		pipe->head = m->m_nextpkt;
@@ -513,7 +541,7 @@
 		 * XXX Should check errors on heap_insert, by draining the
 		 * whole pipe p and hoping in the future we are more successful.
 		 */
-		heap_insert(&extract_heap, pkt->output_time, pipe);
+		heap_insert(&V_extract_heap, pkt->output_time, pipe);
 	}
 }
 
@@ -590,7 +618,7 @@
     q->len-- ;
     q->len_bytes -= len ;
 
-    dt->output_time = curr_time + p->delay ;
+    dt->output_time = V_curr_time + p->delay ;
 
     if (p->head == NULL)
 	p->head = pkt;
@@ -630,7 +658,7 @@
 	 * bandwidth==0 (no limit) means we can drain the whole queue,
 	 * setting len_scaled = 0 does the job.
 	 */
-	q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
+	q->numbytes += (V_curr_time - q->sched_time) * p->bandwidth;
 	while ((pkt = q->head) != NULL) {
 		int len = pkt->m_pkthdr.len;
 		dn_key len_scaled = p->bandwidth ? len*8*hz
@@ -654,14 +682,14 @@
 	if ((pkt = q->head) != NULL) {	/* this implies bandwidth != 0 */
 		dn_key t = set_ticks(pkt, q, p); /* ticks i have to wait */
 
-		q->sched_time = curr_time;
-		heap_insert(&ready_heap, curr_time + t, (void *)q);
+		q->sched_time = V_curr_time;
+		heap_insert(&V_ready_heap, V_curr_time + t, (void *)q);
 		/*
 		 * XXX Should check errors on heap_insert, and drain the whole
 		 * queue on error hoping next time we are luckier.
 		 */
 	} else		/* RED needs to know when the queue becomes empty. */
-		q->idle_time = curr_time;
+		q->idle_time = V_curr_time;
 
 	/*
 	 * If the delay line was empty call transmit_event() now.
@@ -689,7 +717,7 @@
 	DUMMYNET_LOCK_ASSERT();
 
 	if (p->if_name[0] == 0)		/* tx clock is simulated */
-		p->numbytes += (curr_time - p->sched_time) * p->bandwidth;
+		p->numbytes += (V_curr_time - p->sched_time) * p->bandwidth;
 	else {	/*
 		 * tx clock is for real,
 		 * the ifq must be empty or this is a NOP.
@@ -762,7 +790,7 @@
 		}
 	}
 	if (sch->elements == 0 && neh->elements == 0 && p->numbytes >= 0) {
-		p->idle_time = curr_time;
+		p->idle_time = V_curr_time;
 		/*
 		 * No traffic and no events scheduled.
 		 * We can get rid of idle-heap.
@@ -793,8 +821,8 @@
 		if (p->bandwidth > 0)
 			t = (p->bandwidth - 1 - p->numbytes) / p->bandwidth;
 		dn_tag_get(p->tail)->output_time += t;
-		p->sched_time = curr_time;
-		heap_insert(&wfq_ready_heap, curr_time + t, (void *)p);
+		p->sched_time = V_curr_time;
+		heap_insert(&V_wfq_ready_heap, V_curr_time + t, (void *)p);
 		/*
 		 * XXX Should check errors on heap_insert, and drain the whole
 		 * queue on error hoping next time we are luckier.
@@ -817,7 +845,7 @@
 dummynet(void * __unused unused)
 {
 
-	taskqueue_enqueue(dn_tq, &dn_task);
+	taskqueue_enqueue(V_dn_tq, &V_dn_task);
 }
 
 /*
@@ -835,23 +863,23 @@
 
 	DUMMYNET_LOCK();
 
-	heaps[0] = &ready_heap;			/* fixed-rate queues */
-	heaps[1] = &wfq_ready_heap;		/* wfq queues */
-	heaps[2] = &extract_heap;		/* delay line */
+	heaps[0] = &V_ready_heap;			/* fixed-rate queues */
+	heaps[1] = &V_wfq_ready_heap;		/* wfq queues */
+	heaps[2] = &V_extract_heap;		/* delay line */
 
  	/* Update number of lost(coalesced) ticks. */
- 	tick_lost += pending - 1;
+ 	V_tick_lost += pending - 1;
  
- 	getmicrouptime(&t);
+ 	getmicrouptime(&V_t);
  	/* Last tick duration (usec). */
- 	tick_last = (t.tv_sec - prev_t.tv_sec) * 1000000 +
- 	    (t.tv_usec - prev_t.tv_usec);
+ 	V_tick_last = (V_t.tv_sec - V_prev_t.tv_sec) * 1000000 +
+ 	    (V_t.tv_usec - V_prev_t.tv_usec);
  	/* Last tick vs standard tick difference (usec). */
- 	tick_delta = (tick_last * hz - 1000000) / hz;
+ 	V_tick_delta = (V_tick_last * hz - 1000000) / hz;
  	/* Accumulated tick difference (usec). */
- 	tick_delta_sum += tick_delta;
+ 	V_tick_delta_sum += V_tick_delta;
  
- 	prev_t = t;
+ 	V_prev_t = V_t;
  
  	/*
  	 * Adjust curr_time if accumulated tick difference greater than
@@ -859,28 +887,28 @@
  	 * we do positive adjustment as required and throttle curr_time in
  	 * case of negative adjustment.
  	 */
-  	curr_time++;
- 	if (tick_delta_sum - tick >= 0) {
- 		int diff = tick_delta_sum / tick;
+  	V_curr_time++;
+ 	if (V_tick_delta_sum - tick >= 0) {
+ 		int diff = V_tick_delta_sum / tick;
  
- 		curr_time += diff;
- 		tick_diff += diff;
- 		tick_delta_sum %= tick;
- 		tick_adjustment++;
- 	} else if (tick_delta_sum + tick <= 0) {
- 		curr_time--;
- 		tick_diff--;
- 		tick_delta_sum += tick;
- 		tick_adjustment++;
+ 		V_curr_time += diff;
+ 		V_tick_diff += diff;
+ 		V_tick_delta_sum %= tick;
+ 		V_tick_adjustment++;
+ 	} else if (V_tick_delta_sum + tick <= 0) {
+ 		V_curr_time--;
+ 		V_tick_diff--;
+ 		V_tick_delta_sum += tick;
+ 		V_tick_adjustment++;
  	}
 
 	for (i = 0; i < 3; i++) {
 		h = heaps[i];
-		while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
-			if (h->p[0].key > curr_time)
+		while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, V_curr_time)) {
+			if (h->p[0].key > V_curr_time)
 				printf("dummynet: warning, "
 				    "heap %d is %d ticks late\n",
-				    i, (int)(curr_time - h->p[0].key));
+				    i, (int)(V_curr_time - h->p[0].key));
 			/* store a copy before heap_extract */
 			p = h->p[0].object;
 			/* need to extract before processing */
@@ -901,7 +929,7 @@
 
 	/* Sweep pipes trying to expire idle flow_queues. */
 	for (i = 0; i < HASHSIZE; i++)
-		SLIST_FOREACH(pipe, &pipehash[i], next)
+		SLIST_FOREACH(pipe, &V_pipehash[i], next)
 			if (pipe->idle_heap.elements > 0 &&
 			    DN_KEY_LT(pipe->idle_heap.p[0].key, pipe->V)) {
 				struct dn_flow_queue *q =
@@ -918,7 +946,7 @@
 	if (head != NULL)
 		dummynet_send(head);
 
-	callout_reset(&dn_timeout, 1, dummynet, NULL);
+	callout_reset(&V_dn_timeout, 1, dummynet, NULL);
 }
 
 static void
@@ -1029,7 +1057,7 @@
 {
 	struct dn_flow_queue *q;
 
-	if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
+	if (fs->rq_elements > fs->rq_size * V_dn_max_ratio &&
 	    expire_queues(fs) == 0) {
 		/* No way to get room, use or create overflow queue. */
 		i = fs->rq_size;
@@ -1045,7 +1073,7 @@
 	q->hash_slot = i;
 	q->next = fs->rq[i];
 	q->S = q->F + 1;	/* hack - mark timestamp as invalid. */
-	q->numbytes = fs->pipe->burst + (io_fast ? fs->pipe->bandwidth : 0);
+	q->numbytes = fs->pipe->burst + (V_io_fast ? fs->pipe->bandwidth : 0);
 	fs->rq[i] = q;
 	fs->rq_elements++;
 	return (q);
@@ -1112,9 +1140,9 @@
 	}
 	i = i % fs->rq_size ;
 	/* finally, scan the current list for a match */
-	searches++ ;
+	V_searches++ ;
 	for (prev=NULL, q = fs->rq[i] ; q ; ) {
-	    search_steps++;
+	    V_search_steps++;
 	    if (is_v6 &&
 		    IN6_ARE_ADDR_EQUAL(&id->dst_ip6,&q->id.dst_ip6) &&  
 		    IN6_ARE_ADDR_EQUAL(&id->src_ip6,&q->id.src_ip6) &&  
@@ -1134,7 +1162,7 @@
 		break ; /* found */
 
 	    /* No match. Check if we can expire the entry */
-	    if (pipe_expire && q->head == NULL && q->S == q->F+1 ) {
+	    if (V_pipe_expire && q->head == NULL && q->S == q->F+1 ) {
 		/* entry is idle and not in any heap, expire it */
 		struct dn_flow_queue *old_q = q ;
 
@@ -1189,7 +1217,7 @@
 	u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ?
 	    q->len_bytes : q->len;
 
-	DPRINTF(("\ndummynet: %d q: %2u ", (int)curr_time, q_size));
+	DPRINTF(("\ndummynet: %d q: %2u ", (int)V_curr_time, q_size));
 
 	/* Average queue size estimation. */
 	if (q_size != 0) {
@@ -1207,7 +1235,7 @@
 		 * XXX check wraps...
 		 */
 		if (q->avg) {
-			u_int t = (curr_time - q->idle_time) / fs->lookup_step;
+			u_int t = (V_curr_time - q->idle_time) / fs->lookup_step;
 
 			q->avg = (t < fs->lookup_depth) ?
 			    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
@@ -1273,7 +1301,7 @@
 {
 	struct dn_flow_set *fs;
 
-	SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next)
+	SLIST_FOREACH(fs, &V_flowsethash[HASH(fs_nr)], next)
 		if (fs->fs_nr == fs_nr)
 			return (fs);
 
@@ -1285,7 +1313,7 @@
 {
 	struct dn_pipe *pipe;
 
-	SLIST_FOREACH(pipe, &pipehash[HASH(pipe_nr)], next)
+	SLIST_FOREACH(pipe, &V_pipehash[HASH(pipe_nr)], next)
 		if (pipe->pipe_nr == pipe_nr)
 			return (pipe);
 
@@ -1328,7 +1356,7 @@
 	is_pipe = (cmd->opcode == O_PIPE);
 
 	DUMMYNET_LOCK();
-	io_pkt++;
+	V_io_pkt++;
 	/*
 	 * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
 	 *
@@ -1405,29 +1433,29 @@
 		goto done;
 
 	if (is_pipe) {			/* Fixed rate queues. */
-		if (q->idle_time < curr_time) {
+		if (q->idle_time < V_curr_time) {
 			/* Calculate available burst size. */
 			q->numbytes +=
-			    (curr_time - q->idle_time) * pipe->bandwidth;
+			    (V_curr_time - q->idle_time) * pipe->bandwidth;
 			if (q->numbytes > pipe->burst)
 				q->numbytes = pipe->burst;
-			if (io_fast)
+			if (V_io_fast)
 				q->numbytes += pipe->bandwidth;
 		}
 	} else {			/* WF2Q. */
-		if (pipe->idle_time < curr_time) {
+		if (pipe->idle_time < V_curr_time) {
 			/* Calculate available burst size. */
 			pipe->numbytes +=
-			    (curr_time - pipe->idle_time) * pipe->bandwidth;
+			    (V_curr_time - pipe->idle_time) * pipe->bandwidth;
 			if (pipe->numbytes > pipe->burst)
 				pipe->numbytes = pipe->burst;
-			if (io_fast)
+			if (V_io_fast)
 				pipe->numbytes += pipe->bandwidth;
 		}
-		pipe->idle_time = curr_time;
+		pipe->idle_time = V_curr_time;
 	}
 	/* Necessary for both: fixed rate & WF2Q queues. */
-	q->idle_time = curr_time;
+	q->idle_time = V_curr_time;
 
 	/*
 	 * If we reach this point the flow was previously idle, so we need
@@ -1442,11 +1470,11 @@
 			q->extra_bits = compute_extra_bits(m, pipe);
 			t = set_ticks(m, q, pipe);
 		}
-		q->sched_time = curr_time;
+		q->sched_time = V_curr_time;
 		if (t == 0)		/* Must process it now. */
 			ready_event(q, &head, &tail);
 		else
-			heap_insert(&ready_heap, curr_time + t , q);
+			heap_insert(&V_ready_heap, V_curr_time + t , q);
 	} else {
 		/*
 		 * WF2Q. First, compute start time S: if the flow was
@@ -1494,7 +1522,7 @@
 					printf("dummynet: OUCH! pipe should have been idle!\n");
 				DPRINTF(("dummynet: waking up pipe %d at %d\n",
 				    pipe->pipe_nr, (int)(q->F >> MY_M)));
-				pipe->sched_time = curr_time;
+				pipe->sched_time = V_curr_time;
 				ready_event_wfq(pipe, &head, &tail);
 			}
 		}
@@ -1502,7 +1530,7 @@
 done:
 	if (head == m && dir != DN_TO_IFB_FWD && dir != DN_TO_ETH_DEMUX &&
 	    dir != DN_TO_ETH_OUT) {	/* Fast io. */
-		io_pkt_fast++;
+		V_io_pkt_fast++;
 		if (m->m_nextpkt != NULL)
 			printf("dummynet: fast io: pkt chain detected!\n");
 		head = m->m_nextpkt = NULL;
@@ -1515,7 +1543,7 @@
 	return (0);
 
 dropit:
-	io_pkt_drop++;
+	V_io_pkt_drop++;
 	if (q)
 		q->drops++;
 	DUMMYNET_UNLOCK();
@@ -1610,9 +1638,9 @@
 
 	DUMMYNET_LOCK();
 	/* Free heaps so we don't have unwanted events. */
-	heap_free(&ready_heap);
-	heap_free(&wfq_ready_heap);
-	heap_free(&extract_heap);
+	heap_free(&V_ready_heap);
+	heap_free(&V_wfq_ready_heap);
+	heap_free(&V_extract_heap);
 
 	/*
 	 * Now purge all queued pkts and delete all pipes.
@@ -1620,13 +1648,13 @@
 	 * XXXGL: can we merge the for(;;) cycles into one or not?
 	 */
 	for (i = 0; i < HASHSIZE; i++)
-		SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) {
-			SLIST_REMOVE(&flowsethash[i], fs, dn_flow_set, next);
+		SLIST_FOREACH_SAFE(fs, &V_flowsethash[i], next, fs1) {
+			SLIST_REMOVE(&V_flowsethash[i], fs, dn_flow_set, next);
 			purge_flow_set(fs, 1);
 		}
 	for (i = 0; i < HASHSIZE; i++)
-		SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) {
-			SLIST_REMOVE(&pipehash[i], pipe, dn_pipe, next);
+		SLIST_FOREACH_SAFE(pipe, &V_pipehash[i], next, pipe1) {
+			SLIST_REMOVE(&V_pipehash[i], pipe, dn_pipe, next);
 			purge_pipe(pipe);
 			free_pipe(pipe);
 		}
@@ -1659,13 +1687,13 @@
 		free(x->w_q_lookup, M_DUMMYNET);
 		x->w_q_lookup = NULL;
 	}
-	if (red_lookup_depth == 0) {
+	if (V_red_lookup_depth == 0) {
 		printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth"
 		    "must be > 0\n");
 		free(x, M_DUMMYNET);
 		return (EINVAL);
 	}
-	x->lookup_depth = red_lookup_depth;
+	x->lookup_depth = V_red_lookup_depth;
 	x->w_q_lookup = (u_int *)malloc(x->lookup_depth * sizeof(int),
 	    M_DUMMYNET, M_NOWAIT);
 	if (x->w_q_lookup == NULL) {
@@ -1683,12 +1711,12 @@
 		x->w_q_lookup[i] =
 		    SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
 
-	if (red_avg_pkt_size < 1)
-		red_avg_pkt_size = 512;
-	x->avg_pkt_size = red_avg_pkt_size;
-	if (red_max_pkt_size < 1)
-		red_max_pkt_size = 1500;
-	x->max_pkt_size = red_max_pkt_size;
+	if (V_red_avg_pkt_size < 1)
+		V_red_avg_pkt_size = 512;
+	x->avg_pkt_size = V_red_avg_pkt_size;
+	if (V_red_max_pkt_size < 1)
+		V_red_max_pkt_size = 1500;
+	x->max_pkt_size = V_red_max_pkt_size;
 	return (0);
 }
 
@@ -1699,7 +1727,7 @@
 	int l = pfs->rq_size;
 
 	if (l == 0)
-	    l = dn_hash_size;
+	    l = V_dn_hash_size;
 	if (l < 4)
 	    l = 4;
 	else if (l > DN_MAX_HASH_SIZE)
@@ -1725,12 +1753,12 @@
 	x->plr = src->plr;
 	x->flow_mask = src->flow_mask;
 	if (x->flags_fs & DN_QSIZE_IS_BYTES) {
-		if (x->qsize > pipe_byte_limit)
+		if (x->qsize > V_pipe_byte_limit)
 			x->qsize = 1024 * 1024;
 	} else {
 		if (x->qsize == 0)
 			x->qsize = 50;
-		if (x->qsize > pipe_slot_limit)
+		if (x->qsize > V_pipe_slot_limit)
 			x->qsize = 50;
 	}
 	/* Configuring RED. */
@@ -1790,12 +1818,12 @@
 			for (i = 0; i <= pipe->fs.rq_size; i++)
 				for (q = pipe->fs.rq[i]; q; q = q->next) {
 					q->numbytes = p->burst +
-					    (io_fast ? p->bandwidth : 0);
+					    (V_io_fast ? p->bandwidth : 0);
 				}
 
 		pipe->bandwidth = p->bandwidth;
 		pipe->burst = p->burst;
-		pipe->numbytes = pipe->burst + (io_fast ? pipe->bandwidth : 0);
+		pipe->numbytes = pipe->burst + (V_io_fast ? pipe->bandwidth : 0);
 		bcopy(p->if_name, pipe->if_name, sizeof(p->if_name));
 		pipe->ifp = NULL;		/* reset interface ptr */
 		pipe->delay = p->delay;
@@ -1835,7 +1863,7 @@
 				free_pipe(pipe);
 				return (error);
 			}
-			SLIST_INSERT_HEAD(&pipehash[HASH(pipe->pipe_nr)],
+			SLIST_INSERT_HEAD(&V_pipehash[HASH(pipe->pipe_nr)],
 			    pipe, next);
 		}
 		DUMMYNET_UNLOCK();
@@ -1886,7 +1914,7 @@
 				free(fs, M_DUMMYNET);
 				return (error);
 			}
-			SLIST_INSERT_HEAD(&flowsethash[HASH(fs->fs_nr)],
+			SLIST_INSERT_HEAD(&V_flowsethash[HASH(fs->fs_nr)],
 			    fs, next);
 		}
 		DUMMYNET_UNLOCK();
@@ -1945,16 +1973,16 @@
 
     DUMMYNET_LOCK_ASSERT();
 
-    heap_free(&ready_heap);
-    heap_free(&wfq_ready_heap);
-    heap_free(&extract_heap);
+    heap_free(&V_ready_heap);
+    heap_free(&V_wfq_ready_heap);
+    heap_free(&V_extract_heap);
     /* remove all references to this pipe from flow_sets */
     for (i = 0; i < HASHSIZE; i++)
-	SLIST_FOREACH(fs, &flowsethash[i], next)
+	SLIST_FOREACH(fs, &V_flowsethash[i], next)
 		purge_flow_set(fs, 0);
 
     for (i = 0; i < HASHSIZE; i++) {
-	SLIST_FOREACH(pipe, &pipehash[i], next) {
+	SLIST_FOREACH(pipe, &V_pipehash[i], next) {
 		purge_flow_set(&(pipe->fs), 0);
 
 		mnext = pipe->head;
@@ -1992,22 +2020,22 @@
 	}
 
 	/* Unlink from list of pipes. */
-	SLIST_REMOVE(&pipehash[HASH(pipe->pipe_nr)], pipe, dn_pipe, next);
+	SLIST_REMOVE(&V_pipehash[HASH(pipe->pipe_nr)], pipe, dn_pipe, next);
 
 	/* Remove all references to this pipe from flow_sets. */
 	for (i = 0; i < HASHSIZE; i++)
-	    SLIST_FOREACH(fs, &flowsethash[i], next)
+	    SLIST_FOREACH(fs, &V_flowsethash[i], next)
 		if (fs->pipe == pipe) {
 			printf("dummynet: ++ ref to pipe %d from fs %d\n",
 			    p->pipe_nr, fs->fs_nr);
 			fs->pipe = NULL ;
 			purge_flow_set(fs, 0);
 		}
-	fs_remove_from_heap(&ready_heap, &(pipe->fs));
+	fs_remove_from_heap(&V_ready_heap, &(pipe->fs));
 	purge_pipe(pipe); /* remove all data associated to this pipe */
 	/* remove reference to here from extract_heap and wfq_ready_heap */
-	pipe_remove_from_heap(&extract_heap, pipe);
-	pipe_remove_from_heap(&wfq_ready_heap, pipe);
+	pipe_remove_from_heap(&V_extract_heap, pipe);
+	pipe_remove_from_heap(&V_wfq_ready_heap, pipe);
 	DUMMYNET_UNLOCK();
 
 	free_pipe(pipe);
@@ -2023,7 +2051,7 @@
 	}
 
 	/* Unlink from list of flowsets. */
-	SLIST_REMOVE( &flowsethash[HASH(fs->fs_nr)], fs, dn_flow_set, next);
+	SLIST_REMOVE( &V_flowsethash[HASH(fs->fs_nr)], fs, dn_flow_set, next);
 
 	if (fs->pipe != NULL) {
 	    /* Update total weight on parent pipe and cleanup parent heaps. */
@@ -2085,10 +2113,10 @@
      * Compute size of data structures: list of pipes and flow_sets.
      */
     for (i = 0; i < HASHSIZE; i++) {
-	SLIST_FOREACH(pipe, &pipehash[i], next)
+	SLIST_FOREACH(pipe, &V_pipehash[i], next)
 		size += sizeof(*pipe) +
 		    pipe->fs.rq_elements * sizeof(struct dn_flow_queue);
-	SLIST_FOREACH(fs, &flowsethash[i], next)
+	SLIST_FOREACH(fs, &V_flowsethash[i], next)
 		size += sizeof (*fs) +
 		    fs->rq_elements * sizeof(struct dn_flow_queue);
     }
@@ -2126,7 +2154,7 @@
     }
     bp = buf;
     for (i = 0; i < HASHSIZE; i++)
-	SLIST_FOREACH(pipe, &pipehash[i], next) {
+	SLIST_FOREACH(pipe, &V_pipehash[i], next) {
 		struct dn_pipe *pipe_bp = (struct dn_pipe *)bp;
 
 		/*
@@ -2156,7 +2184,7 @@
 	}
 
     for (i = 0; i < HASHSIZE; i++)
-	SLIST_FOREACH(fs, &flowsethash[i], next) {
+	SLIST_FOREACH(fs, &V_flowsethash[i], next) {
 		struct dn_flow_set *fs_bp = (struct dn_flow_set *)bp;
 
 		bcopy(fs, bp, sizeof(*fs));
@@ -2250,31 +2278,31 @@
 	DUMMYNET_LOCK_INIT();
 
 	for (i = 0; i < HASHSIZE; i++) {
-		SLIST_INIT(&pipehash[i]);
-		SLIST_INIT(&flowsethash[i]);
+		SLIST_INIT(&V_pipehash[i]);
+		SLIST_INIT(&V_flowsethash[i]);
 	}
-	ready_heap.size = ready_heap.elements = 0;
-	ready_heap.offset = 0;
+	V_ready_heap.size = V_ready_heap.elements = 0;
+	V_ready_heap.offset = 0;
 
-	wfq_ready_heap.size = wfq_ready_heap.elements = 0;
-	wfq_ready_heap.offset = 0;
+	V_wfq_ready_heap.size = V_wfq_ready_heap.elements = 0;
+	V_wfq_ready_heap.offset = 0;
 
-	extract_heap.size = extract_heap.elements = 0;
-	extract_heap.offset = 0;
+	V_extract_heap.size = V_extract_heap.elements = 0;
+	V_extract_heap.offset = 0;
 
 	ip_dn_ctl_ptr = ip_dn_ctl;
 	ip_dn_io_ptr = dummynet_io;
 
-	TASK_INIT(&dn_task, 0, dummynet_task, NULL);
-	dn_tq = taskqueue_create_fast("dummynet", M_NOWAIT,
-	    taskqueue_thread_enqueue, &dn_tq);
-	taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet");
+	TASK_INIT(&V_dn_task, 0, dummynet_task, NULL);
+	V_dn_tq = taskqueue_create_fast("dummynet", M_NOWAIT,
+	    taskqueue_thread_enqueue, &V_dn_tq);
+	taskqueue_start_threads(&V_dn_tq, 1, PI_NET, "dummynet");
 
-	callout_init(&dn_timeout, CALLOUT_MPSAFE);
-	callout_reset(&dn_timeout, 1, dummynet, NULL);
+	callout_init(&V_dn_timeout, CALLOUT_MPSAFE);
+	callout_reset(&V_dn_timeout, 1, dummynet, NULL);
 
 	/* Initialize curr_time adjustment mechanics. */
-	getmicrouptime(&prev_t);
+	getmicrouptime(&V_prev_t);
 }
 
 #ifdef KLD_MODULE
@@ -2285,10 +2313,10 @@
 	ip_dn_io_ptr = NULL;
 
 	DUMMYNET_LOCK();
-	callout_stop(&dn_timeout);
+	callout_stop(&V_dn_timeout);
 	DUMMYNET_UNLOCK();
-	taskqueue_drain(dn_tq, &dn_task);
-	taskqueue_free(dn_tq);
+	taskqueue_drain(V_dn_tq, &V_dn_task);
+	taskqueue_free(V_dn_tq);
 
 	dummynet_flush();
 

