de-virtualize pf sysctls

Nikos Vassiliadis nvass at gmx.com
Sun Jun 16 11:14:04 UTC 2013


Hi,

On 06/12/2013 08:51 PM, Mikolaj Golub wrote:
> On Sat, Jun 08, 2013 at 04:50:32PM +0200, Nikos Vassiliadis wrote:
>> On 06/08/2013 04:11 PM, Nikos Vassiliadis wrote:
>>> Hi,
>>>
>>> Please review this patch. These two variables are RO-tunables and
>>> cannot be changed at runtime. As such, it is not useful to
>>> virtualize them.
>
> This looks correct to me. Also, it looks like V_pf_hashmask and
> V_pf_srchashmask can be de-virtualized then.

Yes, that is taken care of in this version. I am not sure whether
I have placed pf_hashmask and pf_srchashmask properly in pfvar.h.
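
For anyone reviewing who does not track VIMAGE closely, the change
boils down to turning the per-vnet declarations into plain globals.
A minimal sketch using pf_hashmask as the example (the real hunks,
split across pfvar.h and pf.c, are in the attached patch):

/* Before: one copy per vnet, reached through the V_ accessor macro. */
VNET_DEFINE(u_long, pf_hashmask);		/* pf.c */
VNET_DECLARE(u_long, pf_hashmask);		/* pfvar.h */
#define	V_pf_hashmask	VNET(pf_hashmask)

/* After: a single global shared by every vnet. */
u_long		pf_hashmask;			/* pf.c */
extern u_long	pf_hashmask;			/* pfvar.h */

Since states_hashsize and source_nodes_hashsize are CTLFLAG_RDTUN,
they are read from the kernel environment only once at boot (for
example, net.pf.states_hashsize="65536" in loader.conf; the value is
just an illustration), so every vnet would fetch the same number
anyway and a per-vnet copy buys nothing.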

Please review, thanks.

Nikos

-------------- next part --------------
Index: sys/net/pfvar.h
===================================================================
--- sys/net/pfvar.h	(revision 251794)
+++ sys/net/pfvar.h	(working copy)
@@ -1659,19 +1659,17 @@ struct pf_idhash {
 	struct mtx			lock;
 };
 
+extern u_long		pf_hashmask;
+extern u_long		pf_srchashmask;
 #define	PF_HASHSIZ	(32768)
 VNET_DECLARE(struct pf_keyhash *, pf_keyhash);
 VNET_DECLARE(struct pf_idhash *, pf_idhash);
-VNET_DECLARE(u_long, pf_hashmask);
 #define V_pf_keyhash	VNET(pf_keyhash)
 #define	V_pf_idhash	VNET(pf_idhash)
-#define	V_pf_hashmask	VNET(pf_hashmask)
 VNET_DECLARE(struct pf_srchash *, pf_srchash);
-VNET_DECLARE(u_long, pf_srchashmask);
 #define	V_pf_srchash	VNET(pf_srchash)
-#define V_pf_srchashmask VNET(pf_srchashmask)
 
-#define PF_IDHASH(s)	(be64toh((s)->id) % (V_pf_hashmask + 1))
+#define PF_IDHASH(s)	(be64toh((s)->id) % (pf_hashmask + 1))
 
 VNET_DECLARE(void *, pf_swi_cookie);
 #define V_pf_swi_cookie	VNET(pf_swi_cookie)
Index: sys/netpfil/pf/if_pfsync.c
===================================================================
--- sys/netpfil/pf/if_pfsync.c	(revision 251794)
+++ sys/netpfil/pf/if_pfsync.c	(working copy)
@@ -683,7 +683,7 @@ pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf
 		    pfi_kif_find(clr[i].ifname) == NULL)
 			continue;
 
-		for (int i = 0; i <= V_pf_hashmask; i++) {
+		for (int i = 0; i <= pf_hashmask; i++) {
 			struct pf_idhash *ih = &V_pf_idhash[i];
 			struct pf_state *s;
 relock:
@@ -2045,7 +2045,7 @@ pfsync_bulk_update(void *arg)
 	else
 		i = sc->sc_bulk_hashid;
 
-	for (; i <= V_pf_hashmask; i++) {
+	for (; i <= pf_hashmask; i++) {
 		struct pf_idhash *ih = &V_pf_idhash[i];
 
 		if (s != NULL)
Index: sys/netpfil/pf/pf.c
===================================================================
--- sys/netpfil/pf/pf.c	(revision 251794)
+++ sys/netpfil/pf/pf.c	(working copy)
@@ -353,21 +353,19 @@ VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MA
 static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
 VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
 VNET_DEFINE(struct pf_idhash *, pf_idhash);
-VNET_DEFINE(u_long, pf_hashmask);
 VNET_DEFINE(struct pf_srchash *, pf_srchash);
-VNET_DEFINE(u_long, pf_srchashmask);
 
 SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");
 
-VNET_DEFINE(u_long, pf_hashsize);
-#define	V_pf_hashsize	VNET(pf_hashsize)
-SYSCTL_VNET_UINT(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
-    &VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable");
+u_long	pf_hashmask;
+u_long	pf_srchashmask;
+static u_long	pf_hashsize;
+static u_long	pf_srchashsize;
 
-VNET_DEFINE(u_long, pf_srchashsize);
-#define	V_pf_srchashsize	VNET(pf_srchashsize)
-SYSCTL_VNET_UINT(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
-    &VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable");
+SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
+    &pf_hashsize, 0, "Size of pf(4) states hashtable");
+SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
+    &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");
 
 VNET_DEFINE(void *, pf_swi_cookie);
 
@@ -383,7 +381,7 @@ pf_hashkey(struct pf_state_key *sk)
 	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
 	    V_pf_hashseed);
 
-	return (h & V_pf_hashmask);
+	return (h & pf_hashmask);
 }
 
 static __inline uint32_t
@@ -404,7 +402,7 @@ pf_hashsrc(struct pf_addr *addr, sa_family_t af)
 		panic("%s: unknown address family %u", __func__, af);
 	}
 
-	return (h & V_pf_srchashmask);
+	return (h & pf_srchashmask);
 }
 
 #ifdef INET6
@@ -566,7 +564,7 @@ pf_overload_task(void *c, int pending)
 	if (SLIST_EMPTY(&queue))
 		return;
 
-	for (int i = 0; i <= V_pf_hashmask; i++) {
+	for (int i = 0; i <= pf_hashmask; i++) {
 		struct pf_idhash *ih = &V_pf_idhash[i];
 		struct pf_state_key *sk;
 		struct pf_state *s;
@@ -698,12 +696,12 @@ pf_initialize()
 	struct pf_srchash	*sh;
 	u_int i;
 
-	TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &V_pf_hashsize);
-	if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize))
-		V_pf_hashsize = PF_HASHSIZ;
-	TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &V_pf_srchashsize);
-	if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize))
-		V_pf_srchashsize = PF_HASHSIZ / 4;
+	TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &pf_hashsize);
+	if (pf_hashsize == 0 || !powerof2(pf_hashsize))
+		pf_hashsize = PF_HASHSIZ;
+	TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &pf_srchashsize);
+	if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
+		pf_srchashsize = PF_HASHSIZ / 4;
 
 	V_pf_hashseed = arc4random();
 
@@ -717,12 +715,12 @@ pf_initialize()
 	V_pf_state_key_z = uma_zcreate("pf state keys",
 	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
 	    UMA_ALIGN_PTR, 0);
-	V_pf_keyhash = malloc(V_pf_hashsize * sizeof(struct pf_keyhash),
+	V_pf_keyhash = malloc(pf_hashsize * sizeof(struct pf_keyhash),
 	    M_PFHASH, M_WAITOK | M_ZERO);
-	V_pf_idhash = malloc(V_pf_hashsize * sizeof(struct pf_idhash),
+	V_pf_idhash = malloc(pf_hashsize * sizeof(struct pf_idhash),
 	    M_PFHASH, M_WAITOK | M_ZERO);
-	V_pf_hashmask = V_pf_hashsize - 1;
-	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
+	pf_hashmask = pf_hashsize - 1;
+	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
 	    i++, kh++, ih++) {
 		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
 		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
@@ -735,10 +733,10 @@ pf_initialize()
 	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
 	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
 	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
-	V_pf_srchash = malloc(V_pf_srchashsize * sizeof(struct pf_srchash),
+	V_pf_srchash = malloc(pf_srchashsize * sizeof(struct pf_srchash),
 	  M_PFHASH, M_WAITOK|M_ZERO);
-	V_pf_srchashmask = V_pf_srchashsize - 1;
-	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++)
+	pf_srchashmask = pf_srchashsize - 1;
+	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
 		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);
 
 	/* ALTQ */
@@ -775,7 +773,7 @@ pf_cleanup()
 	struct pf_send_entry	*pfse, *next;
 	u_int i;
 
-	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
+	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
 	    i++, kh++, ih++) {
 		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
 		    __func__));
@@ -787,7 +785,7 @@ pf_cleanup()
 	free(V_pf_keyhash, M_PFHASH);
 	free(V_pf_idhash, M_PFHASH);
 
-	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
+	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
 		KASSERT(LIST_EMPTY(&sh->nodes),
 		    ("%s: source node hash not empty", __func__));
 		mtx_destroy(&sh->lock);
@@ -1177,7 +1175,7 @@ pf_find_state_byid(uint64_t id, uint32_t creatorid
 
 	V_pf_status.fcounters[FCNT_STATE_SEARCH]++;
 
-	ih = &V_pf_idhash[(be64toh(id) % (V_pf_hashmask + 1))];
+	ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];
 
 	PF_HASHROW_LOCK(ih);
 	LIST_FOREACH(s, &ih->states, entry)
@@ -1373,7 +1371,7 @@ pf_purge_thread(void *v)
 			/*
 			 * Now purge everything.
 			 */
-			pf_purge_expired_states(0, V_pf_hashmask);
+			pf_purge_expired_states(0, pf_hashmask);
 			pf_purge_expired_fragments();
 			pf_purge_expired_src_nodes();
 
@@ -1396,7 +1394,7 @@ pf_purge_thread(void *v)
 		PF_RULES_RUNLOCK();
 
 		/* Process 1/interval fraction of the state table every run. */
-		idx = pf_purge_expired_states(idx, V_pf_hashmask /
+		idx = pf_purge_expired_states(idx, pf_hashmask /
 			    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
 
 		/* Purge other expired types every PFTM_INTERVAL seconds. */
@@ -1462,7 +1460,7 @@ pf_purge_expired_src_nodes()
 	struct pf_src_node	*cur, *next;
 	int i;
 
-	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
+	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
 	    PF_HASHROW_LOCK(sh);
 	    LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
 		if (cur->states <= 0 && cur->expire <= time_uptime) {
@@ -1614,7 +1612,7 @@ relock:
 		PF_HASHROW_UNLOCK(ih);
 
 		/* Return when we hit end of hash. */
-		if (++i > V_pf_hashmask) {
+		if (++i > pf_hashmask) {
 			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
 			return (0);
 		}
Index: sys/netpfil/pf/pf_ioctl.c
===================================================================
--- sys/netpfil/pf/pf_ioctl.c	(revision 251794)
+++ sys/netpfil/pf/pf_ioctl.c	(working copy)
@@ -1577,7 +1577,7 @@ DIOCCHANGERULE_error:
 		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
 		u_int			 i, killed = 0;
 
-		for (i = 0; i <= V_pf_hashmask; i++) {
+		for (i = 0; i <= pf_hashmask; i++) {
 			struct pf_idhash *ih = &V_pf_idhash[i];
 
 relock_DIOCCLRSTATES:
@@ -1622,7 +1622,7 @@ relock_DIOCCLRSTATES:
 			break;
 		}
 
-		for (i = 0; i <= V_pf_hashmask; i++) {
+		for (i = 0; i <= pf_hashmask; i++) {
 			struct pf_idhash *ih = &V_pf_idhash[i];
 
 relock_DIOCKILLSTATES:
@@ -1726,7 +1726,7 @@ relock_DIOCKILLSTATES:
 		p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK);
 		nr = 0;
 
-		for (i = 0; i <= V_pf_hashmask; i++) {
+		for (i = 0; i <= pf_hashmask; i++) {
 			struct pf_idhash *ih = &V_pf_idhash[i];
 
 			PF_HASHROW_LOCK(ih);
@@ -3078,7 +3078,7 @@ DIOCCHANGEADDR_error:
 		uint32_t		 i, nr = 0;
 
 		if (psn->psn_len == 0) {
-			for (i = 0, sh = V_pf_srchash; i < V_pf_srchashmask;
+			for (i = 0, sh = V_pf_srchash; i < pf_srchashmask;
 			    i++, sh++) {
 				PF_HASHROW_LOCK(sh);
 				LIST_FOREACH(n, &sh->nodes, entry)
@@ -3090,7 +3090,7 @@ DIOCCHANGEADDR_error:
 		}
 
 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK);
-		for (i = 0, sh = V_pf_srchash; i < V_pf_srchashmask;
+		for (i = 0, sh = V_pf_srchash; i < pf_srchashmask;
 		    i++, sh++) {
 		    PF_HASHROW_LOCK(sh);
 		    LIST_FOREACH(n, &sh->nodes, entry) {
@@ -3147,7 +3147,7 @@ DIOCCHANGEADDR_error:
 		struct pf_src_node	*sn;
 		u_int			i, killed = 0;
 
-		for (i = 0, sh = V_pf_srchash; i < V_pf_srchashmask;
+		for (i = 0, sh = V_pf_srchash; i < pf_srchashmask;
 		    i++, sh++) {
 		    /*
 		     * XXXGL: we don't ever acquire sources hash lock
@@ -3331,7 +3331,7 @@ pf_clear_states(void)
 	struct pf_state	*s;
 	u_int i;
 
-	for (i = 0; i <= V_pf_hashmask; i++) {
+	for (i = 0; i <= pf_hashmask; i++) {
 		struct pf_idhash *ih = &V_pf_idhash[i];
 relock:
 		PF_HASHROW_LOCK(ih);
@@ -3366,7 +3366,7 @@ pf_clear_srcnodes(struct pf_src_node *n)
 	struct pf_state *s;
 	int i;
 
-	for (i = 0; i <= V_pf_hashmask; i++) {
+	for (i = 0; i <= pf_hashmask; i++) {
 		struct pf_idhash *ih = &V_pf_idhash[i];
 
 		PF_HASHROW_LOCK(ih);
@@ -3382,7 +3382,7 @@ pf_clear_srcnodes(struct pf_src_node *n)
 	if (n == NULL) {
 		struct pf_srchash *sh;
 
-		for (i = 0, sh = V_pf_srchash; i < V_pf_srchashmask;
+		for (i = 0, sh = V_pf_srchash; i < pf_srchashmask;
 		    i++, sh++) {
 			PF_HASHROW_LOCK(sh);
 			LIST_FOREACH(n, &sh->nodes, entry) {

