git: 8d0ced747a02 - stable/12 - ipfw: fix possible data race between jump cache reading and updating.

Andrey V. Elsukov ae at FreeBSD.org
Tue Aug 24 14:59:38 UTC 2021


The branch stable/12 has been updated by ae:

URL: https://cgit.FreeBSD.org/src/commit/?id=8d0ced747a02c4f2337eccbe9ea2e1e38384f533

commit 8d0ced747a02c4f2337eccbe9ea2e1e38384f533
Author:     Andrey V. Elsukov <ae at FreeBSD.org>
AuthorDate: 2021-08-17 08:08:28 +0000
Commit:     Andrey V. Elsukov <ae at FreeBSD.org>
CommitDate: 2021-08-24 14:59:01 +0000

    ipfw: fix possible data race between jump cache reading and updating.
    
    Jump cache is used to reduce the cost of rule lookup for O_SKIPTO and
    O_CALLRETURN actions. It uses the rule chain id to check the correctness
    of the cached value. But due to a possible race, there is a chance that
    one thread can read an invalid value. In some cases this can lead to an
    out-of-bounds access and panic.
    
    Use thread fence operations to constrain the reordering of accesses.
    Also rename jump_fast and jump_linear functions to jump_cached and
    jump_lookup_pos respectively.
    
    Submitted by:   Arseny Smalyuk
    Obtained from:  Yandex LLC
    Sponsored by:   Yandex LLC
    Differential Revision:  https://reviews.freebsd.org/D31484
    
    (cherry picked from commit 322e5efda8578bb9c0a0ab0ef785cd1e1c222c85)
---
 sys/netpfil/ipfw/ip_fw2.c        | 107 ++++++++++++++++++++++++---------------
 sys/netpfil/ipfw/ip_fw_private.h |  12 ++++-
 2 files changed, 75 insertions(+), 44 deletions(-)

diff --git a/sys/netpfil/ipfw/ip_fw2.c b/sys/netpfil/ipfw/ip_fw2.c
index 9d60b446dc73..aa16f92adf64 100644
--- a/sys/netpfil/ipfw/ip_fw2.c
+++ b/sys/netpfil/ipfw/ip_fw2.c
@@ -143,14 +143,14 @@ VNET_DEFINE(unsigned int, fw_tables_sets) = 0;	/* Don't use set-aware tables */
 /* Use 128 tables by default */
 static unsigned int default_fw_tables = IPFW_TABLES_DEFAULT;
 
+static int jump_lookup_pos(struct ip_fw_chain *chain, struct ip_fw *f, int num,
+    int tablearg, int jump_backwards);
 #ifndef LINEAR_SKIPTO
-static int jump_fast(struct ip_fw_chain *chain, struct ip_fw *f, int num,
+static int jump_cached(struct ip_fw_chain *chain, struct ip_fw *f, int num,
     int tablearg, int jump_backwards);
-#define	JUMP(ch, f, num, targ, back)	jump_fast(ch, f, num, targ, back)
+#define	JUMP(ch, f, num, targ, back)	jump_cached(ch, f, num, targ, back)
 #else
-static int jump_linear(struct ip_fw_chain *chain, struct ip_fw *f, int num,
-    int tablearg, int jump_backwards);
-#define	JUMP(ch, f, num, targ, back)	jump_linear(ch, f, num, targ, back)
+#define	JUMP(ch, f, num, targ, back)	jump_lookup_pos(ch, f, num, targ, back)
 #endif
 
 /*
@@ -1228,60 +1228,83 @@ set_match(struct ip_fw_args *args, int slot,
 	args->flags |= IPFW_ARGS_REF;
 }
 
-#ifndef LINEAR_SKIPTO
-/*
- * Helper function to enable cached rule lookups using
- * cached_id and cached_pos fields in ipfw rule.
- */
 static int
-jump_fast(struct ip_fw_chain *chain, struct ip_fw *f, int num,
+jump_lookup_pos(struct ip_fw_chain *chain, struct ip_fw *f, int num,
     int tablearg, int jump_backwards)
 {
-	int f_pos;
+	int f_pos, i;
 
-	/* If possible use cached f_pos (in f->cached_pos),
-	 * whose version is written in f->cached_id
-	 * (horrible hacks to avoid changing the ABI).
-	 */
-	if (num != IP_FW_TARG && f->cached_id == chain->id)
-		f_pos = f->cached_pos;
-	else {
-		int i = IP_FW_ARG_TABLEARG(chain, num, skipto);
-		/* make sure we do not jump backward */
-		if (jump_backwards == 0 && i <= f->rulenum)
-			i = f->rulenum + 1;
-		if (chain->idxmap != NULL)
-			f_pos = chain->idxmap[i];
-		else
-			f_pos = ipfw_find_rule(chain, i, 0);
-		/* update the cache */
-		if (num != IP_FW_TARG) {
-			f->cached_id = chain->id;
-			f->cached_pos = f_pos;
-		}
-	}
+	i = IP_FW_ARG_TABLEARG(chain, num, skipto);
+	/* make sure we do not jump backward */
+	if (jump_backwards == 0 && i <= f->rulenum)
+		i = f->rulenum + 1;
+
+#ifndef LINEAR_SKIPTO
+	if (chain->idxmap != NULL)
+		f_pos = chain->idxmap[i];
+	else
+		f_pos = ipfw_find_rule(chain, i, 0);
+#else
+	f_pos = chain->idxmap[i];
+#endif /* LINEAR_SKIPTO */
 
 	return (f_pos);
 }
-#else
+
+
+#ifndef LINEAR_SKIPTO
 /*
- * Helper function to enable real fast rule lookups.
+ * Helper function to enable cached rule lookups using
+ * cache.id and cache.pos fields in ipfw rule.
  */
 static int
-jump_linear(struct ip_fw_chain *chain, struct ip_fw *f, int num,
+jump_cached(struct ip_fw_chain *chain, struct ip_fw *f, int num,
     int tablearg, int jump_backwards)
 {
 	int f_pos;
 
-	num = IP_FW_ARG_TABLEARG(chain, num, skipto);
-	/* make sure we do not jump backward */
-	if (jump_backwards == 0 && num <= f->rulenum)
-		num = f->rulenum + 1;
-	f_pos = chain->idxmap[num];
+	/* Can't use cache with IP_FW_TARG */
+	if (num == IP_FW_TARG)
+		return jump_lookup_pos(chain, f, num, tablearg, jump_backwards);
+
+	/*
+	 * If possible use cached f_pos (in f->cache.pos),
+	 * whose version is written in f->cache.id (horrible hacks
+	 * to avoid changing the ABI).
+	 *
+	 * Multiple threads can execute the same rule simultaneously,
+	 * we need to ensure that cache.pos is updated before cache.id.
+	 */
 
+#ifdef __LP64__
+	struct ip_fw_jump_cache cache;
+
+	cache.raw_value = f->cache.raw_value;
+	if (cache.id == chain->id)
+		return (cache.pos);
+
+	f_pos = jump_lookup_pos(chain, f, num, tablearg, jump_backwards);
+
+	cache.pos = f_pos;
+	cache.id = chain->id;
+	f->cache.raw_value = cache.raw_value;
+#else
+	if (f->cache.id == chain->id) {
+		/* Load pos after id */
+		atomic_thread_fence_acq();
+		return (f->cache.pos);
+	}
+
+	f_pos = jump_lookup_pos(chain, f, num, tablearg, jump_backwards);
+
+	f->cache.pos = f_pos;
+	/* Store id after pos */
+	atomic_thread_fence_rel();
+	f->cache.id = chain->id;
+#endif /* !__LP64__ */
 	return (f_pos);
 }
-#endif
+#endif /* !LINEAR_SKIPTO */
 
 #define	TARG(k, f)	IP_FW_ARG_TABLEARG(chain, k, f)
 /*
diff --git a/sys/netpfil/ipfw/ip_fw_private.h b/sys/netpfil/ipfw/ip_fw_private.h
index df0985e6326a..e786a9bf883e 100644
--- a/sys/netpfil/ipfw/ip_fw_private.h
+++ b/sys/netpfil/ipfw/ip_fw_private.h
@@ -281,6 +281,15 @@ struct tables_config;
  *  ACTION_PTR(r)	is the start of the first action (things to do
  *			once a rule matched).
  */
+struct ip_fw_jump_cache {
+	union {
+		struct {
+			uint32_t	id;
+			uint32_t	pos;
+		};
+		uint64_t	raw_value;
+	};
+};
 
 struct ip_fw {
 	uint16_t	act_ofs;	/* offset of action in 32-bit units */
@@ -289,10 +298,9 @@ struct ip_fw {
 	uint8_t		set;		/* rule set (0..31)		*/
 	uint8_t		flags;		/* currently unused		*/
 	counter_u64_t	cntr;		/* Pointer to rule counters	*/
+	struct ip_fw_jump_cache	cache;	/* used by jump_fast            */
 	uint32_t	timestamp;	/* tv_sec of last match		*/
 	uint32_t	id;		/* rule id			*/
-	uint32_t	cached_id;	/* used by jump_fast		*/
-	uint32_t	cached_pos;	/* used by jump_fast		*/
 	uint32_t	refcnt;		/* number of references		*/
 
 	struct ip_fw	*next;		/* linked list of deleted rules */


More information about the dev-commits-src-branches mailing list