svn commit: r364019 - head/sys/kern

Hans Petter Selasky hselasky at FreeBSD.org
Fri Aug 7 15:32:42 UTC 2020


Author: hselasky
Date: Fri Aug  7 15:32:42 2020
New Revision: 364019
URL: https://svnweb.freebsd.org/changeset/base/364019

Log:
  Add full support for dynamic allocation and freeing of epochs.
  
  Make sure to reclaim epoch structures when they are freed, so that
  the same slots can be allocated and freed again at runtime.
  
  While at it, move the 64 supported epoch control structures to
  statically allocated memory. Overall, this simplifies the management
  and debugging of system epochs.
  
  Reviewed by:		kib, markj
  Differential Revision:	https://reviews.freebsd.org/D25960
  MFC after:		1 week
  Sponsored by:		Mellanox Technologies
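
  For illustration, a minimal consumer sketch against the existing
  epoch(9) KPIs (the "foo" names and error handling are hypothetical
  and not part of this commit; note that with this change epoch_alloc()
  can return NULL once all 64 slots are in use, so callers should
  check):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/errno.h>
	#include <sys/epoch.h>

	static epoch_t foo_epoch;

	static int
	foo_load(void)
	{
		/* May now return NULL when all MAX_EPOCHS slots are taken. */
		foo_epoch = epoch_alloc("foo", EPOCH_PREEMPT);
		if (foo_epoch == NULL)
			return (ENOMEM);
		return (0);
	}

	static void
	foo_unload(void)
	{
		/* Drains pending callbacks and recycles the slot for reuse. */
		epoch_free(foo_epoch);
	}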

Modified:
  head/sys/kern/subr_epoch.c

Modified: head/sys/kern/subr_epoch.c
==============================================================================
--- head/sys/kern/subr_epoch.c	Fri Aug  7 15:13:53 2020	(r364018)
+++ head/sys/kern/subr_epoch.c	Fri Aug  7 15:32:42 2020	(r364019)
@@ -58,8 +58,6 @@ __FBSDID("$FreeBSD$");
 
 #include <ck_epoch.h>
 
-static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");
-
 #ifdef __amd64__
 #define EPOCH_ALIGN CACHE_LINE_SIZE*2
 #else
@@ -79,7 +77,7 @@ typedef struct epoch_record {
 struct epoch {
 	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
 	epoch_record_t e_pcpu_record;
-	int	e_idx;
+	int	e_in_use;
 	int	e_flags;
 	struct sx e_drain_sx;
 	struct mtx e_drain_mtx;
@@ -128,19 +126,23 @@ TAILQ_HEAD (threadlist, thread);
 CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
     ck_epoch_entry_container)
 
-epoch_t	allepochs[MAX_EPOCHS];
+static struct epoch epoch_array[MAX_EPOCHS];
 
 DPCPU_DEFINE(struct grouptask, epoch_cb_task);
 DPCPU_DEFINE(int, epoch_cb_count);
 
 static __read_mostly int inited;
-static __read_mostly int epoch_count;
 __read_mostly epoch_t global_epoch;
 __read_mostly epoch_t global_epoch_preempt;
 
 static void epoch_call_task(void *context __unused);
 static 	uma_zone_t pcpu_zone_record;
 
+static struct sx epoch_sx;
+
+#define	EPOCH_LOCK() sx_xlock(&epoch_sx)
+#define	EPOCH_UNLOCK() sx_xunlock(&epoch_sx)
+
 #ifdef EPOCH_TRACE
 struct stackentry {
 	RB_ENTRY(stackentry) se_node;
@@ -281,6 +283,7 @@ epoch_init(void *arg __unused)
 #ifdef EPOCH_TRACE
 	SLIST_INIT(&thread0.td_epochs);
 #endif
+	sx_init(&epoch_sx, "epoch-sx");
 	inited = 1;
 	global_epoch = epoch_alloc("Global", 0);
 	global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT);
@@ -326,19 +329,48 @@ epoch_t
 epoch_alloc(const char *name, int flags)
 {
 	epoch_t epoch;
+	int i;
 
+	MPASS(name != NULL);
+
 	if (__predict_false(!inited))
 		panic("%s called too early in boot", __func__);
-	epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
+
+	EPOCH_LOCK();
+
+	/*
+	 * Find a free index in the epoch array. Fail and return
+	 * NULL if all MAX_EPOCHS slots are already in use.
+	 */
+	for (i = 0;; i++) {
+		/*
+		 * If too many epochs are currently allocated,
+		 * return NULL.
+		 */
+		if (i == MAX_EPOCHS) {
+			epoch = NULL;
+			goto done;
+		}
+		if (epoch_array[i].e_in_use == 0)
+			break;
+	}
+
+	epoch = epoch_array + i;
 	ck_epoch_init(&epoch->e_epoch);
 	epoch_ctor(epoch);
-	MPASS(epoch_count < MAX_EPOCHS - 2);
 	epoch->e_flags = flags;
-	epoch->e_idx = epoch_count;
 	epoch->e_name = name;
 	sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
 	mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);
-	allepochs[epoch_count++] = epoch;
+
+	/*
+	 * Set e_in_use last, because when this field is set the
+	 * epoch_call_task() function will start scanning this epoch
+	 * structure.
+	 */
+	atomic_store_rel_int(&epoch->e_in_use, 1);
+done:
+	EPOCH_UNLOCK();
 	return (epoch);
 }
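
The e_in_use publication above pairs a release store with the acquire
load in epoch_call_task() further down; a generic sketch of that
pattern, using the existing atomic(9) KPIs (obj, ready, compute() and
consume() are illustrative names, not from this commit):

	/*
	 * Writer: initialize the fields first, then publish with
	 * release semantics so no store is reordered past the flag.
	 */
	obj->data = compute();
	atomic_store_rel_int(&obj->ready, 1);

	/*
	 * Reader: acquire-load the flag before touching the fields,
	 * which guarantees the initialized values are visible.
	 */
	if (atomic_load_acq_int(&obj->ready) != 0)
		consume(obj->data);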
 
@@ -346,13 +378,24 @@ void
 epoch_free(epoch_t epoch)
 {
 
+	EPOCH_LOCK();
+
+	MPASS(epoch->e_in_use != 0);
+
 	epoch_drain_callbacks(epoch);
-	allepochs[epoch->e_idx] = NULL;
+
+	atomic_store_rel_int(&epoch->e_in_use, 0);
+	/*
+	 * Make sure the epoch_call_task() function sees e_in_use equal
+	 * to zero by calling epoch_wait() on the global_epoch:
+	 */
 	epoch_wait(global_epoch);
 	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
 	mtx_destroy(&epoch->e_drain_mtx);
 	sx_destroy(&epoch->e_drain_sx);
-	free(epoch, M_EPOCH);
+	memset(epoch, 0, sizeof(*epoch));
+
+	EPOCH_UNLOCK();
 }
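
On the teardown side, epoch_wait(global_epoch) flushes out any
epoch_call_task() scanner that may still be referencing the slot
before it is zeroed; an illustrative interleaving that this ordering
rules out:

	CPU0: epoch_free()              CPU1: epoch_call_task()
	--------------------------      -------------------------------
	                                epoch_enter(global_epoch)
	                                acquire-loads e_in_use == 1
	store e_in_use = 0              ... still scanning the slot ...
	epoch_wait(global_epoch)
	  (blocks until CPU1 calls      epoch_exit(global_epoch)
	   epoch_exit())
	memset(epoch, 0, ...)           slot is no longer referenced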
 
 static epoch_record_t
@@ -705,8 +748,10 @@ epoch_call_task(void *arg __unused)
 	ck_stack_init(&cb_stack);
 	critical_enter();
 	epoch_enter(global_epoch);
-	for (total = i = 0; i < epoch_count; i++) {
-		if (__predict_false((epoch = allepochs[i]) == NULL))
+	for (total = i = 0; i != MAX_EPOCHS; i++) {
+		epoch = epoch_array + i;
+		if (__predict_false(
+		    atomic_load_acq_int(&epoch->e_in_use) == 0))
 			continue;
 		er = epoch_currecord(epoch);
 		record = &er->er_record;

