PERFORCE change 222974 for review

Robert Watson rwatson at FreeBSD.org
Sun Mar 17 15:14:15 UTC 2013


http://p4web.freebsd.org/@@222974?ac=10

Change 222974 by rwatson at rwatson_cinnamon on 2013/03/17 15:13:24

	A number of changes to libtesla to get it compiling (but not yet
	running) in the kernel runtime environment:
	
	- Remove a large chunk of code that supported dynamic registration
	  of new automata classes at runtime; this was left over from a
	  previous iteration, and the new version of tesla doesn't support
	  this yet.
	
	- Use tesla_store_init() to set up per-thread TESLA state, requiring
	  that this be non-static.  We require a not-yet-present
	  tesla_store_destroy(), which will arrive in the next merge of
	  libtesla to the kernel (thanks Jon!).
	
	- Expose global_store to _KERNEL; this should probably be static but
	  isn't yet.
	
	- Use #define's for hard-coded class and instance count maxima,
	  rather than numbers in code.  This is pertinent because we will
	  initialise per-thread storage in quite a different place in the
	  kernel, so as to avoid malloc() at arbitrary instrumentation
	  points.

Affected files ...

.. //depot/projects/ctsrd/tesla/src/sys/libtesla/state-perthread.c#2 edit
.. //depot/projects/ctsrd/tesla/src/sys/libtesla/store.c#2 edit
.. //depot/projects/ctsrd/tesla/src/sys/libtesla/tesla_internal.h#3 edit
.. //depot/projects/ctsrd/tesla/src/sys/libtesla/update.c#3 edit

Differences ...

==== //depot/projects/ctsrd/tesla/src/sys/libtesla/state-perthread.c#2 (text+ko) ====

@@ -48,259 +48,50 @@
 #ifdef _KERNEL
 
 /*
- * Global state used to manage per-thread storage slots for TESLA per-thread
- * assertions.  tspd_tesla_classp is non-NULL when a slot has been allocated.
- */
-static struct tesla_class_perthread_desc {
-	struct tesla_class	*tspd_tesla_classp;
-	size_t			 tspd_len;
-} tesla_class_perthread_desc[TESLA_PERTHREAD_MAX];
-static struct sx tesla_class_perthread_sx;
-
-/*
  * Registration state for per-thread storage.
  */
-static eventhandler_tag	tesla_class_perthread_ctor_tag;
-static eventhandler_tag	tesla_class_perthread_dtor_tag;
+static eventhandler_tag	tesla_perthread_ctor_tag;
+static eventhandler_tag	tesla_perthread_dtor_tag;
 
 static void
-tesla_class_perthread_ctor(__unused void *arg, struct thread *td)
+tesla_perthread_ctor(__unused void *arg, struct thread *td)
 {
-	struct tesla_class_perthread_desc *tspdp;
-	struct tesla_class *tsp;
-	struct tesla_table *ttp;
-	u_int index;
+	struct tesla_store *store;
+	uint32_t error;
 
-	sx_slock(&tesla_class_perthread_sx);
-	for (index = 0; index < TESLA_PERTHREAD_MAX; index++) {
-		tspdp = &tesla_class_perthread_desc[index];
-		tsp = tspdp->tspd_tesla_classp;
-		if (tsp == NULL) {
-			td->td_tesla[index] = NULL;
-			continue;
-		}
-		ttp = malloc(tspdp->tspd_len, M_TESLA, M_WAITOK | M_ZERO);
-		ttp->tt_length = tsp->ts_limit;
-		ttp->tt_free = tsp->ts_limit;
-		td->td_tesla[index] = ttp;
-	}
-	sx_sunlock(&tesla_class_perthread_sx);
+	store = tesla_malloc(sizeof(*store));
+	error = tesla_store_init(store, TESLA_SCOPE_PERTHREAD,
+	    TESLA_MAX_CLASSES, TESLA_MAX_INSTANCES);
+	tesla_assert(error == TESLA_SUCCESS, ("tesla_store_init failed"));
+	td->td_tesla = store;
 }
 
 static void
-tesla_class_perthread_dtor_locked(struct thread *td)
+tesla_perthread_dtor(__unused void *arg, struct thread *td)
 {
-	u_int index;
+	struct tesla_store *store;
 
-	sx_assert(&tesla_class_perthread_sx, SX_LOCKED);
-	for (index = 0; index < TESLA_PERTHREAD_MAX; index++) {
-		if (td->td_tesla[index] == NULL)
-			continue;
-		free(M_TESLA, td->td_tesla[index]);
-		td->td_tesla[index] = NULL;
-	}
+	store = td->td_tesla;
+	td->td_tesla = NULL;
+	tesla_store_destroy(store);
+	tesla_free(store);
 }
 
 static void
-tesla_class_perthread_dtor(__unused void *arg, struct thread *td)
+tesla_perthread_sysinit(__unused void *arg)
 {
 
-	sx_slock(&tesla_class_perthread_sx);
-	tesla_class_perthread_dtor_locked(td);
-	sx_sunlock(&tesla_class_perthread_sx);
+	tesla_perthread_ctor_tag = EVENTHANDLER_REGISTER(thread_ctor,
+	    tesla_perthread_ctor, NULL, EVENTHANDLER_PRI_ANY);
+	tesla_perthread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
+	    tesla_perthread_dtor, NULL, EVENTHANDLER_PRI_ANY);
 }
+SYSINIT(tesla_perthread, SI_SUB_TESLA, SI_ORDER_FIRST,
+    tesla_perthread_sysinit, NULL);
 
-static void
-tesla_class_perthread_sysinit(__unused void *arg)
-{
+#endif /* _KERNEL */
 
-	sx_init(&tesla_class_perthread_sx, "tesla_class_perthread_sx");
-	tesla_class_perthread_ctor_tag = EVENTHANDLER_REGISTER(thread_ctor,
-	    tesla_class_perthread_ctor, NULL, EVENTHANDLER_PRI_ANY);
-	tesla_class_perthread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
-	    tesla_class_perthread_dtor, NULL, EVENTHANDLER_PRI_ANY);
-}
-SYSINIT(tesla_class_perthread, SI_SUB_TESLA, SI_ORDER_FIRST,
-    tesla_class_perthread_sysinit, NULL);
-
-static void
-tesla_class_perthread_sysuninit(__unused void *arg)
-{
-	struct proc *p;
-	struct thread *td;
-
-	/*
-	 * XXXRW: Possibility of a race for in-flight handlers and
-	 * instrumentation?
-	 */
-	EVENTHANDLER_DEREGISTER(tesla_class_perthread_ctor,
-	    tesla_class_perthread_ctor_tag);
-	EVENTHANDLER_DEREGISTER(tesla_class_perthread_dtor,
-	    tesla_class_perthread_dtor_tag);
-
-	sx_xlock(&allproc_lock);
-	sx_xlock(&tesla_class_perthread_sx);
-	FOREACH_PROC_IN_SYSTEM(p) {
-		PROC_LOCK(p);
-		FOREACH_THREAD_IN_PROC(p, td) {
-			tesla_class_perthread_dtor_locked(td);
-		}
-		PROC_UNLOCK(p);
-	}
-	sx_xunlock(&tesla_class_perthread_sx);
-	sx_xunlock(&allproc_lock);
-	sx_destroy(&tesla_class_perthread_sx);
-}
-SYSUNINIT(tesla_class_perthread, SI_SUB_TESLA, SI_ORDER_FIRST,
-    tesla_class_perthread_sysuninit, NULL);
-
 int
-tesla_class_perthread_new(struct tesla_class *tsp)
-{
-	struct tesla_class_perthread_desc *tspdp;
-	struct tesla_table *ttp;
-	struct proc *p;
-	struct thread *td;
-	int looped;
-	u_int index;
-
-	/*
-	 * First, allocate a TESLA per-thread storage slot, if available.
-	 */
-	tspdp = NULL;
-	sx_xlock(&tesla_class_perthread_sx);
-	for (index = 0; index < TESLA_PERTHREAD_MAX; index++) {
-		if (tesla_class_perthread_desc[index].tspd_tesla_classp
-		    == NULL) {
-			tspdp = &tesla_class_perthread_desc[index];
-			break;
-		}
-	}
-	if (tspdp == NULL) {
-		sx_xunlock(&tesla_class_perthread_sx);
-		return (TESLA_ERROR_ENOMEM);
-	}
-	tsp->ts_perthread_index = index;
-	tspdp->tspd_tesla_classp = tsp;
-	tspdp->tspd_len = sizeof(*ttp) + sizeof(struct tesla_instance) *
-	    tsp->ts_limit;
-
-	/*
-	 * Walk all existing threads and add required allocations.  If we
-	 * can't allocate under the process lock, we have to loop out, use
-	 * M_WAITOK, and then repeat.  This looks starvation-prone, but
-	 * actually isn't: holding tesla_class_perthread_sx blocks further
-	 * thread allocations from taking place, so the main concern is
-	 * in-progress allocations, which will be bounded in number.
-	 */
-	ttp = NULL;
-	looped = 0;
-	sx_slock(&allproc_lock);
-	FOREACH_PROC_IN_SYSTEM(p) {
-loop:
-		if (looped) {
-			KASSERT(ttp == NULL,
-			    ("tesla_class_perthread_new: ttp not NULL"));
-			ttp = malloc(tspdp->tspd_len, M_TESLA,
-			    M_WAITOK | M_ZERO);
-			looped = 0;
-		}
-		PROC_LOCK(p);
-		FOREACH_THREAD_IN_PROC(p, td) {
-			/*
-			 * If we looped, then some threads may already have
-			 * memory; skip them.
-			 */
-			if (td->td_tesla[index] != NULL)
-				continue;
-			if (ttp == NULL)
-				ttp = malloc(tspdp->tspd_len, M_TESLA,
-				    M_NOWAIT | M_ZERO);
-			if (ttp == NULL) {
-				PROC_UNLOCK(p);
-				looped = 1;
-				goto loop;
-			}
-			ttp->tt_length = tsp->ts_limit;
-			ttp->tt_free = tsp->ts_limit;
-			td->td_tesla[index] = ttp;
-			ttp = NULL;
-		}
-		PROC_UNLOCK(p);
-	}
-	sx_sunlock(&allproc_lock);
-	/* Due to races, we may have allocated an extra, so free it now. */
-	if (ttp != NULL)
-		free(ttp, M_TESLA);
-	sx_xunlock(&tesla_class_perthread_sx);
-	return (TESLA_SUCCESS);
-}
-
-void
-tesla_class_perthread_destroy(struct tesla_class *tsp)
-{
-	struct tesla_class_perthread_desc *tspdp;
-	struct proc *p;
-	struct thread *td;
-	u_int index;
-
-	sx_xlock(&tesla_class_perthread_sx);
-	index = tsp->ts_perthread_index;
-	tspdp = &tesla_class_perthread_desc[index];
-
-	/*
-	 * First, walk all threads and release resources.  This is easier on
-	 * free than alloc due to the non-blocking nature of free.
-	 *
-	 * XXXRW: Do we need a test for td->td_tesla[index] == NULL and a
-	 * continue?  I think probably not.
-	 */
-	sx_slock(&allproc_lock);
-	FOREACH_PROC_IN_SYSTEM(p) {
-		PROC_LOCK(p);
-		FOREACH_THREAD_IN_PROC(p, td) {
-			free(M_TESLA, td->td_tesla[index]);
-			td->td_tesla[index] = NULL;
-		}
-		PROC_UNLOCK(p);
-	}
-	sx_unlock(&allproc_lock);
-
-	/*
-	 * Finally, release the reservation.
-	 */
-	tspdp->tspd_tesla_classp = NULL;
-	tspdp->tspd_len = 0;
-	sx_xunlock(&tesla_class_perthread_sx);
-}
-
-void
-tesla_class_perthread_flush(struct tesla_class *tsp)
-{
-	struct tesla_table *ttp;
-
-	ttp = curthread->td_tesla[tsp->ts_perthread_index];
-	bzero(&ttp->tt_instances,
-	    sizeof(struct tesla_instance) * ttp->tt_length);
-	ttp->tt_free = ttp->tt_length;
-}
-
-int
-tesla_class_perthread_gettable(struct tesla_class *tsp,
-    struct tesla_table **ttpp)
-{
-	struct tesla_table *ttp;
-
-	ttp = curthread->td_tesla[tsp->ts_perthread_index];
-	KASSERT(ttp != NULL,
-	    ("tesla_class_perthread_gettable: NULL tesla thread state"));
-	*ttpp = ttp;
-	return (TESLA_SUCCESS);
-}
-
-#else  /* !_KERNEL */
-
-int
 tesla_class_perthread_postinit(__unused struct tesla_class *c)
 {
 	return 0;
@@ -320,5 +111,3 @@
 tesla_class_perthread_destroy(__unused struct tesla_class *c)
 {
 }
-
-#endif /* _KERNEL */

==== //depot/projects/ctsrd/tesla/src/sys/libtesla/store.c#2 (text+ko) ====

@@ -38,17 +38,14 @@
 #ifndef _KERNEL
 #include <errno.h>
 
-struct tesla_store global_store = { .length = 0 };
-
 /** The pthreads key used to identify TESLA data. */
 pthread_key_t	pthread_key(void);
 #endif
 
+struct tesla_store global_store = { .length = 0 };
+
 static void	tesla_class_acquire(tesla_class*);
 
-static int	tesla_store_init(tesla_store*,
-		uint32_t context, uint32_t classes, uint32_t instances);
-
 int32_t
 tesla_store_get(uint32_t context, uint32_t classes, uint32_t instances,
                 tesla_store* *storep)
@@ -101,7 +98,7 @@
 }
 
 
-static int32_t
+int32_t
 tesla_store_init(tesla_store *store, uint32_t context,
                  uint32_t classes, uint32_t instances)
 {

==== //depot/projects/ctsrd/tesla/src/sys/libtesla/tesla_internal.h#3 (text+ko) ====

@@ -221,12 +221,26 @@
 };
 
 /**
+ * Initialise @ref tesla_store internals.
+ * Locking is the responsibility of the caller.
+ */
+int	tesla_store_init(tesla_store*, uint32_t context, uint32_t classes,
+		uint32_t instances);
+void	tesla_store_destroy(tesla_store*);
+
+/**
  * Initialize @ref tesla_class internals.
  * Locking is the responsibility of the caller.
  */
 int	tesla_class_init(struct tesla_class*, uint32_t context,
 		uint32_t instances);
 
+/*
+ * XXXRW: temporarily, maximum number of classes and instances are hard-coded
+ * constants.  In the future, this should somehow be more dynamic.
+ */
+#define	TESLA_MAX_CLASSES		12
+#define	TESLA_MAX_INSTANCES		8
 
 /*
  * When the assertion fails, what to do?

==== //depot/projects/ctsrd/tesla/src/sys/libtesla/update.c#3 (text+ko) ====

@@ -69,7 +69,8 @@
 	}
 
 	struct tesla_store *store;
-	CHECK(tesla_store_get, tesla_context, 12, 8, &store);
+	CHECK(tesla_store_get, tesla_context, TESLA_MAX_CLASSES,
+	    TESLA_MAX_INSTANCES, &store);
 	VERBOSE_PRINT("store: 0x%tx", (intptr_t) store);
 	VERBOSE_PRINT("\n----\n");
 


More information about the p4-projects mailing list