svn commit: r367598 - head/sys/kern

Mateusz Guzik <mjg@FreeBSD.org>
Wed Nov 11 18:45:07 UTC 2020


Author: mjg
Date: Wed Nov 11 18:45:06 2020
New Revision: 367598
URL: https://svnweb.freebsd.org/changeset/base/367598

Log:
  thread: batch tid_free calls in thread_reap
  
  This eliminates the highly pessimal pattern of relocking tid_lock from
  multiple CPUs in quick succession. Note that TID freeing is still
  globally serialized.
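
For readers unfamiliar with the pattern, here is a minimal userspace sketch of
the idea (not the kernel code itself): an ID allocator guarded by a pthread
mutex, where freeing a batch of IDs takes the lock once instead of once per ID.
All identifiers (id_lock, id_bitmap, id_free_locked, id_free_batch) are
hypothetical.

#include <assert.h>
#include <pthread.h>
#include <stdint.h>

#define MAX_IDS	4096

static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;
static uint8_t id_bitmap[MAX_IDS / 8];	/* one bit per allocated ID */

static void
id_free_locked(int id)
{

	/* Caller must hold id_lock; mirrors tid_free_locked(). */
	assert(id >= 0 && id < MAX_IDS);
	assert((id_bitmap[id / 8] & (1u << (id % 8))) != 0);
	id_bitmap[id / 8] &= ~(1u << (id % 8));
}

/* Old pattern: one lock/unlock round trip per freed ID. */
void
id_free(int id)
{

	pthread_mutex_lock(&id_lock);
	id_free_locked(id);
	pthread_mutex_unlock(&id_lock);
}

/* New pattern: amortize the lock over the whole batch. */
void
id_free_batch(const int *batch, int n)
{
	int i;

	pthread_mutex_lock(&id_lock);
	for (i = 0; i < n; i++)
		id_free_locked(batch[i]);
	pthread_mutex_unlock(&id_lock);
}

A fixed on-stack batch of 16 entries (as used below in thread_reap) keeps the
stack footprint small while reducing the common-case lock/unlock traffic on the
global lock by a factor of 16.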

Modified:
  head/sys/kern/kern_thread.c

Modified: head/sys/kern/kern_thread.c
==============================================================================
--- head/sys/kern/kern_thread.c	Wed Nov 11 18:43:51 2020	(r367597)
+++ head/sys/kern/kern_thread.c	Wed Nov 11 18:45:06 2020	(r367598)
@@ -133,6 +133,7 @@ static __exclusive_cache_line struct thread *thread_zo
 static void thread_zombie(struct thread *);
 static int thread_unsuspend_one(struct thread *td, struct proc *p,
     bool boundary);
+static void thread_free_batched(struct thread *td);
 
 static struct mtx tid_lock;
 static bitstr_t *tid_bitmap;
@@ -200,21 +201,41 @@ tid_alloc(void)
 }
 
 static void
-tid_free(lwpid_t rtid)
+tid_free_locked(lwpid_t rtid)
 {
 	lwpid_t tid;
 
+	mtx_assert(&tid_lock, MA_OWNED);
 	KASSERT(rtid >= NO_PID,
 	    ("%s: invalid tid %d\n", __func__, rtid));
 	tid = rtid - NO_PID;
-	mtx_lock(&tid_lock);
 	KASSERT(bit_test(tid_bitmap, tid) != 0,
 	    ("thread ID %d not allocated\n", rtid));
 	bit_clear(tid_bitmap, tid);
 	nthreads--;
+}
+
+static void
+tid_free(lwpid_t rtid)
+{
+
+	mtx_lock(&tid_lock);
+	tid_free_locked(rtid);
 	mtx_unlock(&tid_lock);
 }
 
+static void
+tid_free_batch(lwpid_t *batch, int n)
+{
+	int i;
+
+	mtx_lock(&tid_lock);
+	for (i = 0; i < n; i++) {
+		tid_free_locked(batch[i]);
+	}
+	mtx_unlock(&tid_lock);
+}
+
 /*
  * Prepare a thread for use.
  */
@@ -440,6 +461,8 @@ void
 thread_reap(void)
 {
 	struct thread *itd, *ntd;
+	lwpid_t tidbatch[16];
+	int tidbatchn;
 
 	/*
 	 * Reading upfront is pessimal if followed by concurrent atomic_swap,
@@ -450,12 +473,23 @@ thread_reap(void)
 
 	itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&thread_zombies,
 	    (uintptr_t)NULL);
+	tidbatchn = 0;
 	while (itd != NULL) {
 		ntd = itd->td_zombie;
+		tidbatch[tidbatchn] = itd->td_tid;
+		tidbatchn++;
 		thread_cow_free(itd);
-		thread_free(itd);
+		thread_free_batched(itd);
+		if (tidbatchn == nitems(tidbatch)) {
+			tid_free_batch(tidbatch, tidbatchn);
+			tidbatchn = 0;
+		}
 		itd = ntd;
 	}
+
+	if (tidbatchn != 0) {
+		tid_free_batch(tidbatch, tidbatchn);
+	}
 }
 
 /*
@@ -502,8 +536,8 @@ thread_alloc_stack(struct thread *td, int pages)
 /*
  * Deallocate a thread.
  */
-void
-thread_free(struct thread *td)
+static void
+thread_free_batched(struct thread *td)
 {
 
 	EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
@@ -515,9 +549,21 @@ thread_free(struct thread *td)
 	if (td->td_kstack != 0)
 		vm_thread_dispose(td);
 	callout_drain(&td->td_slpcallout);
-	tid_free(td->td_tid);
+	/*
+	 * Freeing handled by the caller.
+	 */
 	td->td_tid = -1;
 	uma_zfree(thread_zone, td);
+}
+
+void
+thread_free(struct thread *td)
+{
+	lwpid_t tid;
+
+	tid = td->td_tid;
+	thread_free_batched(td);
+	tid_free(tid);
 }
 
 void
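
As a usage sketch of the consumer side (again a userspace analogue with
hypothetical names, reusing id_free_batch() from the sketch above): the reaper
takes the whole lock-free zombie list in one atomic exchange, collects IDs into
an on-stack batch, and flushes the batch every 16 entries and once more at the
end, mirroring the new thread_reap() loop.

#include <stdatomic.h>
#include <stddef.h>

void id_free_batch(const int *batch, int n);	/* batched free from the sketch above */

struct zthread {
	int		 z_id;
	struct zthread	*z_next;	/* next zombie, like td_zombie */
};

/* Lock-free singly linked list of zombies, like thread_zombies. */
static _Atomic(struct zthread *) zombie_head;

void
reap(void)
{
	struct zthread *itd, *ntd;
	int idbatch[16];
	int idbatchn;

	/* Grab the entire list in one atomic exchange, as thread_reap() does. */
	itd = atomic_exchange(&zombie_head, (struct zthread *)NULL);
	idbatchn = 0;
	while (itd != NULL) {
		ntd = itd->z_next;
		idbatch[idbatchn++] = itd->z_id;
		/* ... per-thread teardown would go here ... */
		if (idbatchn == (int)(sizeof(idbatch) / sizeof(idbatch[0]))) {
			id_free_batch(idbatch, idbatchn);
			idbatchn = 0;
		}
		itd = ntd;
	}
	if (idbatchn != 0)
		id_free_batch(idbatch, idbatchn);
}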

