svn commit: r360163 - in head/sys: kern sys

Mark Johnston <markj@FreeBSD.org>
Tue Apr 21 17:13:07 UTC 2020


Author: markj
Date: Tue Apr 21 17:13:06 2020
New Revision: 360163
URL: https://svnweb.freebsd.org/changeset/base/360163

Log:
  Handle PCATCH in blockcount_sleep() so it can be interrupted.
  
  blockcount_wait() still unconditionally waits for the count to reach
  zero before returning.
  
  Tested by:	pho (a larger patch)
  Reviewed by:	kib
  Sponsored by:	The FreeBSD Foundation
  Differential Revision:	https://reviews.freebsd.org/D24513
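
  For context, a minimal caller-side sketch of the new return protocol
  (not part of this commit; "struct obj", its "pending" field, and
  obj_drain_interruptible() are illustrative names, and blockcount_sleep()
  is assumed to be the usual wrapper around _blockcount_sleep() from
  sys/blockcount.h): 0 means the count was already zero, EAGAIN means we
  slept and must re-check, and EINTR/ERESTART report an interrupting
  signal.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/blockcount.h>
	#include <sys/priority.h>

	struct obj {
		blockcount_t	pending;	/* in-flight operations */
	};

	/* Drain obj->pending, giving up early if a signal arrives. */
	static int
	obj_drain_interruptible(struct obj *obj)
	{
		int error;

		for (;;) {
			error = blockcount_sleep(&obj->pending, NULL,
			    "objdrn", PRIBIO | PCATCH);
			if (error == 0)
				return (0);	/* no work to wait for */
			if (error != EAGAIN)
				return (error);	/* EINTR or ERESTART */
			/* EAGAIN: we slept; loop to re-read the count. */
		}
	}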

Modified:
  head/sys/kern/kern_synch.c
  head/sys/sys/blockcount.h

Modified: head/sys/kern/kern_synch.c
==============================================================================
--- head/sys/kern/kern_synch.c	Tue Apr 21 17:07:02 2020	(r360162)
+++ head/sys/kern/kern_synch.c	Tue Apr 21 17:13:06 2020	(r360163)
@@ -400,12 +400,12 @@ _blockcount_wakeup(blockcount_t *bc, u_int old)
 }
 
 /*
- * Wait for a wakeup.  This does not guarantee that the count is still zero on
- * return and may be subject to transient wakeups.  Callers wanting a precise
- * answer should use blockcount_wait() with an interlock.
+ * Wait for a wakeup or a signal.  This does not guarantee that the count is
+ * still zero on return.  Callers wanting a precise answer should use
+ * blockcount_wait() with an interlock.
  *
- * Return 0 if there is no work to wait for, and 1 if we slept waiting for work
- * to complete.  In the latter case the counter value must be re-read.
+ * If there is no work to wait for, return 0.  If the sleep was interrupted by a
+ * signal, return EINTR or ERESTART, and return EAGAIN otherwise.
  */
 int
 _blockcount_sleep(blockcount_t *bc, struct lock_object *lock, const char *wmesg,
@@ -415,10 +415,15 @@ _blockcount_sleep(blockcount_t *bc, struct lock_object
 	uintptr_t lock_state;
 	u_int old;
 	int ret;
+	bool catch, drop;
 
 	KASSERT(lock != &Giant.lock_object,
 	    ("%s: cannot use Giant as the interlock", __func__));
 
+	catch = (prio & PCATCH) != 0;
+	drop = (prio & PDROP) != 0;
+	prio &= PRIMASK;
+
 	/*
 	 * Synchronize with the fence in blockcount_release().  If we end up
 	 * waiting, the sleepqueue lock acquisition will provide the required
@@ -428,7 +433,7 @@ _blockcount_sleep(blockcount_t *bc, struct lock_object
 	 * ourselves to sleep to avoid jumping ahead.
 	 */
 	if (atomic_load_acq_int(&bc->__count) == 0) {
-		if (lock != NULL && (prio & PDROP) != 0)
+		if (lock != NULL && drop)
 			LOCK_CLASS(lock)->lc_unlock(lock);
 		return (0);
 	}
@@ -439,23 +444,27 @@ _blockcount_sleep(blockcount_t *bc, struct lock_object
 	if (lock != NULL)
 		lock_state = LOCK_CLASS(lock)->lc_unlock(lock);
 	old = blockcount_read(bc);
+	ret = 0;
 	do {
 		if (_BLOCKCOUNT_COUNT(old) == 0) {
 			sleepq_release(wchan);
-			ret = 0;
 			goto out;
 		}
 		if (_BLOCKCOUNT_WAITERS(old))
 			break;
 	} while (!atomic_fcmpset_int(&bc->__count, &old,
 	    old | _BLOCKCOUNT_WAITERS_FLAG));
-	sleepq_add(wchan, NULL, wmesg, 0, 0);
-	sleepq_wait(wchan, prio);
-	ret = 1;
+	sleepq_add(wchan, NULL, wmesg, catch ? SLEEPQ_INTERRUPTIBLE : 0, 0);
+	if (catch)
+		ret = sleepq_wait_sig(wchan, prio);
+	else
+		sleepq_wait(wchan, prio);
+	if (ret == 0)
+		ret = EAGAIN;
 
 out:
 	PICKUP_GIANT();
-	if (lock != NULL && (prio & PDROP) == 0)
+	if (lock != NULL && !drop)
 		LOCK_CLASS(lock)->lc_lock(lock, lock_state);
 
 	return (ret);
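
The fcmpset loop above packs the waiter state and the count into the
single word bc->__count.  The exact layout is defined in
sys/blockcount.h; the sketch below is one plausible reading of the
macro names used in this hunk, not text quoted from the header:

	/* Assumed encoding: high bit = sleepers present, rest = count. */
	#define	_BC_WAITERS_FLAG	(1U << 31)
	#define	_BC_COUNT(c)		((c) & ~_BC_WAITERS_FLAG)
	#define	_BC_WAITERS(c)		(((c) & _BC_WAITERS_FLAG) != 0)

Because the sleeper sets the waiters flag under the sleepqueue lock
before calling sleepq_add(), a releaser that drops the count to zero
observes the flag and issues the wakeup, so no wakeup can be lost
between the re-read of the count and the sleep.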

Modified: head/sys/sys/blockcount.h
==============================================================================
--- head/sys/sys/blockcount.h	Tue Apr 21 17:07:02 2020	(r360162)
+++ head/sys/sys/blockcount.h	Tue Apr 21 17:13:06 2020	(r360163)
@@ -80,9 +80,9 @@ static __inline void
 _blockcount_wait(blockcount_t *bc, struct lock_object *lo, const char *wmesg,
     int prio)
 {
-	KASSERT((prio & PDROP) == 0, ("%s: invalid prio %x", __func__, prio));
+	KASSERT((prio & ~PRIMASK) == 0, ("%s: invalid prio %x", __func__, prio));
 
-	while (_blockcount_sleep(bc, lo, wmesg, prio) != 0)
+	while (_blockcount_sleep(bc, lo, wmesg, prio) == EAGAIN)
 		;
 }
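
_blockcount_wait() keeps its uninterruptible contract: the KASSERT now
rejects PCATCH (and PDROP) via PRIMASK, and the loop retries only on
EAGAIN.  A hypothetical interruptible counterpart, not part of this
commit, could forward the signal errors instead:

	/* Hypothetical sketch; mirrors _blockcount_wait() with PCATCH. */
	static __inline int
	_blockcount_wait_sig(blockcount_t *bc, struct lock_object *lo,
	    const char *wmesg, int prio)
	{
		int error;

		KASSERT((prio & ~PRIMASK) == 0,
		    ("%s: invalid prio %x", __func__, prio));

		/* Sleep until the count is zero or a signal arrives. */
		while ((error = _blockcount_sleep(bc, lo, wmesg,
		    prio | PCATCH)) == EAGAIN)
			;
		return (error);		/* 0, EINTR, or ERESTART */
	}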
 

