git: 2be2474a198c - MFC kern: _umtx_op: compat32 refactoring

Kyle Evans kevans at FreeBSD.org
Sat Dec 26 17:25:54 UTC 2020


The branch stable/12 has been updated by kevans:

URL: https://cgit.FreeBSD.org/src/commit/?id=2be2474a198c1d66f999d14404335d69400c488b

commit 2be2474a198c1d66f999d14404335d69400c488b
Author:     Kyle Evans <kevans at FreeBSD.org>
AuthorDate: 2020-11-17 03:36:58 +0000
Commit:     Kyle Evans <kevans at FreeBSD.org>
CommitDate: 2020-12-26 17:10:35 +0000

    MFC kern: _umtx_op: compat32 refactoring
    
    63ecb272: umtx_op: reduce redundancy required for compat32
    
    All of the compat32 variants are substantially the same, save for
    copyin/copyout (mostly). Apply the same kind of technique used with kevent
    here by having the syscall routines supply a umtx_copyops describing the
    operations needed.
    
    umtx_copyops carries the bare minimum needed: the sizes of timespec and
    _umtx_time are used to determine whether a copyout is needed in the
    sem2_wait case.
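
    The shape of that technique, as a standalone userspace sketch (names such
    as xfer_ops and handle_wait, and the memcpy() stand-ins for copyin(), are
    invented for illustration; this is not the kern_umtx.c code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    /* 32-bit ABI layout of the timeout, as the compat path would see it. */
    struct time32 { int tv_sec; int tv_nsec; };

    struct xfer_ops {
        int    (*copyin_time)(const void *uaddr, struct timespec *tsp);
        size_t timespec_sz;     /* user-visible timeout size for this ABI */
        bool   compat32;
    };

    static int
    copyin_time_native(const void *uaddr, struct timespec *tsp)
    {
        memcpy(tsp, uaddr, sizeof(*tsp));   /* stands in for copyin() */
        return (0);
    }

    static int
    copyin_time_compat32(const void *uaddr, struct timespec *tsp)
    {
        struct time32 t32;

        memcpy(&t32, uaddr, sizeof(t32));   /* stands in for copyin() */
        tsp->tv_sec = t32.tv_sec;           /* widen to the native type */
        tsp->tv_nsec = t32.tv_nsec;
        return (0);
    }

    static const struct xfer_ops native_ops = {
        .copyin_time = copyin_time_native,
        .timespec_sz = sizeof(struct timespec),
        .compat32 = false,
    };

    static const struct xfer_ops compat32_ops = {
        .copyin_time = copyin_time_compat32,
        .timespec_sz = sizeof(struct time32),
        .compat32 = true,
    };

    /* One shared handler; the ABI difference lives entirely in *ops. */
    static int
    handle_wait(const struct xfer_ops *ops, const void *utimeout)
    {
        struct timespec ts;
        int error;

        error = ops->copyin_time(utimeout, &ts);
        if (error != 0)
            return (error);
        printf("%s wait (%zu-byte timeout): %lld.%09ld\n",
            ops->compat32 ? "compat32" : "native", ops->timespec_sz,
            (long long)ts.tv_sec, ts.tv_nsec);
        return (0);
    }

    int
    main(void)
    {
        struct timespec ts = { .tv_sec = 1, .tv_nsec = 500000000 };
        struct time32 t32 = { .tv_sec = 1, .tv_nsec = 500000000 };

        handle_wait(&native_ops, &ts);
        handle_wait(&compat32_ops, &t32);
        return (0);
    }

    In the commit itself the table also carries copyin_umtx_time,
    copyin_robust_lists and copyout_timeout hooks, as the diff below shows.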
    
    bd4bcd14: Fix !COMPAT_FREEBSD32 kernel build
    
    One of the last shifts inadvertently moved these static assertions out of
    the COMPAT_FREEBSD32 block to which the relevant definitions are limited.
    
    Fix it.
    
    27a9392d: _umtx_op: fix robust lists after r367744
    
    A copy-pasto left us copying in 24 bytes at the address of the rb pointer
    instead of the intended target.
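
    As an aside, the bug class looks like this (a standalone sketch with a
    hypothetical fetch_params() helper and memcpy() standing in for copyin();
    struct params mimics the 24-byte parameter block on a 64-bit system):

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the three-pointer robust-lists parameter block. */
    struct params { unsigned long a, b, c; };

    static int
    fetch_params(const void *uaddr, struct params *rb)
    {
        /* Wrong: writes sizeof(*rb) bytes over the pointer variable itself. */
        /* memcpy(&rb, uaddr, sizeof(*rb)); */
        /* Right: writes into the object rb points to. */
        memcpy(rb, uaddr, sizeof(*rb));
        return (0);
    }

    int
    main(void)
    {
        struct params src = { 1, 2, 3 }, dst = { 0, 0, 0 };

        fetch_params(&src, &dst);
        printf("%lu %lu %lu\n", dst.a, dst.b, dst.c);
        return (0);
    }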
    
    15eaec6a: _umtx_op: move compat32 definitions back in
    
    These are reasonably compact, and a future commit will blur the compat32
    lines by supporting 32-bit operations with the native _umtx_op.
    
    60e60e73: freebsd32: take the _umtx_op struct definitions back
    
    Providing these in freebsd32.h facilitates local testing/measuring of the
    structs rather than forcing one to locally recreate them. Sanity checking
    offsets/sizes remains in kern_umtx.c where these are typically used.
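
    For instance, the sort of compile-time sanity check this enables (written
    against locally re-declared, purely illustrative structs and assuming a
    4-byte int; not a quote of the kern_umtx.c assertions):

    #include <stddef.h>

    struct timespec32 { int tv_sec; int tv_nsec; };
    struct umtx_time32 {
        struct timespec32 _timeout;
        unsigned int _flags;
        unsigned int _clockid;
    };

    _Static_assert(sizeof(struct umtx_time32) == 16, "umtx_time32 resized");
    _Static_assert(offsetof(struct umtx_time32, _flags) == 8, "_flags moved");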
    
    (cherry picked from commit 63ecb272a00c6f084a33db1c6ad3e925d4dcf015)
    (cherry picked from commit bd4bcd14e3eae6f70790be65e862f89e2596b9af)
    (cherry picked from commit 27a9392d543933f1aaa4e4ddae2a1585a72db1b2)
    (cherry picked from commit 15eaec6a5c04b1cce7594cd1278600d7a57efc5a)
    (cherry picked from commit 60e60e73fd9d185d62c36a499e21c124629eb8ba)
---
 sys/compat/freebsd32/freebsd32.h      |  21 ++
 sys/compat/freebsd32/freebsd32_misc.c |   1 +
 sys/kern/kern_umtx.c                  | 669 ++++++++++++++--------------------
 sys/sys/proc.h                        |   1 +
 sys/sys/umtx.h                        |   1 +
 5 files changed, 302 insertions(+), 391 deletions(-)

diff --git a/sys/compat/freebsd32/freebsd32.h b/sys/compat/freebsd32/freebsd32.h
index 46dd5e278a48..4227d9037afb 100644
--- a/sys/compat/freebsd32/freebsd32.h
+++ b/sys/compat/freebsd32/freebsd32.h
@@ -94,6 +94,27 @@ struct itimerval32 {
 	struct timeval32 it_value;
 };
 
+struct umtx_time32 {
+	struct	timespec32	_timeout;
+	uint32_t		_flags;
+	uint32_t		_clockid;
+};
+
+struct umtx_robust_lists_params_compat32 {
+	uint32_t	robust_list_offset;
+	uint32_t	robust_priv_list_offset;
+	uint32_t	robust_inact_offset;
+};
+
+struct umutex32 {
+	volatile __lwpid_t	m_owner;	/* Owner of the mutex */
+	__uint32_t		m_flags;	/* Flags of the mutex */
+	__uint32_t		m_ceilings[2];	/* Priority protect ceiling */
+	__uint32_t		m_rb_lnk;	/* Robust linkage */
+	__uint32_t		m_pad;
+	__uint32_t		m_spare[2];
+};
+
 #define FREEBSD4_MFSNAMELEN	16
 #define FREEBSD4_MNAMELEN	(88 - 2 * sizeof(int32_t))
 
diff --git a/sys/compat/freebsd32/freebsd32_misc.c b/sys/compat/freebsd32/freebsd32_misc.c
index 6c9ede34c1dc..2643333c51c2 100644
--- a/sys/compat/freebsd32/freebsd32_misc.c
+++ b/sys/compat/freebsd32/freebsd32_misc.c
@@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/timex.h>
 #include <sys/unistd.h>
 #include <sys/ucontext.h>
+#include <sys/umtx.h>
 #include <sys/vnode.h>
 #include <sys/wait.h>
 #include <sys/ipc.h>
diff --git a/sys/kern/kern_umtx.c b/sys/kern/kern_umtx.c
index 9336cc3ed2b0..46eb147fdbd2 100644
--- a/sys/kern/kern_umtx.c
+++ b/sys/kern/kern_umtx.c
@@ -219,16 +219,20 @@ struct abs_timeout {
 	struct timespec end;
 };
 
-#ifdef COMPAT_FREEBSD32
-struct umutex32 {
-	volatile __lwpid_t	m_owner;	/* Owner of the mutex */
-	__uint32_t		m_flags;	/* Flags of the mutex */
-	__uint32_t		m_ceilings[2];	/* Priority protect ceiling */
-	__uint32_t		m_rb_lnk;	/* Robust linkage */
-	__uint32_t		m_pad;
-	__uint32_t		m_spare[2];
+struct umtx_copyops {
+	int	(*copyin_timeout)(const void *uaddr, struct timespec *tsp);
+	int	(*copyin_umtx_time)(const void *uaddr, size_t size,
+	    struct _umtx_time *tp);
+	int	(*copyin_robust_lists)(const void *uaddr, size_t size,
+	    struct umtx_robust_lists_params *rbp);
+	int	(*copyout_timeout)(void *uaddr, size_t size,
+	    struct timespec *tsp);
+	const size_t	timespec_sz;
+	const size_t	umtx_time_sz;
+	const bool	compat32;
 };
 
+#ifdef COMPAT_FREEBSD32
 _Static_assert(sizeof(struct umutex) == sizeof(struct umutex32), "umutex32");
 _Static_assert(__offsetof(struct umutex, m_spare[0]) ==
     __offsetof(struct umutex32, m_spare[0]), "m_spare32");
@@ -3393,11 +3397,11 @@ do_sem2_wake(struct thread *td, struct _usem2 *sem)
 }
 
 inline int
-umtx_copyin_timeout(const void *addr, struct timespec *tsp)
+umtx_copyin_timeout(const void *uaddr, struct timespec *tsp)
 {
 	int error;
 
-	error = copyin(addr, tsp, sizeof(struct timespec));
+	error = copyin(uaddr, tsp, sizeof(*tsp));
 	if (error == 0) {
 		if (tsp->tv_sec < 0 ||
 		    tsp->tv_nsec >= 1000000000 ||
@@ -3408,16 +3412,16 @@ umtx_copyin_timeout(const void *addr, struct timespec *tsp)
 }
 
 static inline int
-umtx_copyin_umtx_time(const void *addr, size_t size, struct _umtx_time *tp)
+umtx_copyin_umtx_time(const void *uaddr, size_t size, struct _umtx_time *tp)
 {
 	int error;
 
-	if (size <= sizeof(struct timespec)) {
+	if (size <= sizeof(tp->_timeout)) {
 		tp->_clockid = CLOCK_REALTIME;
 		tp->_flags = 0;
-		error = copyin(addr, &tp->_timeout, sizeof(struct timespec));
+		error = copyin(uaddr, &tp->_timeout, sizeof(tp->_timeout));
 	} else
-		error = copyin(addr, tp, sizeof(struct _umtx_time));
+		error = copyin(uaddr, tp, sizeof(*tp));
 	if (error != 0)
 		return (error);
 	if (tp->_timeout.tv_sec < 0 ||
@@ -3427,14 +3431,41 @@ umtx_copyin_umtx_time(const void *addr, size_t size, struct _umtx_time *tp)
 }
 
 static int
-__umtx_op_unimpl(struct thread *td, struct _umtx_op_args *uap)
+umtx_copyin_robust_lists(const void *uaddr, size_t size,
+    struct umtx_robust_lists_params *rb)
+{
+
+	if (size > sizeof(*rb))
+		return (EINVAL);
+	return (copyin(uaddr, rb, size));
+}
+
+static int
+umtx_copyout_timeout(void *uaddr, size_t sz, struct timespec *tsp)
+{
+
+	/*
+	 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time)
+	 * and we're only called if sz >= sizeof(timespec) as supplied in the
+	 * copyops.
+	 */
+	KASSERT(sz >= sizeof(*tsp),
+	    ("umtx_copyops specifies incorrect sizes"));
+
+	return (copyout(tsp, uaddr, sizeof(*tsp)));
+}
+
+static int
+__umtx_op_unimpl(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (EOPNOTSUPP);
 }
 
 static int
-__umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wait(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
 {
 	struct _umtx_time timeout, *tm_p;
 	int error;
@@ -3442,17 +3473,18 @@ __umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
 	if (uap->uaddr2 == NULL)
 		tm_p = NULL;
 	else {
-		error = umtx_copyin_umtx_time(
+		error = ops->copyin_umtx_time(
 		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
 		tm_p = &timeout;
 	}
-	return (do_wait(td, uap->obj, uap->val, tm_p, 0, 0));
+	return (do_wait(td, uap->obj, uap->val, tm_p, ops->compat32, 0));
 }
 
 static int
-__umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
 {
 	struct _umtx_time timeout, *tm_p;
 	int error;
@@ -3460,7 +3492,7 @@ __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
 	if (uap->uaddr2 == NULL)
 		tm_p = NULL;
 	else {
-		error = umtx_copyin_umtx_time(
+		error = ops->copyin_umtx_time(
 		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3470,7 +3502,8 @@ __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
 }
 
 static int
-__umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
 {
 	struct _umtx_time *tm_p, timeout;
 	int error;
@@ -3478,7 +3511,7 @@ __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
 	if (uap->uaddr2 == NULL)
 		tm_p = NULL;
 	else {
-		error = umtx_copyin_umtx_time(
+		error = ops->copyin_umtx_time(
 		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3488,7 +3521,8 @@ __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
 }
 
 static int
-__umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wake(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (kern_umtx_wake(td, uap->obj, uap->val, 0));
@@ -3496,7 +3530,7 @@ __umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
 
 #define BATCH_SIZE	128
 static int
-__umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_nwake_private_native(struct thread *td, struct _umtx_op_args *uap)
 {
 	char *uaddrs[BATCH_SIZE], **upp;
 	int count, error, i, pos, tocopy;
@@ -3509,22 +3543,58 @@ __umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
 		error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *));
 		if (error != 0)
 			break;
-		for (i = 0; i < tocopy; ++i)
+		for (i = 0; i < tocopy; ++i) {
 			kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
+		}
+		maybe_yield();
+	}
+	return (error);
+}
+
+static int
+__umtx_op_nwake_private_compat32(struct thread *td, struct _umtx_op_args *uap)
+{
+	uint32_t uaddrs[BATCH_SIZE], *upp;
+	int count, error, i, pos, tocopy;
+
+	upp = (uint32_t *)uap->obj;
+	error = 0;
+	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
+	    pos += tocopy) {
+		tocopy = MIN(count, BATCH_SIZE);
+		error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
+		if (error != 0)
+			break;
+		for (i = 0; i < tocopy; ++i) {
+			kern_umtx_wake(td, (void *)(uintptr_t)uaddrs[i],
+			    INT_MAX, 1);
+		}
 		maybe_yield();
 	}
 	return (error);
 }
 
 static int
-__umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
+{
+
+	if (ops->compat32)
+		return (__umtx_op_nwake_private_compat32(td, uap));
+	return (__umtx_op_nwake_private_native(td, uap));
+}
+
+static int
+__umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (kern_umtx_wake(td, uap->obj, uap->val, 1));
 }
 
 static int
-__umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap,
+   const struct umtx_copyops *ops)
 {
 	struct _umtx_time *tm_p, timeout;
 	int error;
@@ -3533,7 +3603,7 @@ __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
 	if (uap->uaddr2 == NULL)
 		tm_p = NULL;
 	else {
-		error = umtx_copyin_umtx_time(
+		error = ops->copyin_umtx_time(
 		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3543,14 +3613,16 @@ __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
 }
 
 static int
-__umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY));
 }
 
 static int
-__umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
 {
 	struct _umtx_time *tm_p, timeout;
 	int error;
@@ -3559,7 +3631,7 @@ __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
 	if (uap->uaddr2 == NULL)
 		tm_p = NULL;
 	else {
-		error = umtx_copyin_umtx_time(
+		error = ops->copyin_umtx_time(
 		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3569,28 +3641,32 @@ __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
 }
 
 static int
-__umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (do_wake_umutex(td, uap->obj));
 }
 
 static int
-__umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (do_unlock_umutex(td, uap->obj, false));
 }
 
 static int
-__umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1));
 }
 
 static int
-__umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
 {
 	struct timespec *ts, timeout;
 	int error;
@@ -3599,7 +3675,7 @@ __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
 	if (uap->uaddr2 == NULL)
 		ts = NULL;
 	else {
-		error = umtx_copyin_timeout(uap->uaddr2, &timeout);
+		error = ops->copyin_timeout(uap->uaddr2, &timeout);
 		if (error != 0)
 			return (error);
 		ts = &timeout;
@@ -3608,21 +3684,24 @@ __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
 }
 
 static int
-__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (do_cv_signal(td, uap->obj));
 }
 
 static int
-__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (do_cv_broadcast(td, uap->obj));
 }
 
 static int
-__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
 {
 	struct _umtx_time timeout;
 	int error;
@@ -3631,7 +3710,7 @@ __umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
 	if (uap->uaddr2 == NULL) {
 		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
 	} else {
-		error = umtx_copyin_umtx_time(uap->uaddr2,
+		error = ops->copyin_umtx_time(uap->uaddr2,
 		   (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3641,7 +3720,8 @@ __umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
 }
 
 static int
-__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
 {
 	struct _umtx_time timeout;
 	int error;
@@ -3650,7 +3730,7 @@ __umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
 	if (uap->uaddr2 == NULL) {
 		error = do_rw_wrlock(td, uap->obj, 0);
 	} else {
-		error = umtx_copyin_umtx_time(uap->uaddr2,
+		error = ops->copyin_umtx_time(uap->uaddr2,
 		   (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3661,7 +3741,8 @@ __umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
 }
 
 static int
-__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (do_rw_unlock(td, uap->obj));
@@ -3669,7 +3750,8 @@ __umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
 
 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
 static int
-__umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
 {
 	struct _umtx_time *tm_p, timeout;
 	int error;
@@ -3678,7 +3760,7 @@ __umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
 	if (uap->uaddr2 == NULL)
 		tm_p = NULL;
 	else {
-		error = umtx_copyin_umtx_time(
+		error = ops->copyin_umtx_time(
 		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3688,7 +3770,8 @@ __umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
 }
 
 static int
-__umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (do_sem_wake(td, uap->obj));
@@ -3696,14 +3779,16 @@ __umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
 #endif
 
 static int
-__umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (do_wake2_umutex(td, uap->obj, uap->val));
 }
 
 static int
-__umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
 {
 	struct _umtx_time *tm_p, timeout;
 	size_t uasize;
@@ -3715,7 +3800,7 @@ __umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap)
 		tm_p = NULL;
 	} else {
 		uasize = (size_t)uap->uaddr1;
-		error = umtx_copyin_umtx_time(uap->uaddr2, uasize, &timeout);
+		error = ops->copyin_umtx_time(uap->uaddr2, uasize, &timeout);
 		if (error != 0)
 			return (error);
 		tm_p = &timeout;
@@ -3723,10 +3808,10 @@ __umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap)
 	error = do_sem2_wait(td, uap->obj, tm_p);
 	if (error == EINTR && uap->uaddr2 != NULL &&
 	    (timeout._flags & UMTX_ABSTIME) == 0 &&
-	    uasize >= sizeof(struct _umtx_time) + sizeof(struct timespec)) {
-		error = copyout(&timeout._timeout,
-		    (struct _umtx_time *)uap->uaddr2 + 1,
-		    sizeof(struct timespec));
+	    uasize >= ops->umtx_time_sz + ops->timespec_sz) {
+		error = ops->copyout_timeout(
+		    (void *)((uintptr_t)uap->uaddr2 + ops->umtx_time_sz),
+		    uasize - ops->umtx_time_sz, &timeout._timeout);
 		if (error == 0) {
 			error = EINTR;
 		}
@@ -3736,7 +3821,8 @@ __umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap)
 }
 
 static int
-__umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (do_sem2_wake(td, uap->obj));
@@ -4040,415 +4126,210 @@ umtx_shm(struct thread *td, void *addr, u_int flags)
 }
 
 static int
-__umtx_op_shm(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_shm(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops __unused)
 {
 
 	return (umtx_shm(td, uap->uaddr1, uap->val));
 }
 
 static int
-umtx_robust_lists(struct thread *td, struct umtx_robust_lists_params *rbp)
-{
-
-	td->td_rb_list = rbp->robust_list_offset;
-	td->td_rbp_list = rbp->robust_priv_list_offset;
-	td->td_rb_inact = rbp->robust_inact_offset;
-	return (0);
-}
-
-static int
-__umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
 {
 	struct umtx_robust_lists_params rb;
 	int error;
 
-	if (uap->val > sizeof(rb))
-		return (EINVAL);
 	bzero(&rb, sizeof(rb));
-	error = copyin(uap->uaddr1, &rb, uap->val);
+	error = ops->copyin_robust_lists(uap->uaddr1, uap->val, &rb);
 	if (error != 0)
 		return (error);
-	return (umtx_robust_lists(td, &rb));
-}
-
-typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);
 
-static const _umtx_op_func op_table[] = {
-	[UMTX_OP_RESERVED0]	= __umtx_op_unimpl,
-	[UMTX_OP_RESERVED1]	= __umtx_op_unimpl,
-	[UMTX_OP_WAIT]		= __umtx_op_wait,
-	[UMTX_OP_WAKE]		= __umtx_op_wake,
-	[UMTX_OP_MUTEX_TRYLOCK]	= __umtx_op_trylock_umutex,
-	[UMTX_OP_MUTEX_LOCK]	= __umtx_op_lock_umutex,
-	[UMTX_OP_MUTEX_UNLOCK]	= __umtx_op_unlock_umutex,
-	[UMTX_OP_SET_CEILING]	= __umtx_op_set_ceiling,
-	[UMTX_OP_CV_WAIT]	= __umtx_op_cv_wait,
-	[UMTX_OP_CV_SIGNAL]	= __umtx_op_cv_signal,
-	[UMTX_OP_CV_BROADCAST]	= __umtx_op_cv_broadcast,
-	[UMTX_OP_WAIT_UINT]	= __umtx_op_wait_uint,
-	[UMTX_OP_RW_RDLOCK]	= __umtx_op_rw_rdlock,
-	[UMTX_OP_RW_WRLOCK]	= __umtx_op_rw_wrlock,
-	[UMTX_OP_RW_UNLOCK]	= __umtx_op_rw_unlock,
-	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
-	[UMTX_OP_WAKE_PRIVATE]	= __umtx_op_wake_private,
-	[UMTX_OP_MUTEX_WAIT]	= __umtx_op_wait_umutex,
-	[UMTX_OP_MUTEX_WAKE]	= __umtx_op_wake_umutex,
-#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
-	[UMTX_OP_SEM_WAIT]	= __umtx_op_sem_wait,
-	[UMTX_OP_SEM_WAKE]	= __umtx_op_sem_wake,
-#else
-	[UMTX_OP_SEM_WAIT]	= __umtx_op_unimpl,
-	[UMTX_OP_SEM_WAKE]	= __umtx_op_unimpl,
-#endif
-	[UMTX_OP_NWAKE_PRIVATE]	= __umtx_op_nwake_private,
-	[UMTX_OP_MUTEX_WAKE2]	= __umtx_op_wake2_umutex,
-	[UMTX_OP_SEM2_WAIT]	= __umtx_op_sem2_wait,
-	[UMTX_OP_SEM2_WAKE]	= __umtx_op_sem2_wake,
-	[UMTX_OP_SHM]		= __umtx_op_shm,
-	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists,
-};
-
-int
-sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
-{
+	if (ops->compat32)
+		td->td_pflags2 |= TDP2_COMPAT32RB;
+	else if ((td->td_pflags2 & TDP2_COMPAT32RB) != 0)
+		return (EINVAL);
 
-	if ((unsigned)uap->op < nitems(op_table))
-		return (*op_table[uap->op])(td, uap);
-	return (EINVAL);
+	td->td_rb_list = rb.robust_list_offset;
+	td->td_rbp_list = rb.robust_priv_list_offset;
+	td->td_rb_inact = rb.robust_inact_offset;
+	return (0);
 }
 
 #ifdef COMPAT_FREEBSD32
-
-struct umtx_time32 {
-	struct	timespec32	timeout;
-	uint32_t		flags;
-	uint32_t		clockid;
-};
-
 static inline int
-umtx_copyin_timeout32(void *addr, struct timespec *tsp)
+umtx_copyin_timeout32(const void *uaddr, struct timespec *tsp)
 {
 	struct timespec32 ts32;
 	int error;
 
-	error = copyin(addr, &ts32, sizeof(struct timespec32));
+	error = copyin(uaddr, &ts32, sizeof(ts32));
 	if (error == 0) {
 		if (ts32.tv_sec < 0 ||
 		    ts32.tv_nsec >= 1000000000 ||
 		    ts32.tv_nsec < 0)
 			error = EINVAL;
 		else {
-			tsp->tv_sec = ts32.tv_sec;
-			tsp->tv_nsec = ts32.tv_nsec;
+			CP(ts32, *tsp, tv_sec);
+			CP(ts32, *tsp, tv_nsec);
 		}
 	}
 	return (error);
 }
 
 static inline int
-umtx_copyin_umtx_time32(const void *addr, size_t size, struct _umtx_time *tp)
+umtx_copyin_umtx_time32(const void *uaddr, size_t size, struct _umtx_time *tp)
 {
 	struct umtx_time32 t32;
 	int error;
 
-	t32.clockid = CLOCK_REALTIME;
-	t32.flags   = 0;
-	if (size <= sizeof(struct timespec32))
-		error = copyin(addr, &t32.timeout, sizeof(struct timespec32));
+	t32._clockid = CLOCK_REALTIME;
+	t32._flags   = 0;
+	if (size <= sizeof(t32._timeout))
+		error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout));
 	else
-		error = copyin(addr, &t32, sizeof(struct umtx_time32));
+		error = copyin(uaddr, &t32, sizeof(t32));
 	if (error != 0)
 		return (error);
-	if (t32.timeout.tv_sec < 0 ||
-	    t32.timeout.tv_nsec >= 1000000000 || t32.timeout.tv_nsec < 0)
+	if (t32._timeout.tv_sec < 0 ||
+	    t32._timeout.tv_nsec >= 1000000000 || t32._timeout.tv_nsec < 0)
 		return (EINVAL);
-	tp->_timeout.tv_sec = t32.timeout.tv_sec;
-	tp->_timeout.tv_nsec = t32.timeout.tv_nsec;
-	tp->_flags = t32.flags;
-	tp->_clockid = t32.clockid;
+	TS_CP(t32, *tp, _timeout);
+	CP(t32, *tp, _flags);
+	CP(t32, *tp, _clockid);
 	return (0);
 }
 
 static int
-__umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct _umtx_time *tm_p, timeout;
-	int error;
-
-	if (uap->uaddr2 == NULL)
-		tm_p = NULL;
-	else {
-		error = umtx_copyin_umtx_time32(uap->uaddr2,
-			(size_t)uap->uaddr1, &timeout);
-		if (error != 0)
-			return (error);
-		tm_p = &timeout;
-	}
-	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
-}
-
-static int
-__umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct _umtx_time *tm_p, timeout;
-	int error;
-
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL)
-		tm_p = NULL;
-	else {
-		error = umtx_copyin_umtx_time32(uap->uaddr2,
-			    (size_t)uap->uaddr1, &timeout);
-		if (error != 0)
-			return (error);
-		tm_p = &timeout;
-	}
-	return (do_lock_umutex(td, uap->obj, tm_p, 0));
-}
-
-static int
-__umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct _umtx_time *tm_p, timeout;
-	int error;
-
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL)
-		tm_p = NULL;
-	else {
-		error = umtx_copyin_umtx_time32(uap->uaddr2,
-		    (size_t)uap->uaddr1, &timeout);
-		if (error != 0)
-			return (error);
-		tm_p = &timeout;
-	}
-	return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
-}
-
-static int
-__umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct timespec *ts, timeout;
-	int error;
-
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL)
-		ts = NULL;
-	else {
-		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
-		if (error != 0)
-			return (error);
-		ts = &timeout;
-	}
-	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
-}
-
-static int
-__umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct _umtx_time timeout;
-	int error;
-
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL) {
-		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
-	} else {
-		error = umtx_copyin_umtx_time32(uap->uaddr2,
-		    (size_t)uap->uaddr1, &timeout);
-		if (error != 0)
-			return (error);
-		error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
-	}
-	return (error);
-}
-
-static int
-__umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct _umtx_time timeout;
-	int error;
-
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL) {
-		error = do_rw_wrlock(td, uap->obj, 0);
-	} else {
-		error = umtx_copyin_umtx_time32(uap->uaddr2,
-		    (size_t)uap->uaddr1, &timeout);
-		if (error != 0)
-			return (error);
-		error = do_rw_wrlock(td, uap->obj, &timeout);
-	}
-	return (error);
-}
-
-static int
-__umtx_op_wait_uint_private_compat32(struct thread *td, struct _umtx_op_args *uap)
+umtx_copyin_robust_lists32(const void *uaddr, size_t size,
+    struct umtx_robust_lists_params *rbp)
 {
-	struct _umtx_time *tm_p, timeout;
-	int error;
-
-	if (uap->uaddr2 == NULL)
-		tm_p = NULL;
-	else {
-		error = umtx_copyin_umtx_time32(
-		    uap->uaddr2, (size_t)uap->uaddr1,&timeout);
-		if (error != 0)
-			return (error);
-		tm_p = &timeout;
-	}
-	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
-}
-
-#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
-static int
-__umtx_op_sem_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct _umtx_time *tm_p, timeout;
+	struct umtx_robust_lists_params_compat32 rb32;
 	int error;
 
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL)
-		tm_p = NULL;
-	else {
-		error = umtx_copyin_umtx_time32(uap->uaddr2,
-		    (size_t)uap->uaddr1, &timeout);
-		if (error != 0)
-			return (error);
-		tm_p = &timeout;
-	}
-	return (do_sem_wait(td, uap->obj, tm_p));
+	if (size > sizeof(rb32))
+		return (EINVAL);
+	bzero(&rb32, sizeof(rb32));
+	error = copyin(uaddr, &rb32, size);
+	if (error != 0)
+		return (error);
+	CP(rb32, *rbp, robust_list_offset);
+	CP(rb32, *rbp, robust_priv_list_offset);
+	CP(rb32, *rbp, robust_inact_offset);
+	return (0);
 }
-#endif
 
 static int
-__umtx_op_sem2_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
+umtx_copyout_timeout32(void *uaddr, size_t sz, struct timespec *tsp)
 {
-	struct _umtx_time *tm_p, timeout;
-	size_t uasize;
-	int error;
+	struct timespec32 remain32 = {
+		.tv_sec = tsp->tv_sec,
+		.tv_nsec = tsp->tv_nsec,
+	};
 
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL) {
-		uasize = 0;
-		tm_p = NULL;
-	} else {
-		uasize = (size_t)uap->uaddr1;
-		error = umtx_copyin_umtx_time32(uap->uaddr2, uasize, &timeout);
-		if (error != 0)
-			return (error);
-		tm_p = &timeout;
-	}
-	error = do_sem2_wait(td, uap->obj, tm_p);
-	if (error == EINTR && uap->uaddr2 != NULL &&
-	    (timeout._flags & UMTX_ABSTIME) == 0 &&
-	    uasize >= sizeof(struct umtx_time32) + sizeof(struct timespec32)) {
-		struct timespec32 remain32 = {
-			.tv_sec = timeout._timeout.tv_sec,
-			.tv_nsec = timeout._timeout.tv_nsec
-		};
-		error = copyout(&remain32,
-		    (struct umtx_time32 *)uap->uaddr2 + 1,
-		    sizeof(struct timespec32));
-		if (error == 0) {
-			error = EINTR;
-		}
-	}
+	/*
+	 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time)
+	 * and we're only called if sz >= sizeof(timespec) as supplied in the
+	 * copyops.
+	 */
+	KASSERT(sz >= sizeof(remain32),
+	    ("umtx_copyops specifies incorrect sizes"));
 
-	return (error);
+	return (copyout(&remain32, uaddr, sizeof(remain32)));
 }
+#endif /* COMPAT_FREEBSD32 */
 
-static int
-__umtx_op_nwake_private32(struct thread *td, struct _umtx_op_args *uap)
-{
-	uint32_t uaddrs[BATCH_SIZE], *upp;
-	int count, error, i, pos, tocopy;
-
-	upp = (uint32_t *)uap->obj;
-	error = 0;
-	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
-	    pos += tocopy) {
-		tocopy = MIN(count, BATCH_SIZE);
-		error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
-		if (error != 0)
-			break;
-		for (i = 0; i < tocopy; ++i)
-			kern_umtx_wake(td, (void *)(uintptr_t)uaddrs[i],
-			    INT_MAX, 1);
-		maybe_yield();
-	}
-	return (error);
-}
-
-struct umtx_robust_lists_params_compat32 {
-	uint32_t	robust_list_offset;
-	uint32_t	robust_priv_list_offset;
-	uint32_t	robust_inact_offset;
-};
-
-static int
-__umtx_op_robust_lists_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct umtx_robust_lists_params rb;
-	struct umtx_robust_lists_params_compat32 rb32;
-	int error;
*** 259 LINES SKIPPED ***

