PERFORCE change 67324 for review

David Xu davidxu at FreeBSD.org
Sat Dec 18 18:01:21 PST 2004


http://perforce.freebsd.org/chv.cgi?CH=67324

Change 67324 by davidxu at davidxu_tiger on 2004/12/19 02:00:34

	IPC: rework umtx so that sleeping threads are keyed by VM object
	and offset when the memory is shared, or by address and pid when
	it is private, and add the _umtx_op() syscall with timed lock,
	unlock-and-wait, and wake operations.

Affected files ...

.. //depot/projects/davidxu_thread/src/etc/devd.conf#3 integrate
.. //depot/projects/davidxu_thread/src/sys/kern/kern_umtx.c#6 integrate
.. //depot/projects/davidxu_thread/src/sys/sys/proc.h#5 integrate

Differences ...

==== //depot/projects/davidxu_thread/src/etc/devd.conf#3 (text+ko) ====

@@ -1,4 +1,4 @@
-# $FreeBSD: src/etc/devd.conf,v 1.19 2004/11/28 23:16:00 iedowse Exp $
+# $FreeBSD: src/etc/devd.conf,v 1.20 2004/12/19 00:50:07 brueffer Exp $
 #
 # Refer to devd.conf(5) and devd(8) man pages for the details on how to
 # run and configure devd.
@@ -19,12 +19,13 @@
 	# Setup some shorthand for regex that we use later in the file.
 	set ethernet-nic-regex
 		"(an|ar|ath|aue|awi|axe|bfe|bge|cm|cnw|cs|cue|dc|de|ed|el|em|\
-		ep|ex|fe|fxp|gem|hme|ie|kue|lge|lnc|my|nge|pcn|ray|re|rl|\
-		rue|sf|sis|sk|sn|snc|ste|ti|tl|tx|txp|vge|vr|vx|wb|wi|xe|\
-		xl)[0-9]+";
+		ep|ex|fe|fxp|gem|hme|ie|kue|lge|lnc|my|nge|pcn|ray|re|rl|rue|\
+		sf|sis|sk|sn|snc|ste|ti|tl|tx|txp|udav|vge|vr|vx|wb|wi|xe|xl)\
+		[0-9]+";
 	set scsi-controller-regex
-		"(adv|advw|aic|aha|ahb|ahc|ahd|bt|ct|iir|isp|mly|mpt|ncv|nsp|\
-		stg|sym|wds)[0-9]+";
+		"(aac|adv|adw|aha|ahb|ahc|ahd|aic|amd|amr|asr|bt|ciss|ct|dpt|\
+		ida|iir|ips|isp|mlx|mly|mpt|ncr|ncv|nsp|stg|sym|trm|wds)\
+		[0-9]+";
 };
 
 # Note that the attach/detach with the highest value wins, so that one can

==== //depot/projects/davidxu_thread/src/sys/kern/kern_umtx.c#6 (text+ko) ====

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
  * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
  * All rights reserved.
  *
@@ -25,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_umtx.c,v 1.18 2004/11/30 12:18:53 davidxu Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_umtx.c,v 1.20 2004/12/18 13:43:16 davidxu Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -47,15 +48,35 @@
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 
+#define UMTX_PRIVATE	0
+#define UMTX_SHARED	1
+
+#define UMTX_STATIC_SHARED
+
 struct umtx_key {
-	vm_object_t	uk_object;
-	vm_ooffset_t	uk_offset;
+	int	type;
+	union {
+		struct {
+			vm_object_t	object;
+			long		offset;
+		} shared;
+		struct {
+			struct umtx	*umtx;
+			long		pid;
+		} private;
+		struct {
+			void		*ptr;
+			long		word;
+		} both;
+	} info;
 };
 
 struct umtx_q {
 	LIST_ENTRY(umtx_q)	uq_next;	/* Linked list for the hash. */
-	struct umtx_key		uq_key;
-	struct thread		*uq_thread;
+	struct umtx_key		uq_key;		/* Umtx key. */
+	struct thread		*uq_thread;	/* The waiting thread. */
+	LIST_ENTRY(umtx_q)	uq_rqnext;	/* Linked list for requeuing. */
+	vm_offset_t		uq_addr;	/* Umtx's virtual address. */
 };
 
 LIST_HEAD(umtx_head, umtx_q);
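
The reworked key distinguishes two cases: a umtx living in memory that
can be shared across processes is identified by its backing VM object
and offset, while a process-private umtx is identified by its virtual
address and pid.  The "both" arm overlays the leading pointer-sized and
long fields of either variant, so the hash and match routines further
down can stay type-blind.  A minimal user-space model of that layout
assumption (illustrative only, not part of the change; void * stands in
for vm_object_t and struct umtx *):

    #include <assert.h>
    #include <stddef.h>

    struct model_key {
            int     type;
            union {
                    struct { void *object; long offset; } shared;
                    struct { void *umtx;   long pid;    } private;
                    struct { void *ptr;    long word;   } both;
            } info;
    };

    int
    main(void)
    {
            /* "both" is only sound if each arm overlays the same slots. */
            assert(offsetof(struct model_key, info.shared.object) ==
                   offsetof(struct model_key, info.both.ptr));
            assert(offsetof(struct model_key, info.shared.offset) ==
                   offsetof(struct model_key, info.both.word));
            assert(offsetof(struct model_key, info.private.umtx) ==
                   offsetof(struct model_key, info.both.ptr));
            assert(offsetof(struct model_key, info.private.pid) ==
                   offsetof(struct model_key, info.both.word));
            return (0);
    }
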
@@ -73,21 +94,31 @@
 
 #define	UMTX_CONTESTED	LONG_MIN
 
-static void umtx_init_chains(void *);
-static int  umtxq_hash(struct umtx_key *);
+static void umtxq_init_chains(void *);
+static int umtxq_hash(struct umtx_key *key);
 static struct mtx *umtxq_mtx(int chain);
-static void umtxq_lock(int chain);
-static void umtxq_unlock(int chain);
-static void umtxq_insert(int chain, struct umtx_q *);
-static int  umtxq_count(struct umtx_key *);
-static void umtxq_signal(struct umtx_key *);
+static void umtxq_lock(struct umtx_key *key);
+static void umtxq_unlock(struct umtx_key *key);
+static void umtxq_insert(struct umtx_q *uq);
+static void umtxq_remove(struct umtx_q *uq);
+static int umtxq_sleep(struct thread *td, struct umtx_key *key,
+	int prio, const char *wmesg, int timo);
+static int  umtxq_count(struct umtx_key *key);
+static void umtxq_signal(struct umtx_key *key);
+static void umtxq_broadcast(struct umtx_key *key);
+#ifdef UMTX_DYNAMIC_SHARED
 static void fork_handler(void *arg, struct proc *p1, struct proc *p2,
-		int flags);
+	int flags);
+#endif
+static int umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2);
+static int umtx_key_get(struct thread *td, struct umtx *umtx,
+	struct umtx_key *key);
+static void umtx_key_release(struct umtx_key *key);
 
-SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtx_init_chains, NULL);
+SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_init_chains, NULL);
 
 static void
-umtx_init_chains(void *arg __unused)
+umtxq_init_chains(void *arg __unused)
 {
 	int i;
 
@@ -96,27 +127,24 @@
 			 MTX_DEF | MTX_DUPOK);
 		LIST_INIT(&umtxq_chains[i].uc_queue);
 	}
+#ifdef UMTX_DYNAMIC_SHARED
 	EVENTHANDLER_REGISTER(process_fork, fork_handler, 0, 10000);
+#endif
 }
 
-static void
-fork_handler(void *arg, struct proc *p1, struct proc *p2, int flags)
+static inline int
+umtxq_hash(struct umtx_key *key)
 {
-	struct thread *td;
-
-	PROC_LOCK(p1);
-	FOREACH_THREAD_IN_PROC(p1, td) {
-		if (td->td_flags & TDF_UMTXQ)
-			wakeup(td);
-	}
-	PROC_UNLOCK(p1);
+	unsigned n = (uintptr_t)key->info.both.ptr + key->info.both.word;
+	return (((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS);
 }
 
 static inline int
-umtxq_hash(struct umtx_key *key)
+umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2)
 {
-	unsigned n = (uintptr_t)key->uk_object + key->uk_offset;
-	return (((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS);
+	return (k1->type == k2->type &&
+		k1->info.both.ptr == k2->info.both.ptr &&
+	        k1->info.both.word == k2->info.both.word);
 }
 
 static inline struct mtx *
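
Since comparison goes through the overlaid "both" fields, a single hash
covers shared and private keys alike.  A user-space sketch of the chain
selection; the three constants are the ones defined near the top of
kern_umtx.c, with stand-in values here for illustration:

    #include <stdint.h>

    /* Stand-in values; the real definitions live in kern_umtx.c. */
    #define GOLDEN_RATIO_PRIME      2654404609U
    #define UMTX_CHAINS             128
    #define UMTX_SHIFTS             (32 - 7)

    /* Multiplicative (golden-ratio) hash: mix (ptr + word), take the
     * high bits, and reduce to a chain index. */
    int
    model_hash(void *ptr, long word)
    {
            unsigned n = (uintptr_t)ptr + (unsigned long)word;
            return (((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS);
    }
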
@@ -126,14 +154,16 @@
 }
 
 static inline void
-umtxq_lock(int chain)
+umtxq_lock(struct umtx_key *key)
 {
+	int chain = umtxq_hash(key);
 	mtx_lock(umtxq_mtx(chain));
 }
 
 static inline void
-umtxq_unlock(int chain)
+umtxq_unlock(struct umtx_key *key)
 {
+	int chain = umtxq_hash(key);
 	mtx_unlock(umtxq_mtx(chain));
 }
 
@@ -141,25 +171,29 @@
  * Insert a thread onto the umtx queue.
  */
 static inline void
-umtxq_insert(int chain, struct umtx_q *uq)
+umtxq_insert(struct umtx_q *uq)
 {
 	struct umtx_head *head;
+	int chain = umtxq_hash(&uq->uq_key);
 
 	head = &umtxq_chains[chain].uc_queue;
 	LIST_INSERT_HEAD(head, uq, uq_next);
+	uq->uq_thread->td_umtxq = uq;
 	mtx_lock_spin(&sched_lock);
 	uq->uq_thread->td_flags |= TDF_UMTXQ;
 	mtx_unlock_spin(&sched_lock);
 }
 
 /*
- * Remove thread from umtx queue released.
+ * Remove thread from the umtx queue.
  */
 static inline void
 umtxq_remove(struct umtx_q *uq)
 {
 	if (uq->uq_thread->td_flags & TDF_UMTXQ) {
 		LIST_REMOVE(uq, uq_next);
+		uq->uq_thread->td_umtxq = NULL;
+		/* turning off TDF_UMTXQ should be the last thing. */
 		mtx_lock_spin(&sched_lock);
 		uq->uq_thread->td_flags &= ~TDF_UMTXQ;
 		mtx_unlock_spin(&sched_lock);
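
The ordering note above is load-bearing: the waker unlinks the entry
and only then clears TDF_UMTXQ, so a sleeper that wakes up can test the
flag to tell a genuine wakeup (flag clear, entry already unlinked) from
a timeout or signal (flag still set, so it must dequeue itself); that
is the re-check pattern used after umtxq_sleep() in _do_lock() below.
A stripped-down user-space rendering of the waker's side (a sketch;
one pthread mutex stands in for the chain mutex and sched_lock):

    #include <pthread.h>
    #include <sys/queue.h>

    struct waiter {
            LIST_ENTRY(waiter) link;
            int     on_queue;       /* plays the role of TDF_UMTXQ */
    };

    static pthread_mutex_t chain_mtx = PTHREAD_MUTEX_INITIALIZER;

    void
    wake_one(struct waiter *w)
    {
            pthread_mutex_lock(&chain_mtx);
            if (w->on_queue) {
                    LIST_REMOVE(w, link);
                    w->on_queue = 0;  /* cleared last, as in umtxq_remove() */
            }
            pthread_mutex_unlock(&chain_mtx);
            /* ...then wake the sleeping thread... */
    }
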
@@ -174,16 +208,15 @@
 	int chain, count = 0;
 
 	chain = umtxq_hash(key);
-	umtxq_lock(chain);
+	umtxq_lock(key);
 	head = &umtxq_chains[chain].uc_queue;
 	LIST_FOREACH(uq, head, uq_next) {
-		if (uq->uq_key.uk_object == key->uk_object &&
-		    uq->uq_key.uk_offset == key->uk_offset) {
+		if (umtx_key_match(&uq->uq_key, key)) {
 			if (++count > 1)
 				break;
 		}
 	}
-	umtxq_unlock(chain);
+	umtxq_unlock(key);
 	return (count);
 }
 
@@ -196,55 +229,209 @@
 	int chain;
 
 	chain = umtxq_hash(key);
-	umtxq_lock(chain);
+	umtxq_lock(key);
 	head = &umtxq_chains[chain].uc_queue;
 	LIST_FOREACH(uq, head, uq_next) {
-		if (uq->uq_key.uk_object == key->uk_object &&
-		    uq->uq_key.uk_offset == key->uk_offset) {
+		if (umtx_key_match(&uq->uq_key, key)) {
 			blocked = uq->uq_thread;
 			umtxq_remove(uq);
 			break;
 		}
 	}
-	umtxq_unlock(chain);
+	umtxq_unlock(key);
 	if (blocked != NULL)
 		wakeup(blocked);
 }
 
-int
-_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
-    /* struct umtx *umtx */
+static void
+umtxq_broadcast(struct umtx_key *key)
+{
+	struct umtx_q *uq, *next;
+	struct umtx_head *head;
+	struct thread *blocked;
+	int chain;
+
+	chain = umtxq_hash(key);
+	umtxq_lock(key);
+	head = &umtxq_chains[chain].uc_queue;
+	for (uq = LIST_FIRST(head); uq != NULL; uq = next) {
+		next = LIST_NEXT(uq, uq_next);
+		if (umtx_key_match(&uq->uq_key, key)) {
+			blocked = uq->uq_thread;
+			umtxq_remove(uq);
+			wakeup(blocked);
+		}
+	}
+	umtxq_unlock(key);
+}
+
+static inline int
+umtxq_sleep(struct thread *td, struct umtx_key *key, int priority,
+	    const char *wmesg, int timo)
+{
+	int error;
+	int chain = umtxq_hash(key);
+
+	error = msleep(td, umtxq_mtx(chain), priority, wmesg, timo);
+	return (error);
+}
+
+static int
+umtx_key_get(struct thread *td, struct umtx *umtx, struct umtx_key *key)
+{
+#if defined(UMTX_DYNAMIC_SHARED) || defined(UMTX_STATIC_SHARED)
+	vm_map_t map;
+	vm_map_entry_t entry;
+	vm_pindex_t pindex;
+	vm_prot_t prot;
+	boolean_t wired;
+
+	map = &td->td_proc->p_vmspace->vm_map;
+	if (vm_map_lookup(&map, (vm_offset_t)umtx, VM_PROT_WRITE,
+	    &entry, &key->info.shared.object, &pindex, &prot,
+	    &wired) != KERN_SUCCESS) {
+		return EFAULT;
+	}
+#endif
+
+#if defined(UMTX_DYNAMIC_SHARED)
+	key->type = UMTX_SHARED;
+	key->info.shared.offset = entry->offset + entry->start - 
+		(vm_offset_t)umtx;
+	/*
+	 * Add an object reference; without it a buggy application could
+	 * deallocate the object, the object could be reused by another
+	 * process, and unlock would then wake the wrong thread.
+	 */
+	vm_object_reference(key->info.shared.object);
+	vm_map_lookup_done(map, entry);
+#elif defined(UMTX_STATIC_SHARED)
+	if (VM_INHERIT_SHARE == entry->inheritance) {
+		key->type = UMTX_SHARED;
+		key->info.shared.offset = entry->offset + entry->start -
+			(vm_offset_t)umtx;
+		vm_object_reference(key->info.shared.object);
+	} else {
+		key->type = UMTX_PRIVATE;
+		key->info.private.umtx = umtx;
+		key->info.private.pid  = td->td_proc->p_pid;
+	}
+	vm_map_lookup_done(map, entry);
+#else
+	key->type = UMTX_PRIVATE;
+	key->info.private.umtx = umtx;
+	key->info.private.pid  = td->td_proc->p_pid;
+#endif
+	return (0);
+}
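
Under UMTX_STATIC_SHARED the classification is fixed at lookup time by
the map entry's inheritance: a VM_INHERIT_SHARE entry (a MAP_SHARED
mapping, for instance) yields an object/offset key that every process
mapping the same object computes identically, and anything else falls
back to the (address, pid) private key.  For illustration, how an
application would land on each side of that branch (standard mmap and
malloc usage; the classification comments are assumptions about what
umtx_key_get() above would decide):

    #include <sys/mman.h>
    #include <stdlib.h>

    int
    main(void)
    {
            /* MAP_SHARED|MAP_ANON memory is inherited VM_INHERIT_SHARE,
             * so a umtx placed here should get a UMTX_SHARED key and
             * keep working across fork(). */
            void *shr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_ANON, -1, 0);

            /* Plain heap memory is copied on write across fork(), so a
             * umtx here should be keyed UMTX_PRIVATE by (address, pid). */
            void *prv = malloc(sizeof(long));

            return (shr == MAP_FAILED || prv == NULL);
    }
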
+
+static inline void
+umtx_key_release(struct umtx_key *key)
+{
+	if (key->type == UMTX_SHARED)
+		vm_object_deallocate(key->info.shared.object);
+}
+
+static inline int
+umtxq_queue_me(struct thread *td, struct umtx *umtx, struct umtx_q *uq)
+{
+	int error;
+
+	if ((error = umtx_key_get(td, umtx, &uq->uq_key)) != 0)
+		return (error);
+
+	uq->uq_addr = (vm_offset_t)umtx;
+	uq->uq_thread = td;
+	umtxq_lock(&uq->uq_key);
+	umtxq_insert(uq);
+	umtxq_unlock(&uq->uq_key);
+	return (0);
+}
+
+#if defined(UMTX_DYNAMIC_SHARED)
+static void
+fork_handler(void *arg, struct proc *p1, struct proc *p2, int flags)
 {
-	struct umtx_q uq;
-	struct umtx *umtx;
 	vm_map_t map;
 	vm_map_entry_t entry;
 	vm_object_t object;
 	vm_pindex_t pindex;
 	vm_prot_t prot;
 	boolean_t wired;
+	struct umtx_key key;
+	LIST_HEAD(, umtx_q) workq;
+	struct umtx_q *uq;
+	struct thread *td;
+	int onq;
+
+	LIST_INIT(&workq);
+
+	/* Collect threads waiting on umtxq */
+	PROC_LOCK(p1);
+	FOREACH_THREAD_IN_PROC(p1, td) {
+		if (td->td_flags & TDF_UMTXQ) {
+			uq = td->td_umtxq;
+			if (uq)
+				LIST_INSERT_HEAD(&workq, uq, uq_rqnext);
+		}
+	}
+	PROC_UNLOCK(p1);
+
+	LIST_FOREACH(uq, &workq, uq_rqnext) {
+		map = &p1->p_vmspace->vm_map;
+		if (vm_map_lookup(&map, uq->uq_addr, VM_PROT_WRITE,
+		    &entry, &object, &pindex, &prot, &wired) != KERN_SUCCESS) {
+			continue;
+		}
+		key.type = UMTX_SHARED;
+		key.info.shared.object = object;
+		key.info.shared.offset = entry->offset + entry->start -
+			uq->uq_addr;
+		if (umtx_key_match(&key, &uq->uq_key)) {
+			vm_map_lookup_done(map, entry);
+			continue;
+		}
+		
+		umtxq_lock(&uq->uq_key);
+		if (uq->uq_thread->td_flags & TDF_UMTXQ) {
+			umtxq_remove(uq);
+			onq = 1;
+		} else
+			onq = 0;
+		umtxq_unlock(&uq->uq_key);
+		if (onq) {
+			vm_object_deallocate(uq->uq_key.info.shared.object);
+			uq->uq_key = key;
+			umtxq_lock(&uq->uq_key);
+			umtxq_insert(uq);
+			umtxq_unlock(&uq->uq_key);
+			vm_object_reference(uq->uq_key.info.shared.object);
+		}
+		vm_map_lookup_done(map, entry);
+	}
+}
+#endif
+
+static int
+_do_lock(struct thread *td, struct umtx *umtx, long id, int timo)
+{
+	struct umtx_q uq;
 	intptr_t owner;
 	intptr_t old;
-	int chain, page_off;
 	int error = 0;
 
 	/*
-	 * Care must be exercised when dealing with this structure.  It
+	 * Care must be exercised when dealing with the umtx structure.  It
 	 * can fault on any access.
 	 */
-	umtx = uap->umtx;
 
-	page_off = ((unsigned long)umtx) % PAGE_SIZE;
- 	/* Must not on page boundary. */
-	if (page_off + sizeof(void *) > PAGE_SIZE)
-		return (EINVAL);
-
 	for (;;) {
 		/*
 		 * Try the uncontested case.  This should be done in userland.
 		 */
 		owner = casuptr((intptr_t *)&umtx->u_owner,
-		    UMTX_UNOWNED, td->td_tid);
+		    UMTX_UNOWNED, id);
 
 		/* The acquire succeeded. */
 		if (owner == UMTX_UNOWNED)
@@ -257,7 +444,7 @@
 		/* If no one owns it but it is contested try to acquire it. */
 		if (owner == UMTX_CONTESTED) {
 			owner = casuptr((intptr_t *)&umtx->u_owner,
-			    UMTX_CONTESTED, td->td_tid | UMTX_CONTESTED);
+			    UMTX_CONTESTED, id | UMTX_CONTESTED);
 
 			if (owner == UMTX_CONTESTED)
 				return (0);
@@ -274,26 +461,9 @@
 		 * If we caught a signal, we have retried and now
 		 * exit immediately.
 		 */
-		if (error)
+		if (error || (error = umtxq_queue_me(td, umtx, &uq)) != 0)
 			return (error);
 
-		map = &td->td_proc->p_vmspace->vm_map;
-		if (vm_map_lookup(&map, (vm_offset_t)umtx, VM_PROT_WRITE,
-		    &entry, &object, &pindex, &prot, &wired) != KERN_SUCCESS) {
-			vm_map_lookup_done(map, entry);
-			return EFAULT;
-		}
-		vm_object_reference(object);
-		uq.uq_key.uk_object = object;
-		uq.uq_key.uk_offset = entry->offset + entry->start -
-			(vm_offset_t)umtx;
-		uq.uq_thread = td;
-		chain = umtxq_hash(&uq.uq_key);
-		umtxq_lock(chain);
-		umtxq_insert(chain, &uq);
-		umtxq_unlock(chain);
-		vm_map_lookup_done(map, entry);
-
 		/*
 		 * Set the contested bit so that a release in user space
 		 * knows to use the system call for unlock.  If this fails
@@ -305,10 +475,10 @@
 
 		/* The address was invalid. */
 		if (old == -1) {
-			umtxq_lock(chain);
+			umtxq_lock(&uq.uq_key);
 			umtxq_remove(&uq);
-			umtxq_unlock(chain);
-			vm_object_deallocate(uq.uq_key.uk_object);
+			umtxq_unlock(&uq.uq_key);
+			umtx_key_release(&uq.uq_key);
 			return (EFAULT);
 		}
 
@@ -317,45 +487,67 @@
 		 * and we need to retry or we lost a race to the thread
 		 * unlocking the umtx.
 		 */
-		umtxq_lock(chain);
+		umtxq_lock(&uq.uq_key);
 		if (old == owner && (td->td_flags & TDF_UMTXQ)) {
-			error = msleep(td, umtxq_mtx(chain),
+			error = umtxq_sleep(td, &uq.uq_key,
 				       td->td_priority | PCATCH | PDROP,
-				       "umtx", 0);
+				       "umtx", timo);
 			if (td->td_flags & TDF_UMTXQ) {
-				umtxq_lock(chain);
+				umtxq_lock(&uq.uq_key);
 				umtxq_remove(&uq);
-				umtxq_unlock(chain);
+				umtxq_unlock(&uq.uq_key);
 			}
 		} else {
+			umtxq_remove(&uq);
+			umtxq_unlock(&uq.uq_key);
 			error = 0;
-			umtxq_remove(&uq);
-			umtxq_unlock(chain);
 		}
-		vm_object_deallocate(uq.uq_key.uk_object);
+		umtx_key_release(&uq.uq_key);
 	}
 
 	return (0);
 }
 
-int
-_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
-    /* struct umtx *umtx */
+static int
+do_lock(struct thread *td, struct umtx *umtx, long id,
+	struct timespec *abstime)
+{
+	struct timespec ts1, ts2;
+	struct timeval tv;
+	int timo, error;
+
+	if (abstime == NULL) {
+		error = _do_lock(td, umtx, id, 0);
+	} else {
+		for (;;) {
+			ts1 = *abstime;
+			getnanotime(&ts2);
+			timespecsub(&ts1, &ts2);
+			TIMESPEC_TO_TIMEVAL(&tv, &ts1);
+			if (tv.tv_sec < 0) {
+				error = EWOULDBLOCK;
+				break;
+			}
+			timo = tvtohz(&tv);
+			error = _do_lock(td, umtx, id, timo);
+			if (error != EWOULDBLOCK) {
+				if (error == ERESTART)
+					error = EINTR;
+				break;
+			}
+		}
+	}
+	return (error);
+}
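
do_lock() recomputes a relative tick count from the caller's absolute
deadline on every pass, so an early EWOULDBLOCK from the sleep simply
retries with the remaining time, and a deadline already in the past
reports EWOULDBLOCK.  The same conversion in user-space form (a sketch;
the kernel's getnanotime() corresponds to the realtime clock here):

    #include <time.h>

    /* Remaining time until an absolute deadline; returns -1 once the
     * deadline has passed (the loop above then stops with EWOULDBLOCK). */
    int
    abs_to_rel(const struct timespec *abstime, struct timespec *rel)
    {
            struct timespec now;

            clock_gettime(CLOCK_REALTIME, &now);
            rel->tv_sec = abstime->tv_sec - now.tv_sec;
            rel->tv_nsec = abstime->tv_nsec - now.tv_nsec;
            if (rel->tv_nsec < 0) {
                    rel->tv_sec--;
                    rel->tv_nsec += 1000000000;
            }
            return (rel->tv_sec < 0 ? -1 : 0);
    }
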
+
+static int
+do_unlock(struct thread *td, struct umtx *umtx, long id)
 {
-	struct umtx_q uq;
-	struct umtx *umtx;
-	vm_map_t map;
-	vm_map_entry_t entry;
-	vm_object_t object;
-	vm_pindex_t pindex;
-	vm_prot_t prot;
-	boolean_t wired;
+	struct umtx_key key;
 	intptr_t owner;
 	intptr_t old;
-	int count;
+	int count, error;
 
-	umtx = uap->umtx;
-
 	/*
 	 * Make sure we own this mtx.
 	 *
@@ -365,7 +557,7 @@
 	if ((owner = fuword(&umtx->u_owner)) == -1)
 		return (EFAULT);
 
-	if ((owner & ~UMTX_CONTESTED) != td->td_tid)
+	if ((owner & ~UMTX_CONTESTED) != id)
 		return (EPERM);
 
 	/* We should only ever be in here for contested locks */
@@ -383,16 +575,8 @@
 	if (old != owner)
 		return (EINVAL);
 
-	map = &td->td_proc->p_vmspace->vm_map;
-	if (vm_map_lookup(&map, (vm_offset_t)umtx, VM_PROT_WRITE,
-	    &entry, &object, &pindex, &prot, &wired) != KERN_SUCCESS) {
-		vm_map_lookup_done(map, entry);
-		return EFAULT;
-	}
-	vm_object_reference(object);
-	uq.uq_key.uk_object = object;
-	uq.uq_key.uk_offset = entry->offset + entry->start - (vm_offset_t)umtx;
-	vm_map_lookup_done(map, entry);
+	if ((error = umtx_key_get(td, umtx, &key)) != 0)
+		return (error);
 
 	/*
 	 * At this point, a new thread can lock the umtx before we
@@ -400,9 +584,9 @@
 	 * are two or more threads on wait queue, we should set
 	 * contested bit for them.
 	 */
-	count = umtxq_count(&uq.uq_key);
+	count = umtxq_count(&key);
 	if (count <= 0) {
-		vm_object_deallocate(object);
+		umtx_key_release(&key);
 		return (0);
 	}
 
@@ -419,7 +603,7 @@
 			if (old == owner)
 				break;
 			if (old == -1) {
-				vm_object_deallocate(object);
+				umtx_key_release(&key);
 				return (EFAULT);
 			}
 			owner = old;
@@ -430,14 +614,187 @@
 		 * the umtx.
 		 */
 		if ((owner & ~UMTX_CONTESTED) != 0) {
-			vm_object_deallocate(object);
+			umtx_key_release(&key);
 			return (0);
 		}
 	}
 
 	/* Wake blocked thread. */
-	umtxq_signal(&uq.uq_key);
-	vm_object_deallocate(object);
+	umtxq_signal(&key);
+	umtx_key_release(&key);
+
+	return (0);
+}
+
+static int
+do_unlock_and_wait(struct thread *td, struct umtx *umtx, long id, void *uaddr,
+	struct timespec *abstime)
+{
+	struct umtx_q uq;
+	intptr_t owner;
+	intptr_t old;
+	struct timespec ts1, ts2;
+	struct timeval tv;
+	int timo, error = 0;
+
+	if (umtx == uaddr)
+		return (EINVAL);
+
+	/*
+	 * Make sure we own this mtx.
+	 *
+	 * XXX Need a {fu,su}ptr this is not correct on arch where
+	 * sizeof(intptr_t) != sizeof(long).
+	 */
+	if ((owner = fuword(&umtx->u_owner)) == -1)
+		return (EFAULT);
+
+	if ((owner & ~UMTX_CONTESTED) != id)
+		return (EPERM);
+
+	if ((error = umtxq_queue_me(td, uaddr, &uq)) != 0)
+		return (error);
+
+	old = casuptr((intptr_t *)&umtx->u_owner, id, UMTX_UNOWNED);
+	if (old == -1) {
+		umtxq_lock(&uq.uq_key);
+		umtxq_remove(&uq);
+		umtxq_unlock(&uq.uq_key);
+		umtx_key_release(&uq.uq_key);
+		return (EFAULT);
+	}
+	if (old != id) {
+		error = do_unlock(td, umtx, id);
+		if (error) {
+			umtxq_lock(&uq.uq_key);
+			umtxq_remove(&uq);
+			umtxq_unlock(&uq.uq_key);
+			umtx_key_release(&uq.uq_key);
+			return (error);
+		}
+	}
+	if (abstime == NULL) {
+		umtxq_lock(&uq.uq_key);
+		if (td->td_flags & TDF_UMTXQ)
+			error = umtxq_sleep(td, &uq.uq_key,
+			       td->td_priority | PCATCH, "ucond", 0);
+		umtxq_remove(&uq);
+		umtxq_unlock(&uq.uq_key);
+		if (error == ERESTART)
+			error = EINTR;
+	} else {
+		for (;;) {
+			ts1 = *abstime;
+			getnanotime(&ts2);
+			timespecsub(&ts1, &ts2);
+			TIMESPEC_TO_TIMEVAL(&tv, &ts1);
+			if (tv.tv_sec < 0) {
+				error = EWOULDBLOCK;
+				break;
+			}
+			timo = tvtohz(&tv);
+			umtxq_lock(&uq.uq_key);
+			if (td->td_flags & TDF_UMTXQ) {
+				error = umtxq_sleep(td, &uq.uq_key,
+					    td->td_priority | PCATCH | PDROP,
+					    "ucond", timo);
+				if (!(td->td_flags & TDF_UMTXQ)) {
+					error = 0;
+					break;
+				}
+				if (error != 0 && error != EWOULDBLOCK) {
+					if (error == ERESTART)
+						error = EINTR;
+					break;
+				}
+			} else {
+				umtxq_unlock(&uq.uq_key);
+				error = 0;
+				break;
+			}
+		}
+		if (td->td_flags & TDF_UMTXQ) {
+			umtxq_lock(&uq.uq_key);
+			umtxq_remove(&uq);
+			umtxq_unlock(&uq.uq_key);
+		}
+	}
+	umtx_key_release(&uq.uq_key);
+	return (error);
+}
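
The order of operations is what makes this usable as a condition-wait
primitive: the caller is queued on uaddr (umtxq_queue_me) before the
mutex word is handed back, so a signaller that takes the mutex
afterwards is guaranteed to find the waiter on the queue.  For
contrast, the naive sequence this avoids (illustrative comment only):

    /*
     * Broken unlock-then-wait:
     *
     *      waiter                          signaller
     *      ------                          ---------
     *      unlock(m);
     *                                      lock(m);
     *                                      wake(cv);   <- nobody queued yet
     *                                      unlock(m);
     *      sleep(cv);                      <- wakeup already lost
     *
     * do_unlock_and_wait() closes the window by queuing the waiter
     * before the casuptr() that releases the mutex word.
     */
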
 
+static int
+do_wake(struct thread *td, void *uaddr, int broadcast)
+{
+	struct umtx_key key;
+	int error;
+	
+	if ((error = umtx_key_get(td, uaddr, &key)) != 0)
+		return (error);
+	if (!broadcast)
+		umtxq_signal(&key);
+	else
+		umtxq_broadcast(&key);
+	umtx_key_release(&key);	
 	return (0);
 }
+
+int
+_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
+    /* struct umtx *umtx */
+{
+	return _do_lock(td, uap->umtx, td->td_tid, 0);
+}
+
+int
+_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
+    /* struct umtx *umtx */
+{
+	return do_unlock(td, uap->umtx, td->td_tid);
+}
+
+int
+_umtx_op(struct thread *td, struct _umtx_op_args *uap)
+{
+	struct timespec abstime;
+	struct timespec *ts;
+	int error;
+
+	switch(uap->op) {
+	case UMTX_OP_LOCK:
+		/* Allow a null timespec (wait forever). */
+		if (uap->abstime == NULL)
+			ts = NULL;
+		else {
+			error = copyin(uap->abstime, &abstime, sizeof(abstime));
+			if (error != 0)
+				return (error);
+			if (abstime.tv_nsec >= 1000000000 ||
+			    abstime.tv_nsec < 0)
+				return (EINVAL);
+			ts = &abstime;
+		}
+		return do_lock(td, uap->umtx, uap->id, ts);
+	case UMTX_OP_UNLOCK:
+		return do_unlock(td, uap->umtx, uap->id);
+	case UMTX_OP_UNLOCK_AND_WAIT:
+		/* Allow a null timespec (wait forever). */
+		if (uap->abstime == NULL)
+			ts = NULL;
+		else {
+			error = copyin(uap->abstime, &abstime, sizeof(abstime));
+			if (error != 0)
+				return (error);
+			if (abstime.tv_nsec >= 1000000000 ||
+			    abstime.tv_nsec < 0)
+				return (EINVAL);
+			ts = &abstime;
+		}
+		return do_unlock_and_wait(td, uap->umtx, uap->id,
+					  uap->uaddr, ts);
+	case UMTX_OP_WAKE:
+		return do_wake(td, uap->uaddr, uap->id);
+	default:
+		return (EINVAL);
+	}
+}
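
As the comment in _do_lock() says, the uncontested case belongs in
userland; these syscalls are only the contested slow path.  A sketch of
the fast path a thread library would pair with them, written with C11
atomics purely for illustration (the UMTX_* names mirror this file;
treat the slow-path reference as pseudocode, since no userland wrapper
is part of this change):

    #include <errno.h>
    #include <stdatomic.h>

    #define UMTX_UNOWNED    0L

    /* Fast path: one CAS from UMTX_UNOWNED to our thread id.  Only on
     * failure (owned, possibly with UMTX_CONTESTED set) does the caller
     * enter the kernel, i.e. _umtx_lock()/UMTX_OP_LOCK above. */
    int
    umtx_trylock(_Atomic long *owner, long id)
    {
            long expect = UMTX_UNOWNED;

            if (atomic_compare_exchange_strong(owner, &expect, id))
                    return (0);
            return (EBUSY); /* fall back to the _umtx_op() slow path */
    }
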

==== //depot/projects/davidxu_thread/src/sys/sys/proc.h#5 (text+ko) ====

@@ -32,7 +32,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)proc.h	8.15 (Berkeley) 5/19/95
- * $FreeBSD: src/sys/sys/proc.h,v 1.412 2004/11/20 02:32:50 das Exp $
+ * $FreeBSD: src/sys/sys/proc.h,v 1.414 2004/12/18 12:52:44 davidxu Exp $
  */
 
 #ifndef _SYS_PROC_H_
@@ -281,9 +281,8 @@
 	sigset_t	td_oldsigmask;	/* (k) Saved mask from pre sigpause. */
 	sigset_t	td_sigmask;	/* (c) Current signal mask. */
 	sigset_t	td_siglist;	/* (c) Sigs arrived, not delivered. */
-	sigset_t	td_sigproc;	/* (c) Signals targets proc. */
 	sigset_t	*td_waitset;	/* (c) Wait set for sigwait. */
-	TAILQ_ENTRY(thread) td_umtx;	/* (c?) Link for when we're blocked. */
+	struct umtx_q   *td_umtxq;	/* (c?) Link for when we're blocked. */
 	volatile u_int	td_generation;	/* (k) For detection of preemption */
 	stack_t		td_sigstk;	/* (k) Stack ptr and on-stack flag. */
 	int		td_kflags;	/* (c) Flags for KSE threading. */
@@ -346,7 +345,7 @@
 #define	TDF_NEEDRESCHED	0x00010000 /* Thread needs to yield. */
 #define	TDF_NEEDSIGCHK	0x00020000 /* Thread may need signal delivery. */
 #define	TDF_XSIG	0x00040000 /* Thread is exchanging signal under trace */
-#define	TDF_UMTXQ	0x00080000 /* Libthr thread is on a umtx. */
+#define	TDF_UMTXQ	0x00080000 /* Thread is sleeping on a umtx. */
 #define	TDF_THRWAKEUP	0x00100000 /* Libthr thread must not suspend itself. */
 #define	TDF_DBSUSPEND	0x00200000 /* Thread is suspended by debugger */
 #define	TDF_UNUSED22	0x00400000 /* --available -- */
@@ -466,13 +465,11 @@
 	TAILQ_ENTRY(ksegrp) kg_ksegrp;	/* (*) Queue of KSEGs in kg_proc. */
 	TAILQ_HEAD(, thread) kg_threads;/* (td_kglist) All threads. */
 	TAILQ_HEAD(, thread) kg_runq;	/* (td_runq) waiting RUNNABLE threads */
-	TAILQ_HEAD(, thread) kg_slpq;	/* (td_runq) NONRUNNABLE threads. */
 	TAILQ_HEAD(, kse_upcall) kg_upcalls;	/* All upcalls in the group. */
 
 #define	kg_startzero kg_estcpu
 	u_int		kg_estcpu;	/* (j) Sum of the same field in KSEs. */
 	u_int		kg_slptime;	/* (j) How long completely blocked. */
-	int		kg_runnable;	/* (j) Num runnable threads on queue. */
 	int		kg_numupcalls;	/* (j) Num upcalls. */
 	int		kg_upsleeps;	/* (c) Num threads in kse_release(). */
 	struct kse_thr_mailbox *kg_completed; /* (c) Completed thread mboxes. */

