PERFORCE change 65494 for review

David Xu davidxu at FreeBSD.org
Fri Nov 19 18:45:36 PST 2004


http://perforce.freebsd.org/chv.cgi?CH=65494

Change 65494 by davidxu at davidxu_alona on 2004/11/20 02:45:26

	Use umtx for the low-level lock.
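
	For reference: umtx(2) keeps the owner word in userland and only
	enters the kernel on contention, which is why the queue-based
	lockuser machinery removed below is no longer needed.  A rough
	sketch of the fast path follows (hypothetical name; the real
	wrapper lives in <sys/umtx.h> and its exact signature may
	differ):

		static __inline int
		umtx_lock_sketch(struct umtx *mtx, long tid)
		{
			/* Claim an unowned mutex with a single atomic op. */
			if (atomic_cmpset_acq_ptr(&mtx->u_owner,
			    (void *)UMTX_UNOWNED, (void *)tid))
				return (0);	/* uncontested, no syscall */
			/* Contested: fall back to the kernel, which queues
			   the thread and puts it to sleep. */
			if (_umtx_lock(mtx) == -1)
				return (errno);	/* e.g. EINTR */
			return (0);
		}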

Affected files ...

.. //depot/projects/davidxu_thread/src/lib/libthread/sys/lock.c#2 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/sys/lock.h#2 edit

Differences ...

==== //depot/projects/davidxu_thread/src/lib/libthread/sys/lock.c#2 (text+ko) ====

@@ -26,319 +26,4 @@
  * $FreeBSD: src/lib/libpthread/sys/lock.c,v 1.8 2003/11/04 20:01:38 deischen Exp $
  */
 
-#include <sys/types.h>
-#include <machine/atomic.h>
-#include <assert.h>
-#include <stdlib.h>
-
-#include "atomic_ops.h"
 #include "lock.h"
-
-#ifdef _LOCK_DEBUG
-#define	LCK_ASSERT(e)	assert(e)
-#else
-#define LCK_ASSERT(e)
-#endif
-
-#define	MAX_SPINS	500
-
-void
-_lock_destroy(struct lock *lck)
-{
-	if ((lck != NULL) && (lck->l_head != NULL)) {
-		free(lck->l_head);
-		lck->l_head = NULL;
-		lck->l_tail = NULL;
-	}
-}
-
-int
-_lock_init(struct lock *lck, enum lock_type ltype,
-    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
-{
-	if (lck == NULL)
-		return (-1);
-	else if ((lck->l_head = malloc(sizeof(struct lockreq))) == NULL)
-		return (-1);
-	else {
-		lck->l_type = ltype;
-		lck->l_wait = waitfunc;
-		lck->l_wakeup = wakeupfunc;
-		lck->l_head->lr_locked = 0;
-		lck->l_head->lr_watcher = NULL;
-		lck->l_head->lr_owner = NULL;
-		lck->l_head->lr_active = 1;
-		lck->l_tail = lck->l_head;
-	}
-	return (0);
-}
-
-int
-_lock_reinit(struct lock *lck, enum lock_type ltype,
-    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
-{
-	if (lck == NULL)
-		return (-1);
-	else if (lck->l_head == NULL)
-		return (_lock_init(lck, ltype, waitfunc, wakeupfunc));
-	else {
-		lck->l_head->lr_locked = 0;
-		lck->l_head->lr_watcher = NULL;
-		lck->l_head->lr_owner = NULL;
-		lck->l_head->lr_active = 1;
-		lck->l_tail = lck->l_head;
-	}
-	return (0);
-}
-
-int
-_lockuser_init(struct lockuser *lu, void *priv)
-{
-	if (lu == NULL)
-		return (-1);
-	else if ((lu->lu_myreq == NULL) &&
-	    ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
-		return (-1);
-	else {
-		lu->lu_myreq->lr_locked = 1;
-		lu->lu_myreq->lr_watcher = NULL;
-		lu->lu_myreq->lr_owner = lu;
-		lu->lu_myreq->lr_active = 0;
-		lu->lu_watchreq = NULL;
-		lu->lu_priority = 0;
-		lu->lu_private = priv;
-		lu->lu_private2 = NULL;
-	}
-	return (0);
-}
-
-int
-_lockuser_reinit(struct lockuser *lu, void *priv)
-{
-	if (lu == NULL)
-		return (-1);
-	/*
-	 * All lockusers keep their watch request and drop their
-	 * own (lu_myreq) request.  Their own request is either
-	 * some other lockuser's watch request or is the head of
-	 * the lock.
-	 */
-	lu->lu_myreq = lu->lu_watchreq;
-	if (lu->lu_myreq == NULL)
-		return (_lockuser_init(lu, priv));
-	else {
-		lu->lu_myreq->lr_locked = 1;
-		lu->lu_myreq->lr_watcher = NULL;
-		lu->lu_myreq->lr_owner = lu;
-		lu->lu_myreq->lr_active = 0;
-		lu->lu_watchreq = NULL;
-		lu->lu_priority = 0;
-		lu->lu_private = priv;
-		lu->lu_private2 = NULL;
-	}
-	return (0);
-}
-
-void
-_lockuser_destroy(struct lockuser *lu)
-{
-	if ((lu != NULL) && (lu->lu_myreq != NULL))
-		free(lu->lu_myreq);
-}
-
-/*
- * Acquire a lock waiting (spin or sleep) for it to become available.
- */
-void
-_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
-{
-	int i;
-	int lval;
-
-	/**
-	 * XXX - We probably want to remove these checks to optimize
-	 *       performance.  It is also a bug if any one of the 
-	 *       checks fail, so it's probably better to just let it
-	 *       SEGV and fix it.
-	 */
-#if 0
-	if (lck == NULL || lu == NULL || lck->l_head == NULL)
-		return;
-#endif
-	if ((lck->l_type & LCK_PRIORITY) != 0) {
-		LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
-		LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
-		LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
-		LCK_ASSERT(lu->lu_watchreq == NULL);
-
-		lu->lu_priority = prio;
-	}
-	/*
-	 * Atomically swap the head of the lock request with
-	 * this request.
-	 */
-	atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
-
-	if (lu->lu_watchreq->lr_locked != 0) {
-		atomic_store_rel_ptr(&lu->lu_watchreq->lr_watcher, lu);
-		if ((lck->l_wait == NULL) ||
-		    ((lck->l_type & LCK_ADAPTIVE) == 0)) {
-			while (lu->lu_watchreq->lr_locked != 0)
-				;	/* spin, then yield? */
-		} else {
-			/*
-			 * Spin for a bit before invoking the wait function.
-			 *
-			 * We should be a little smarter here.  If we're
-			 * running on a single processor, then the lock
-			 * owner got preempted and spinning will accomplish
-			 * nothing but waste time.  If we're running on
-			 * multiple processors, the owner could be running
-			 * on another CPU and we might acquire the lock if
-			 * we spin for a bit.
-			 *
-			 * The other thing to keep in mind is that threads
-			 * acquiring these locks are considered to be in
-			 * critical regions; they will not be preempted by
-			 * the _UTS_ until they release the lock.  It is
-			 * therefore safe to assume that if a lock can't
-			 * be acquired, it is currently held by a thread
-			 * running in another KSE.
-			 */
-			for (i = 0; i < MAX_SPINS; i++) {
-				if (lu->lu_watchreq->lr_locked == 0)
-					return;
-				if (lu->lu_watchreq->lr_active == 0)
-					break;
-			}
-			atomic_swap_int((int *)&lu->lu_watchreq->lr_locked,
-			    2, &lval);
-			if (lval == 0)
-				lu->lu_watchreq->lr_locked = 0;
-			else
-				lck->l_wait(lck, lu);
-
-		}
-	}
-	lu->lu_myreq->lr_active = 1;
-}
-
-/*
- * Release a lock.
- */
-void
-_lock_release(struct lock *lck, struct lockuser *lu)
-{
-	struct lockuser *lu_tmp, *lu_h;
-	struct lockreq *myreq;
-	int prio_h;
-	int lval;
-
-	/**
-	 * XXX - We probably want to remove these checks to optimize
-	 *       performance.  It is also a bug if any one of the 
-	 *       checks fail, so it's probably better to just let it
-	 *       SEGV and fix it.
-	 */
-#if 0
-	if ((lck == NULL) || (lu == NULL))
-		return;
-#endif
-	if ((lck->l_type & LCK_PRIORITY) != 0) {
-		prio_h = 0;
-		lu_h = NULL;
-
-		/* Update tail if our request is last. */
-		if (lu->lu_watchreq->lr_owner == NULL) {
-			atomic_store_rel_ptr(&lck->l_tail, lu->lu_myreq);
-			atomic_store_rel_ptr(&lu->lu_myreq->lr_owner, NULL);
-		} else {
-			/* Remove ourselves from the list. */
-			atomic_store_rel_ptr(&lu->lu_myreq->lr_owner,
-			    lu->lu_watchreq->lr_owner);
-			atomic_store_rel_ptr(
-			    &lu->lu_watchreq->lr_owner->lu_myreq, lu->lu_myreq);
-		}
-		/*
-		 * The watch request now becomes our own because we've
-		 * traded away our previous request.  Save our previous
-		 * request so that we can grant the lock.
-		 */
-		myreq = lu->lu_myreq;
-		lu->lu_myreq = lu->lu_watchreq;
-		lu->lu_watchreq = NULL;
-		lu->lu_myreq->lr_locked = 1;
-		lu->lu_myreq->lr_owner = lu;
-		lu->lu_myreq->lr_watcher = NULL;
-		/*
-		 * Traverse the list of lock requests in reverse order
-		 * looking for the user with the highest priority.
-		 */
-		for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
-		     lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
-			if (lu_tmp->lu_priority > prio_h) {
-				lu_h = lu_tmp;
-				prio_h = lu_tmp->lu_priority;
-			}
-		}
-		if (lu_h != NULL) {
-			/* Give the lock to the highest priority user. */
-			if (lck->l_wakeup != NULL) {
-				atomic_swap_int(
-				    (int *)&lu_h->lu_watchreq->lr_locked,
-				    0, &lval);
-				if (lval == 2)
-					/* Notify the sleeper */
-					lck->l_wakeup(lck,
-					    lu_h->lu_myreq->lr_watcher);
-			}
-			else
-				atomic_store_rel_int(
-				    &lu_h->lu_watchreq->lr_locked, 0);
-		} else {
-			if (lck->l_wakeup != NULL) {
-				atomic_swap_int((int *)&myreq->lr_locked,
-				    0, &lval);
-				if (lval == 2)
-					/* Notify the sleeper */
-					lck->l_wakeup(lck, myreq->lr_watcher);
-			}
-			else
-				/* Give the lock to the previous request. */
-				atomic_store_rel_int(&myreq->lr_locked, 0);
-		}
-	} else {
-		/*
-		 * The watch request now becomes our own because we've
-		 * traded away our previous request.  Save our previous
-		 * request so that we can grant the lock.
-		 */
-		myreq = lu->lu_myreq;
-		lu->lu_myreq = lu->lu_watchreq;
-		lu->lu_watchreq = NULL;
-		lu->lu_myreq->lr_locked = 1;
-		if (lck->l_wakeup) {
-			atomic_swap_int((int *)&myreq->lr_locked, 0, &lval);
-			if (lval == 2)
-				/* Notify the sleeper */
-				lck->l_wakeup(lck, myreq->lr_watcher);
-		}
-		else
-			/* Give the lock to the previous request. */
-			atomic_store_rel_int(&myreq->lr_locked, 0);
-	}
-	lu->lu_myreq->lr_active = 0;
-}
-
-void
-_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
-{
-	atomic_store_rel_int(&lu->lu_watchreq->lr_locked, 3);
-}
-
-void
-_lockuser_setactive(struct lockuser *lu, int active)
-{
-	lu->lu_myreq->lr_active = active;
-}
-

==== //depot/projects/davidxu_thread/src/lib/libthread/sys/lock.h#2 (text+ko) ====

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2001, 2003 Daniel Eischen <deischen at freebsd.org>.
+ * Copyright (c) 2005 David Xu <davidxu at freebsd.org>.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,69 +28,63 @@
  */
 
 #ifndef _LOCK_H_
-#define	_LOCK_H_
+#define _LOCK_H_
+
+#include <errno.h>
+#include <sys/types.h>
+#include <machine/atomic.h>
+#include <sys/umtx.h>
 
-struct lockreq;
-struct lockuser;
-struct lock;
+extern void _thr_exit(char *fname, int lineno, char *msg);
 
-enum lock_type {
-	LCK_DEFAULT	= 0x0000,	/* default is FIFO spin locks */
-	LCK_PRIORITY	= 0x0001,
-	LCK_ADAPTIVE 	= 0x0002	/* call user-supplied handlers */
+struct lock {
+	struct umtx	l_mtx;
 };
 
-typedef void lock_handler_t(struct lock *, struct lockuser *);
+#define	_LOCK_INITIALIZER(lck)	{ {UMTX_UNOWNED} }
 
-struct lock {
-	struct lockreq	*l_head;
-	struct lockreq	*l_tail;	/* only used for priority locks */
-	enum lock_type	l_type;
-	lock_handler_t	*l_wait;	/* only used for adaptive locks */
-	lock_handler_t	*l_wakeup;	/* only used for adaptive locks */
-};
+static inline int
+_lock_init(struct lock *lck)
+{
+	lck->l_mtx.u_owner = UMTX_UNOWNED;
+	return (0);
+}
 
-/* Try to make this >= CACHELINESIZE */
-struct lockreq {
-	struct lockuser	*lr_watcher;	/* only used for priority locks */
-	struct lockuser	*lr_owner;	/* only used for priority locks */
-	volatile int	lr_locked;	/* lock granted = 0, busy otherwise */
-	volatile int	lr_active;	/* non-zero if the lock is last lock for thread */
-};
+static inline int
+_lock_reinit(struct lock *lck)
+{
+	lck->l_mtx.u_owner = UMTX_UNOWNED;
+	return (0);
+}
 
-struct lockuser {
-	struct lockreq	*lu_myreq;	/* request to give up/trade */
-	struct lockreq	*lu_watchreq;	/* watch this request */
-	int		lu_priority;	/* only used for priority locks */
-	void		*lu_private1;	/* private{1,2} are initialized to */
-	void		*lu_private2;	/*   NULL and can be used by caller */
-#define	lu_private	lu_private1
-};
+static inline void
+_lock_destroy(struct lock *lck __unused)
+{
+}
 
-#define	_LCK_INITIALIZER(lck_req)	{ &lck_req, NULL, LCK_DEFAULT, \
-					  NULL, NULL }
-#define	_LCK_REQUEST_INITIALIZER	{ 0, NULL, NULL, 0 }
+static inline void
+_lock_acquire(struct lock *lck, long tid)
+{
+	int err;
 
-#define	_LCK_BUSY(lu)			((lu)->lu_watchreq->lr_locked != 0)
-#define	_LCK_ACTIVE(lu)			((lu)->lu_watchreq->lr_active != 0)
-#define	_LCK_GRANTED(lu)		((lu)->lu_watchreq->lr_locked == 3)
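+	/* Loop on transient failures (EINTR/EAGAIN); anything else is fatal. */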
+	while ((err = umtx_lock(&lck->l_mtx, tid)) != 0) {
+		if (err != EINTR && err != EAGAIN)
+			_thr_exit(__FILE__, __LINE__, "umtx_lock failed.\n");
+	}
+}
 
-#define	_LCK_SET_PRIVATE(lu, p)		(lu)->lu_private = (void *)(p)
-#define	_LCK_GET_PRIVATE(lu)		(lu)->lu_private
-#define	_LCK_SET_PRIVATE2(lu, p)	(lu)->lu_private2 = (void *)(p)
-#define	_LCK_GET_PRIVATE2(lu)		(lu)->lu_private2
+static inline int
+_lock_trylock(struct lock *lck, long tid)
+{
+	return (umtx_trylock(&lck->l_mtx, tid));
+}
 
-void	_lock_acquire(struct lock *, struct lockuser *, int);
-void	_lock_destroy(struct lock *);
-void	_lock_grant(struct lock *, struct lockuser *);
-int	_lock_init(struct lock *, enum lock_type,
-	    lock_handler_t *, lock_handler_t *);
-int	_lock_reinit(struct lock *, enum lock_type,
-	    lock_handler_t *, lock_handler_t *);
-void	_lock_release(struct lock *, struct lockuser *);
-int	_lockuser_init(struct lockuser *lu, void *priv);
-void	_lockuser_destroy(struct lockuser *lu);
-int	_lockuser_reinit(struct lockuser *lu, void *priv);
-void	_lockuser_setactive(struct lockuser *lu, int active);
+static inline void
+_lock_release(struct lock *lck, long tid)
+{
+	if (umtx_unlock(&lck->l_mtx, tid)) {
+		_thr_exit(__FILE__, __LINE__, "umtx_unlock failed.\n");
+	}
+}
 
 #endif
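
For illustration, a caller of the new interface might look like the
following (a hypothetical sketch: the lock name and the use of
thr_self() to obtain the thread id are assumptions, not part of this
change):

	#include <sys/thr.h>
	#include "lock.h"

	static struct lock example_lock = _LOCK_INITIALIZER(example_lock);

	static void
	example(void)
	{
		long tid;

		thr_self(&tid);		/* current kernel thread id */
		_lock_acquire(&example_lock, tid);
		/* ... critical section ... */
		_lock_release(&example_lock, tid);
	}

The thread id is stored as the umtx owner, so umtx_unlock() can verify
that the caller actually holds the lock before releasing it.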

