PERFORCE change 100089 for review
John Baldwin
jhb at freebsd.org
Mon Jun 26 22:29:55 UTC 2006
On Monday 26 June 2006 16:54, Kip Macy wrote:
> http://perforce.freebsd.org/chv.cgi?CH=100089
>
> Change 100089 by kmacy at kmacy_storage:sun4v_work_sleepq on 2006/06/26
20:53:51
>
> add profiling for rwlocks
> not convinced of correctness as there don't appear to be any contended
rwlocks on my workloads
Few things use them currently. I have a patch to make the name cache use them
if you want it.
> Affected files ...
>
> .. //depot/projects/kmacy_sun4v/src/sys/kern/kern_rwlock.c#4 edit
>
> Differences ...
>
> ==== //depot/projects/kmacy_sun4v/src/sys/kern/kern_rwlock.c#4 (text+ko)
====
>
> @@ -44,7 +44,7 @@
> #include <sys/rwlock.h>
> #include <sys/systm.h>
> #include <sys/turnstile.h>
> -
> +#include <sys/lock_profile.h>
> #include <machine/cpu.h>
>
> #ifdef DDB
> @@ -80,12 +80,19 @@
> #define _rw_assert(rw, what, file, line)
> #endif
>
> +#ifdef SMP
> +#define smp_turnstile_valid(obj) (turnstile_lookup((obj)) != NULL)
> #else
> +#define smp_turnstile_valid(obj) 1
> +#endif
> +
> void
> rw_init(struct rwlock *rw, const char *name)
> {
>
> rw->rw_lock = RW_UNLOCKED;
>
> + lock_profile_init(&rw->rw_object, name);
> lock_init(&rw->rw_object, &lock_class_rw, name, NULL, LO_WITNESS |
> LO_RECURSABLE | LO_UPGRADABLE);
> }
> @@ -95,6 +102,7 @@
> {
>
> KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
> + lock_profile_destroy(&rw->rw_object);
> lock_destroy(&rw->rw_object);
> }
>
> @@ -109,14 +117,17 @@
> void
> _rw_wlock(struct rwlock *rw, const char *file, int line)
> {
> -
> + uint64_t waitstart;
> +
> MPASS(curthread != NULL);
> KASSERT(rw_wowner(rw) != curthread,
> ("%s (%s): wlock already held @ %s:%d", __func__,
> rw->rw_object.lo_name, file, line));
> WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
> line);
> + lock_profile_waitstart(&waitstart);
> __rw_wlock(rw, curthread, file, line);
> + lock_profile_obtain_lock_success(&rw->rw_object, waitstart, file, line);
> LOCK_LOG_LOCK("WLOCK", &rw->rw_object, 0, 0, file, line);
> WITNESS_LOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
> }
> @@ -129,6 +140,7 @@
> _rw_assert(rw, RA_WLOCKED, file, line);
> WITNESS_UNLOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
> LOCK_LOG_LOCK("WUNLOCK", &rw->rw_object, 0, 0, file, line);
> + lock_profile_release_lock(&rw->rw_object);
> __rw_wunlock(rw, curthread, file, line);
> }
>
> @@ -139,6 +151,8 @@
> volatile struct thread *owner;
> #endif
> uintptr_t x;
> + uint64_t waitstart;
> + int contested;
>
> KASSERT(rw_wowner(rw) != curthread,
> ("%s (%s): wlock already held @ %s:%d", __func__,
> @@ -156,6 +170,7 @@
> * be blocked on the writer, and the writer would be blocked
> * waiting for the reader to release its original read lock.
> */
> + lock_profile_waitstart(&waitstart);
> for (;;) {
> /*
> * Handle the easy case. If no other thread has a write
> @@ -169,7 +184,6 @@
> */
> x = rw->rw_lock;
> if (x & RW_LOCK_READ) {
> -
> /*
> * The RW_LOCK_READ_WAITERS flag should only be set
> * if another thread currently holds a write lock,
> @@ -178,6 +192,9 @@
> MPASS((x & RW_LOCK_READ_WAITERS) == 0);
> if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
> x + RW_ONE_READER)) {
> + if ((x & ((1<<RW_READERS_SHIFT)-1)) == 0)
> + lock_profile_obtain_lock_success(&rw->rw_object, waitstart, file,
line);
> +
> if (LOCK_LOG_TEST(&rw->rw_object, 0))
> CTR4(KTR_LOCK,
> "%s: %p succeed %p -> %p", __func__,
> @@ -186,6 +203,7 @@
> break;
> }
> cpu_spinwait();
> + lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
> continue;
> }
>
> @@ -234,6 +252,7 @@
> */
> owner = (struct thread *)RW_OWNER(x);
> if (TD_IS_RUNNING(owner)) {
> + lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
> turnstile_release(&rw->rw_object);
> if (LOCK_LOG_TEST(&rw->rw_object, 0))
> CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
> @@ -279,7 +298,6 @@
> LOCK_LOG_LOCK("RUNLOCK", &rw->rw_object, 0, 0, file, line);
>
> /* TODO: drop "owner of record" here. */
> -
> for (;;) {
> /*
> * See if there is more than one read lock held. If so,
> @@ -297,7 +315,8 @@
> break;
> }
> continue;
> - }
> + } else
> + lock_profile_release_lock(&rw->rw_object);
>
> /*
> * We should never have read waiters while at least one
> @@ -328,7 +347,8 @@
> break;
> }
> continue;
> - }
> + }
> +
>
> /*
> * There should just be one reader with one or more
> @@ -394,6 +414,7 @@
> volatile struct thread *owner;
> #endif
> uintptr_t v;
> + int contested;
>
> if (LOCK_LOG_TEST(&rw->rw_object, 0))
> CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
> @@ -434,6 +455,7 @@
> }
> turnstile_release(&rw->rw_object);
> cpu_spinwait();
> + lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
> continue;
> }
>
> @@ -447,6 +469,7 @@
> v | RW_LOCK_WRITE_WAITERS)) {
> turnstile_release(&rw->rw_object);
> cpu_spinwait();
> + lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
> continue;
> }
> if (LOCK_LOG_TEST(&rw->rw_object, 0))
> @@ -462,6 +485,7 @@
> */
> owner = (struct thread *)RW_OWNER(v);
> if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
> + lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
> turnstile_release(&rw->rw_object);
> if (LOCK_LOG_TEST(&rw->rw_object, 0))
> CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
> @@ -629,11 +653,7 @@
> v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
> success = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
> tid | v);
> -#ifdef SMP
> - if (success && v && turnstile_lookup(&rw->rw_object) != NULL)
> -#else
> - if (success && v)
> -#endif
> + if (success && v && smp_turnstile_valid(&rw->rw_object))
> turnstile_claim(&rw->rw_object);
> else
> turnstile_release(&rw->rw_object);
>
--
John Baldwin
More information about the p4-projects
mailing list