svn commit: r192221 - in user/kmacy/ZFS_MFC/sys: kern sys
Kip Macy
kmacy at FreeBSD.org
Sat May 16 21:48:10 UTC 2009
Author: kmacy
Date: Sat May 16 21:48:10 2009
New Revision: 192221
URL: http://svn.freebsd.org/changeset/base/192221
Log:
integrate rmlock support and bring kern_osd.c in line with HEAD
Added:
user/kmacy/ZFS_MFC/sys/kern/kern_rmlock.c (contents, props changed)
user/kmacy/ZFS_MFC/sys/sys/_rmlock.h (contents, props changed)
user/kmacy/ZFS_MFC/sys/sys/rmlock.h (contents, props changed)
Modified:
user/kmacy/ZFS_MFC/sys/kern/kern_osd.c
user/kmacy/ZFS_MFC/sys/kern/subr_pcpu.c
user/kmacy/ZFS_MFC/sys/sys/pcpu.h
Modified: user/kmacy/ZFS_MFC/sys/kern/kern_osd.c
==============================================================================
--- user/kmacy/ZFS_MFC/sys/kern/kern_osd.c Sat May 16 21:38:55 2009 (r192220)
+++ user/kmacy/ZFS_MFC/sys/kern/kern_osd.c Sat May 16 21:48:10 2009 (r192221)
@@ -36,7 +36,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
-#include <sys/rwlock.h>
+#include <sys/rmlock.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/proc.h>
@@ -78,7 +78,7 @@ static const u_int osd_nmethods[OSD_LAST
};
static struct sx osd_module_lock[OSD_LAST + 1];
-static struct rwlock osd_object_lock[OSD_LAST + 1];
+static struct rmlock osd_object_lock[OSD_LAST + 1];
static struct mtx osd_list_lock[OSD_LAST + 1];
static void
@@ -124,12 +124,12 @@ osd_register(u_int type, osd_destructor_
osd_nmethods[type], M_OSD, M_WAITOK);
newptr = malloc(sizeof(osd_destructor_t) * osd_nslots[type],
M_OSD, M_WAITOK);
- rw_wlock(&osd_object_lock[type]);
+ rm_wlock(&osd_object_lock[type]);
bcopy(osd_destructors[type], newptr,
sizeof(osd_destructor_t) * i);
free(osd_destructors[type], M_OSD);
osd_destructors[type] = newptr;
- rw_wunlock(&osd_object_lock[type]);
+ rm_wunlock(&osd_object_lock[type]);
OSD_DEBUG("New slot allocated (type=%u, slot=%u).",
type, i + 1);
}
@@ -154,7 +154,7 @@ osd_deregister(u_int type, u_int slot)
KASSERT(osd_destructors[type][slot - 1] != NULL, ("Unused slot."));
sx_xlock(&osd_module_lock[type]);
- rw_wlock(&osd_object_lock[type]);
+ rm_wlock(&osd_object_lock[type]);
/*
* Free all OSD for the given slot.
*/
@@ -188,25 +188,26 @@ osd_deregister(u_int type, u_int slot)
OSD_DEBUG("Slot deregistration (type=%u, slot=%u).",
type, slot);
}
- rw_wunlock(&osd_object_lock[type]);
+ rm_wunlock(&osd_object_lock[type]);
sx_xunlock(&osd_module_lock[type]);
}
int
osd_set(u_int type, struct osd *osd, u_int slot, void *value)
{
+ struct rm_priotracker tracker;
KASSERT(type >= OSD_FIRST && type <= OSD_LAST, ("Invalid type."));
KASSERT(slot > 0, ("Invalid slot."));
KASSERT(osd_destructors[type][slot - 1] != NULL, ("Unused slot."));
- rw_rlock(&osd_object_lock[type]);
+ rm_rlock(&osd_object_lock[type], &tracker);
if (slot > osd->osd_nslots) {
if (value == NULL) {
OSD_DEBUG(
"Not allocating null slot (type=%u, slot=%u).",
type, slot);
- rw_runlock(&osd_object_lock[type]);
+ rm_runlock(&osd_object_lock[type], &tracker);
return (0);
} else if (osd->osd_nslots == 0) {
/*
@@ -216,7 +217,7 @@ osd_set(u_int type, struct osd *osd, u_i
osd->osd_slots = malloc(sizeof(void *) * slot, M_OSD,
M_NOWAIT | M_ZERO);
if (osd->osd_slots == NULL) {
- rw_runlock(&osd_object_lock[type]);
+ rm_runlock(&osd_object_lock[type], &tracker);
return (ENOMEM);
}
osd->osd_nslots = slot;
@@ -234,7 +235,7 @@ osd_set(u_int type, struct osd *osd, u_i
newptr = realloc(osd->osd_slots, sizeof(void *) * slot,
M_OSD, M_NOWAIT | M_ZERO);
if (newptr == NULL) {
- rw_runlock(&osd_object_lock[type]);
+ rm_runlock(&osd_object_lock[type], &tracker);
return (ENOMEM);
}
osd->osd_slots = newptr;
@@ -245,20 +246,21 @@ osd_set(u_int type, struct osd *osd, u_i
OSD_DEBUG("Setting slot value (type=%u, slot=%u, value=%p).", type,
slot, value);
osd->osd_slots[slot - 1] = value;
- rw_runlock(&osd_object_lock[type]);
+ rm_runlock(&osd_object_lock[type], &tracker);
return (0);
}
void *
osd_get(u_int type, struct osd *osd, u_int slot)
{
+ struct rm_priotracker tracker;
void *value;
KASSERT(type >= OSD_FIRST && type <= OSD_LAST, ("Invalid type."));
KASSERT(slot > 0, ("Invalid slot."));
KASSERT(osd_destructors[type][slot - 1] != NULL, ("Unused slot."));
- rw_rlock(&osd_object_lock[type]);
+ rm_rlock(&osd_object_lock[type], &tracker);
if (slot > osd->osd_nslots) {
value = NULL;
OSD_DEBUG("Slot doesn't exist (type=%u, slot=%u).", type, slot);
@@ -267,7 +269,7 @@ osd_get(u_int type, struct osd *osd, u_i
OSD_DEBUG("Returning slot value (type=%u, slot=%u, value=%p).",
type, slot, value);
}
- rw_runlock(&osd_object_lock[type]);
+ rm_runlock(&osd_object_lock[type], &tracker);
return (value);
}
@@ -280,7 +282,7 @@ do_osd_del_locked(u_int type, struct osd
KASSERT(slot > 0, ("Invalid slot."));
KASSERT(osd_destructors[type][slot - 1] != NULL, ("Unused slot."));
mtx_assert(&osd_list_lock[type], MA_OWNED);
-
+
OSD_DEBUG("Deleting slot (type=%u, slot=%u).", type, slot);
if (slot > osd->osd_nslots) {
@@ -331,10 +333,11 @@ do_osd_del(u_int type, struct osd *osd,
void
osd_del(u_int type, struct osd *osd, u_int slot)
{
+ struct rm_priotracker tracker;
- rw_rlock(&osd_object_lock[type]);
+ rm_rlock(&osd_object_lock[type], &tracker);
do_osd_del(type, osd, slot);
- rw_runlock(&osd_object_lock[type]);
+ rm_runlock(&osd_object_lock[type], &tracker);
}
@@ -367,6 +370,7 @@ osd_call(u_int type, u_int method, void
void
osd_exit(u_int type, struct osd *osd)
{
+ struct rm_priotracker tracker;
u_int i;
KASSERT(type >= OSD_FIRST && type <= OSD_LAST, ("Invalid type."));
@@ -377,14 +381,14 @@ osd_exit(u_int type, struct osd *osd)
return;
}
- rw_rlock(&osd_object_lock[type]);
+ rm_rlock(&osd_object_lock[type], &tracker);
for (i = 1; i <= osd->osd_nslots; i++) {
if (osd_destructors[type][i - 1] != NULL)
do_osd_del(type, osd, i);
else
OSD_DEBUG("Unused slot (type=%u, slot=%u).", type, i);
}
- rw_runlock(&osd_object_lock[type]);
+ rm_runlock(&osd_object_lock[type], &tracker);
OSD_DEBUG("Object exit (type=%u).", type);
}
@@ -397,7 +401,7 @@ osd_init(void *arg __unused)
osd_nslots[i] = 0;
LIST_INIT(&osd_list[i]);
sx_init(&osd_module_lock[i], "osd_module");
- rw_init(&osd_object_lock[i], "osd_object");
+ rm_init(&osd_object_lock[i], "osd_object", 0);
mtx_init(&osd_list_lock[i], "osd_list", NULL, MTX_DEF);
osd_destructors[i] = NULL;
osd_methods[i] = NULL;
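
The conversion above is mechanical on the write side (rw_wlock -> rm_wlock),
but the read side changes shape: rm_rlock() and rm_runlock() take a
caller-supplied struct rm_priotracker, normally on the stack, so the common
read path avoids atomic operations on the lock itself. A minimal caller
sketch of the pattern, using hypothetical names (example_lock, example_read)
rather than anything from this commit:

	#include <sys/param.h>
	#include <sys/lock.h>	/* must precede rmlock.h for LOCK_DEBUG */
	#include <sys/rmlock.h>

	static struct rmlock example_lock;	/* hypothetical lock */

	static void *
	example_read(void **slots, u_int slot)
	{
		struct rm_priotracker tracker;	/* per-acquisition state */
		void *value;

		rm_rlock(&example_lock, &tracker);
		value = slots[slot];		/* read-side section */
		rm_runlock(&example_lock, &tracker);
		return (value);
	}

Every acquisition needs its own tracker, which is why each function in
kern_osd.c now declares one locally.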
Added: user/kmacy/ZFS_MFC/sys/kern/kern_rmlock.c
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ user/kmacy/ZFS_MFC/sys/kern/kern_rmlock.c Sat May 16 21:48:10 2009 (r192221)
@@ -0,0 +1,490 @@
+/*-
+ * Copyright (c) 2007 Stephan Uphoff <ups at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Machine independent bits of the read-mostly (rm) lock implementation.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ddb.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rmlock.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+#include <sys/turnstile.h>
+#include <sys/lock_profile.h>
+#include <machine/cpu.h>
+
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
+#define RMPF_ONQUEUE 1
+#define RMPF_SIGNAL 2
+
+/*
+ * To support usage of rmlock in CVs and msleep, yet another list for the
+ * priority tracker would be needed. Using this lock for cv and msleep
+ * also does not seem very useful.
+ */
+
+static __inline void
+compiler_memory_barrier(void)
+{
+	__asm __volatile("":::"memory");
+}
+
+static void lock_rm(struct lock_object *lock, int how);
+static int unlock_rm(struct lock_object *lock);
+
+struct lock_class lock_class_rm = {
+ .lc_name = "rm",
+ .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
+#if 0
+#ifdef DDB
+ .lc_ddb_show = db_show_rwlock,
+#endif
+#endif
+ .lc_lock = lock_rm,
+ .lc_unlock = unlock_rm,
+};
+
+static void
+lock_rm(struct lock_object *lock, int how)
+{
+
+ panic("lock_rm called");
+}
+
+static int
+unlock_rm(struct lock_object *lock)
+{
+
+ panic("unlock_rm called");
+}
+
+static struct mtx rm_spinlock;
+
+MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);
+
+/*
+ * Add or remove a tracker from the per-CPU list.
+ *
+ * The per-CPU list can be traversed at any time in the forward direction
+ * from an interrupt on the *local* CPU.
+ */
+static inline void
+rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
+{
+ struct rm_queue *next;
+
+ /* Initialize all tracker pointers */
+ tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
+ next = pc->pc_rm_queue.rmq_next;
+ tracker->rmp_cpuQueue.rmq_next = next;
+
+	/* rmq_prev is not used during forward traversal. */
+ next->rmq_prev = &tracker->rmp_cpuQueue;
+
+ /* Update pointer to first element. */
+ pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
+}
+
+static inline void
+rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
+{
+ struct rm_queue *next, *prev;
+
+ next = tracker->rmp_cpuQueue.rmq_next;
+ prev = tracker->rmp_cpuQueue.rmq_prev;
+
+ /* Not used during forward traversal. */
+ next->rmq_prev = prev;
+
+ /* Remove from list. */
+ prev->rmq_next = next;
+}
+
+static void
+rm_cleanIPI(void *arg)
+{
+ struct pcpu *pc;
+ struct rmlock *rm = arg;
+ struct rm_priotracker *tracker;
+	struct rm_queue *queue;
+
+	pc = pcpu_find(curcpu);
+
+ for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
+ queue = queue->rmq_next) {
+ tracker = (struct rm_priotracker *)queue;
+ if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
+ tracker->rmp_flags = RMPF_ONQUEUE;
+ mtx_lock_spin(&rm_spinlock);
+ LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
+ rmp_qentry);
+ mtx_unlock_spin(&rm_spinlock);
+ }
+ }
+}
+
+void
+rm_init(struct rmlock *rm, const char *name, int opts)
+{
+
+ rm->rm_noreadtoken = 1;
+ LIST_INIT(&rm->rm_activeReaders);
+	mtx_init(&rm->rm_lock, name, "RM_MTX", MTX_NOWITNESS);
+	lock_init(&rm->lock_object, &lock_class_rm, name, NULL,
+	    (opts & LO_RECURSABLE) | LO_WITNESS);
+}
+
+void
+rm_destroy(struct rmlock *rm)
+{
+
+ mtx_destroy(&rm->rm_lock);
+ lock_destroy(&rm->lock_object);
+}
+
+int
+rm_wowned(struct rmlock *rm)
+{
+
+ return (mtx_owned(&rm->rm_lock));
+}
+
+void
+rm_sysinit(void *arg)
+{
+	struct rm_args *args = arg;
+
+	rm_init(args->ra_rm, args->ra_desc, args->ra_opts);
+}
+
+static void
+_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker)
+{
+ struct pcpu *pc;
+ struct rm_queue *queue;
+ struct rm_priotracker *atracker;
+
+ critical_enter();
+ pc = pcpu_find(curcpu);
+
+ /* Check if we just need to do a proper critical_exit. */
+ if (0 == rm->rm_noreadtoken) {
+ critical_exit();
+ return;
+ }
+
+ /* Remove our tracker from the per cpu list. */
+ rm_tracker_remove(pc, tracker);
+
+ /* Check to see if the IPI granted us the lock after all. */
+ if (tracker->rmp_flags) {
+ /* Just add back tracker - we hold the lock. */
+ rm_tracker_add(pc, tracker);
+ critical_exit();
+ return;
+ }
+
+ /*
+	 * We allow readers to acquire a lock even if a writer is blocked,
+	 * provided the lock is recursive and the reader already holds it.
+ */
+ if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
+ /*
+		 * Just grant the lock if this thread already has a tracker
+		 * for this lock on the per-CPU queue.
+ */
+ for (queue = pc->pc_rm_queue.rmq_next;
+ queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
+ atracker = (struct rm_priotracker *)queue;
+ if ((atracker->rmp_rmlock == rm) &&
+ (atracker->rmp_thread == tracker->rmp_thread)) {
+ mtx_lock_spin(&rm_spinlock);
+ LIST_INSERT_HEAD(&rm->rm_activeReaders,
+ tracker, rmp_qentry);
+ tracker->rmp_flags = RMPF_ONQUEUE;
+ mtx_unlock_spin(&rm_spinlock);
+ rm_tracker_add(pc, tracker);
+ critical_exit();
+ return;
+ }
+ }
+ }
+
+ sched_unpin();
+ critical_exit();
+
+ mtx_lock(&rm->rm_lock);
+ rm->rm_noreadtoken = 0;
+ critical_enter();
+
+ pc = pcpu_find(curcpu);
+ rm_tracker_add(pc, tracker);
+ sched_pin();
+ critical_exit();
+
+ mtx_unlock(&rm->rm_lock);
+}
+
+void
+_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker)
+{
+ struct thread *td = curthread;
+ struct pcpu *pc;
+
+ tracker->rmp_flags = 0;
+ tracker->rmp_thread = td;
+ tracker->rmp_rmlock = rm;
+
+ td->td_critnest++; /* critical_enter(); */
+
+ compiler_memory_barrier();
+
+ pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
+
+ rm_tracker_add(pc, tracker);
+
+ td->td_pinned++; /* sched_pin(); */
+
+ compiler_memory_barrier();
+
+ td->td_critnest--;
+
+ /*
+ * Fast path to combine two common conditions into a single
+ * conditional jump.
+ */
+ if (0 == (td->td_owepreempt | rm->rm_noreadtoken))
+ return;
+
+ /* We do not have a read token and need to acquire one. */
+ _rm_rlock_hard(rm, tracker);
+}
+
+static void
+_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
+{
+
+ if (td->td_owepreempt) {
+ td->td_critnest++;
+ critical_exit();
+ }
+
+ if (!tracker->rmp_flags)
+ return;
+
+ mtx_lock_spin(&rm_spinlock);
+ LIST_REMOVE(tracker, rmp_qentry);
+
+ if (tracker->rmp_flags & RMPF_SIGNAL) {
+ struct rmlock *rm;
+ struct turnstile *ts;
+
+ rm = tracker->rmp_rmlock;
+
+ turnstile_chain_lock(&rm->lock_object);
+ mtx_unlock_spin(&rm_spinlock);
+
+ ts = turnstile_lookup(&rm->lock_object);
+
+ turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
+ turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
+ turnstile_chain_unlock(&rm->lock_object);
+ } else
+ mtx_unlock_spin(&rm_spinlock);
+}
+
+void
+_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
+{
+ struct pcpu *pc;
+ struct thread *td = tracker->rmp_thread;
+
+ td->td_critnest++; /* critical_enter(); */
+ pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
+ rm_tracker_remove(pc, tracker);
+ td->td_critnest--;
+ td->td_pinned--; /* sched_unpin(); */
+
+ if (0 == (td->td_owepreempt | tracker->rmp_flags))
+ return;
+
+ _rm_unlock_hard(td, tracker);
+}
+
+void
+_rm_wlock(struct rmlock *rm)
+{
+ struct rm_priotracker *prio;
+ struct turnstile *ts;
+
+ mtx_lock(&rm->rm_lock);
+
+ if (rm->rm_noreadtoken == 0) {
+ /* Get all read tokens back */
+
+ rm->rm_noreadtoken = 1;
+
+ /*
+ * Assumes rm->rm_noreadtoken update is visible on other CPUs
+ * before rm_cleanIPI is called.
+ */
+#ifdef SMP
+ smp_rendezvous(smp_no_rendevous_barrier,
+ rm_cleanIPI,
+ smp_no_rendevous_barrier,
+ rm);
+#else
+ rm_cleanIPI(rm);
+#endif
+
+ mtx_lock_spin(&rm_spinlock);
+ while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
+ ts = turnstile_trywait(&rm->lock_object);
+ prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
+ mtx_unlock_spin(&rm_spinlock);
+ turnstile_wait(ts, prio->rmp_thread,
+ TS_EXCLUSIVE_QUEUE);
+ mtx_lock_spin(&rm_spinlock);
+ }
+ mtx_unlock_spin(&rm_spinlock);
+ }
+}
+
+void
+_rm_wunlock(struct rmlock *rm)
+{
+
+ mtx_unlock(&rm->rm_lock);
+}
+
+#ifdef LOCK_DEBUG
+
+void
+_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
+{
+
+ WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
+ file, line);
+
+ _rm_wlock(rm);
+
+ LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
+
+ WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
+
+	curthread->td_locks++;
+}
+
+void
+_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
+{
+
+ curthread->td_locks--;
+ WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
+ LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
+ _rm_wunlock(rm);
+}
+
+void
+_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
+ const char *file, int line)
+{
+
+ WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line);
+
+ _rm_rlock(rm, tracker);
+
+ LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);
+
+ WITNESS_LOCK(&rm->lock_object, 0, file, line);
+
+ curthread->td_locks++;
+}
+
+void
+_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
+ const char *file, int line)
+{
+
+ curthread->td_locks--;
+ WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
+ LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
+ _rm_runlock(rm, tracker);
+}
+
+#else
+
+/*
+ * Just strip out the file and line arguments if no lock debugging is
+ * enabled in the kernel; we can still be called from a kernel module.
+ */
+void
+_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
+{
+
+ _rm_wlock(rm);
+}
+
+void
+_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
+{
+
+ _rm_wunlock(rm);
+}
+
+void
+_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
+ const char *file, int line)
+{
+
+ _rm_rlock(rm, tracker);
+}
+
+void
+_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
+    const char *file, int line)
+{
+
+ _rm_runlock(rm, tracker);
+}
+
+#endif
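
The cost model in the file above is deliberately asymmetric: _rm_rlock()'s
fast path is a few per-CPU list operations with no atomics, while
_rm_wlock() clears the read token, uses smp_rendezvous() to have every CPU
collect its active readers, and then waits on a turnstile until they drain.
That makes the lock a fit for state that is read constantly but written
rarely. A sketch of a writer under that assumption (example_rm and
example_cfg are invented names; rm_init() must have run first):

	static struct rmlock example_rm;
	static int example_cfg;		/* read often, written rarely */

	static void
	example_set_cfg(int v)
	{
		/* Slow path: revokes the read token on every CPU. */
		rm_wlock(&example_rm);
		example_cfg = v;
		rm_wunlock(&example_rm);
	}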
Modified: user/kmacy/ZFS_MFC/sys/kern/subr_pcpu.c
==============================================================================
--- user/kmacy/ZFS_MFC/sys/kern/subr_pcpu.c Sat May 16 21:38:55 2009 (r192220)
+++ user/kmacy/ZFS_MFC/sys/kern/subr_pcpu.c Sat May 16 21:48:10 2009 (r192221)
@@ -56,7 +56,7 @@ __FBSDID("$FreeBSD$");
#include <sys/smp.h>
#include <ddb/ddb.h>
-static struct pcpu *cpuid_to_pcpu[MAXCPU];
+struct pcpu *cpuid_to_pcpu[MAXCPU];
struct cpuhead cpuhead = SLIST_HEAD_INITIALIZER(cpuhead);
/*
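
Dropping the static qualifier here is what lets the rmlock fast path skip a
function call: _rm_rlock() and _rm_runlock() above index the array directly
with pc = cpuid_to_pcpu[td->td_oncpu] instead of going through pcpu_find().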
Added: user/kmacy/ZFS_MFC/sys/sys/_rmlock.h
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ user/kmacy/ZFS_MFC/sys/sys/_rmlock.h Sat May 16 21:48:10 2009 (r192221)
@@ -0,0 +1,62 @@
+/*-
+ * Copyright (c) 2007 Stephan Uphoff <ups at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__RMLOCK_H_
+#define _SYS__RMLOCK_H_
+
+/*
+ * XXXUPS: remove as soon as we have per-CPU variable
+ * linker sets and can define rm_queue in _rmlock.h.
+ */
+#include <sys/pcpu.h>
+/*
+ * Mostly reader/occasional writer lock.
+ */
+
+LIST_HEAD(rmpriolist,rm_priotracker);
+
+struct rmlock {
+ struct lock_object lock_object;
+ volatile int rm_noreadtoken;
+ LIST_HEAD(,rm_priotracker) rm_activeReaders;
+ struct mtx rm_lock;
+};
+
+struct rm_priotracker {
+ struct rm_queue rmp_cpuQueue; /* Must be first */
+ struct rmlock *rmp_rmlock;
+ struct thread *rmp_thread;
+ int rmp_flags;
+ LIST_ENTRY(rm_priotracker) rmp_qentry;
+};
+
+#endif /* !_SYS__RMLOCK_H_ */
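
The "Must be first" comment on rmp_cpuQueue is load-bearing: the per-CPU
list links struct rm_queue nodes, and kern_rmlock.c recovers the enclosing
tracker with a plain cast, as in rm_cleanIPI():

	tracker = (struct rm_priotracker *)queue;

which is only correct while rmp_cpuQueue stays at offset zero. A
compile-time guard one could add (not part of this commit):

	CTASSERT(__offsetof(struct rm_priotracker, rmp_cpuQueue) == 0);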
Modified: user/kmacy/ZFS_MFC/sys/sys/pcpu.h
==============================================================================
--- user/kmacy/ZFS_MFC/sys/sys/pcpu.h Sat May 16 21:38:55 2009 (r192220)
+++ user/kmacy/ZFS_MFC/sys/sys/pcpu.h Sat May 16 21:48:10 2009 (r192221)
@@ -45,6 +45,15 @@
struct pcb;
struct thread;
+/*
+ * XXXUPS: remove as soon as we have per-CPU variable
+ * linker sets and can define rm_queue in _rmlock.h.
+ */
+struct rm_queue {
+ struct rm_queue* volatile rmq_next;
+ struct rm_queue* volatile rmq_prev;
+};
+
/*
* This structure maps out the global data that needs to be kept on a
* per-cpu basis. The members are accessed via the PCPU_GET/SET/PTR
@@ -72,6 +81,13 @@ struct pcpu {
struct vmmeter pc_cnt; /* VM stats counters */
long pc_cp_time[CPUSTATES]; /* statclock ticks */
struct device *pc_device;
+	/*
+	 * Per-CPU state for the read-mostly lock (rmlock).
+	 *
+	 * XXXUPS: remove as soon as we have per-CPU variable
+	 * linker sets.
+	 */
+ struct rm_queue pc_rm_queue;
};
#ifdef _KERNEL
@@ -92,6 +108,10 @@ extern struct cpuhead cpuhead;
* db_show_mdpcpu() is responsible for handling machine dependent
* fields for the DDB 'show pcpu' command.
*/
+
+extern struct pcpu *cpuid_to_pcpu[MAXCPU];
+
+
void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size);
void db_show_mdpcpu(struct pcpu *pcpu);
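
pc_rm_queue serves as the sentinel of a circular, doubly-linked per-CPU
tracker list: an empty list is one whose links point back at the sentinel.
The initialization is not visible in this hunk, so the invariant below is
assumed rather than quoted from the tree:

	/* Empty per-CPU tracker list: the sentinel links to itself. */
	pc->pc_rm_queue.rmq_next = &pc->pc_rm_queue;
	pc->pc_rm_queue.rmq_prev = &pc->pc_rm_queue;

rm_tracker_add() and rm_tracker_remove() preserve this shape, and
rm_cleanIPI() ends its walk when it reaches the sentinel again.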
Added: user/kmacy/ZFS_MFC/sys/sys/rmlock.h
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ user/kmacy/ZFS_MFC/sys/sys/rmlock.h Sat May 16 21:48:10 2009 (r192221)
@@ -0,0 +1,110 @@
+/*-
+ * Copyright (c) 2007 Stephan Uphoff <ups at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_RMLOCK_H_
+#define _SYS_RMLOCK_H_
+
+#include <sys/mutex.h>
+
+#include <sys/_lock.h>
+#include <sys/_rmlock.h>
+
+#ifdef _KERNEL
+
+
+void rm_init(struct rmlock *rm, const char *name, int opts);
+void rm_destroy(struct rmlock *rm);
+int rm_wowned(struct rmlock *rm);
+void rm_sysinit(void *arg);
+
+
+void _rm_wlock_debug(struct rmlock *rm, const char *file, int line);
+void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line);
+void _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
+ const char *file, int line);
+void _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
+ const char *file, int line);
+
+
+void _rm_wlock(struct rmlock *rm);
+void _rm_wunlock(struct rmlock *rm);
+void _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker);
+void _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker);
+
+/*
+ * Public interface for lock operations.
+ */
+
+#ifndef LOCK_DEBUG
+#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/rmlock.h>
+#endif
+
+#if LOCK_DEBUG > 0
+
+#define rm_wlock(rm) _rm_wlock_debug((rm), LOCK_FILE, LOCK_LINE)
+#define rm_wunlock(rm) _rm_wunlock_debug((rm), LOCK_FILE, LOCK_LINE)
+#define rm_rlock(rm,tracker) \
+    _rm_rlock_debug((rm), (tracker), LOCK_FILE, LOCK_LINE)
+#define rm_runlock(rm,tracker) \
+    _rm_runlock_debug((rm), (tracker), LOCK_FILE, LOCK_LINE)
+
+#else
+
+#define rm_wlock(rm) _rm_wlock((rm))
+#define rm_wunlock(rm) _rm_wunlock((rm))
+#define rm_rlock(rm,tracker) _rm_rlock((rm),(tracker))
+#define rm_runlock(rm,tracker) _rm_runlock((rm), (tracker))
+
+#endif
+
+#define rm_initialized(rm) lock_initalized(&(rm)->lock_object)
+
+struct rm_args {
+ struct rmlock *ra_rm;
+ const char *ra_desc;
+ int ra_opts;
+};
+
+#define RM_SYSINIT(name, rm, desc, opts) \
+ static struct rm_args name##_args = { \
+ (rm), \
+ (desc), \
+ (opts), \
+ }; \
+ SYSINIT(name##_rm_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ rm_sysinit, &name##_args); \
+ SYSUNINIT(name##_rm_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ rm_destroy, (rm))
+
+
+#endif /* _KERNEL */
+#endif /* !_SYS_RMLOCK_H_ */
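
RM_SYSINIT mirrors MTX_SYSINIT and SX_SYSINIT: it schedules rm_sysinit() at
SI_SUB_LOCK so a statically declared lock is initialized before any consumer
runs, and registers rm_destroy() for teardown. A hypothetical declaration
(all names invented):

	static struct rmlock example_rm;
	RM_SYSINIT(example_rm_sysinit, &example_rm, "example rmlock", 0);

	/* Readers then pair a stack tracker with the lock: */
	struct rm_priotracker tracker;

	rm_rlock(&example_rm, &tracker);
	/* ... read shared state ... */
	rm_runlock(&example_rm, &tracker);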