svn commit: r192431 - in user/kmacy/releng_7_2_fcs/sys: ddb kern sys

Kip Macy kmacy at FreeBSD.org
Wed May 20 07:31:12 UTC 2009


Author: kmacy
Date: Wed May 20 07:31:11 2009
New Revision: 192431
URL: http://svn.freebsd.org/changeset/base/192431

Log:
  - Update locks to the newer lock profiling and witness interfaces
  - Update ddb to support these changes

Modified:
  user/kmacy/releng_7_2_fcs/sys/ddb/db_command.c
  user/kmacy/releng_7_2_fcs/sys/ddb/db_command.h
  user/kmacy/releng_7_2_fcs/sys/ddb/db_main.c
  user/kmacy/releng_7_2_fcs/sys/ddb/ddb.h
  user/kmacy/releng_7_2_fcs/sys/kern/kern_mutex.c
  user/kmacy/releng_7_2_fcs/sys/kern/kern_rmlock.c
  user/kmacy/releng_7_2_fcs/sys/kern/kern_rwlock.c
  user/kmacy/releng_7_2_fcs/sys/kern/kern_sx.c
  user/kmacy/releng_7_2_fcs/sys/kern/subr_lock.c
  user/kmacy/releng_7_2_fcs/sys/kern/subr_sleepqueue.c
  user/kmacy/releng_7_2_fcs/sys/kern/subr_turnstile.c
  user/kmacy/releng_7_2_fcs/sys/kern/subr_witness.c
  user/kmacy/releng_7_2_fcs/sys/sys/_lock.h
  user/kmacy/releng_7_2_fcs/sys/sys/_rwlock.h
  user/kmacy/releng_7_2_fcs/sys/sys/lock.h
  user/kmacy/releng_7_2_fcs/sys/sys/lock_profile.h
  user/kmacy/releng_7_2_fcs/sys/sys/proc.h
  user/kmacy/releng_7_2_fcs/sys/sys/rwlock.h

Modified: user/kmacy/releng_7_2_fcs/sys/ddb/db_command.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/ddb/db_command.c	Wed May 20 07:11:46 2009	(r192430)
+++ user/kmacy/releng_7_2_fcs/sys/ddb/db_command.c	Wed May 20 07:31:11 2009	(r192431)
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/systm.h>
 #include <sys/cons.h>
 #include <sys/watchdog.h>
+#include <sys/kernel.h>
 
 #include <ddb/ddb.h>
 #include <ddb/db_command.h>
@@ -63,10 +64,6 @@ db_addr_t	db_last_addr;
 db_addr_t	db_prev;
 db_addr_t	db_next;
 
-SET_DECLARE(db_cmd_set, struct command);
-SET_DECLARE(db_show_cmd_set, struct command);
-SET_DECLARE(db_show_all_cmd_set, struct command);
-
 static db_cmdfcn_t	db_fncall;
 static db_cmdfcn_t	db_gdb;
 static db_cmdfcn_t	db_halt;
@@ -81,30 +78,20 @@ static db_cmdfcn_t	db_watchdog;
  */
 
 static struct command db_show_all_cmds[] = {
-	{ (char *)0 }
-};
-
-static struct command_table db_show_all_table = {
-	db_show_all_cmds,
-	SET_BEGIN(db_show_all_cmd_set),
-	SET_LIMIT(db_show_all_cmd_set)
+	{ "trace",	db_stack_trace_all,	0,	0 },
 };
+struct command_table db_show_all_table =
+    LIST_HEAD_INITIALIZER(db_show_all_table);
 
 static struct command db_show_cmds[] = {
 	{ "all",	0,			0,	&db_show_all_table },
 	{ "registers",	db_show_regs,		0,	0 },
 	{ "breaks",	db_listbreak_cmd, 	0,	0 },
 	{ "threads",	db_show_threads,	0,	0 },
-	{ (char *)0, }
 };
+struct command_table db_show_table = LIST_HEAD_INITIALIZER(db_show_table);
 
-static struct command_table db_show_table = {
-	db_show_cmds,
-	SET_BEGIN(db_show_cmd_set),
-	SET_LIMIT(db_show_cmd_set)
-};
-	
-static struct command db_commands[] = {
+static struct command db_cmds[] = {
 	{ "print",	db_print_cmd,		0,	0 },
 	{ "p",		db_print_cmd,		0,	0 },
 	{ "examine",	db_examine_cmd,		CS_SET_DOT, 0 },
@@ -130,6 +117,7 @@ static struct command db_commands[] = {
 	{ "match",	db_trace_until_matching_cmd,0,	0 },
 	{ "trace",	db_stack_trace,		CS_OWN,	0 },
 	{ "t",		db_stack_trace,		CS_OWN,	0 },
+	/* XXX alias for all trace */
 	{ "alltrace",	db_stack_trace_all,	0,	0 },
 	{ "where",	db_stack_trace,		CS_OWN,	0 },
 	{ "bt",		db_stack_trace,		CS_OWN,	0 },
@@ -149,14 +137,8 @@ static struct command db_commands[] = {
 	{ "unscript",	db_unscript_cmd,	CS_OWN,	0 },
 	{ "capture",	db_capture_cmd,		CS_OWN,	0 },
 	{ "textdump",	db_textdump_cmd,	CS_OWN, 0 },
-	{ (char *)0, }
-};
-
-static struct command_table db_command_table = {
-	db_commands,
-	SET_BEGIN(db_cmd_set),
-	SET_LIMIT(db_cmd_set)
 };
+struct command_table db_cmd_table = LIST_HEAD_INITIALIZER(db_cmd_table);
 
 static struct command	*db_last_command = 0;
 
@@ -197,6 +179,72 @@ static void	db_command(struct command **
 		    struct command_table *cmd_table, int dopager);
 
 /*
+ * Initialize the command lists from the static tables.
+ */
+void
+db_command_init(void)
+{
+#define	N(a)	(sizeof(a) / sizeof(a[0]))
+	int i;
+
+	for (i = 0; i < N(db_cmds); i++)
+		db_command_register(&db_cmd_table, &db_cmds[i]);
+	for (i = 0; i < N(db_show_cmds); i++)
+		db_command_register(&db_show_table, &db_show_cmds[i]);
+	for (i = 0; i < N(db_show_all_cmds); i++)
+		db_command_register(&db_show_all_table, &db_show_all_cmds[i]);
+#undef N
+}
+
+/*
+ * Register a command.
+ */
+void
+db_command_register(struct command_table *list, struct command *cmd)
+{
+	struct command *c, *last;
+
+	last = NULL;
+	LIST_FOREACH(c, list, next) {
+		int n = strcmp(cmd->name, c->name);
+
+		/* Check that the command is not already present. */
+		if (n == 0) {
+			printf("%s: Warning, the command \"%s\" already exists;"
+			     " ignoring request\n", __func__, cmd->name);
+			return;
+		}
+		if (n < 0) {
+			/* NB: keep list sorted lexicographically */
+			LIST_INSERT_BEFORE(c, cmd, next);
+			return;
+		}
+		last = c;
+	}
+	if (last == NULL)
+		LIST_INSERT_HEAD(list, cmd, next);
+	else
+		LIST_INSERT_AFTER(last, cmd, next);
+}
+
+/*
+ * Remove a command previously registered with db_command_register.
+ */
+void
+db_command_unregister(struct command_table *list, struct command *cmd)
+{
+	struct command *c;
+
+	LIST_FOREACH(c, list, next) {
+		if (cmd == c) {
+			LIST_REMOVE(cmd, next);
+			return;
+		}
+	}
+	/* NB: intentionally quiet */
+}
+
+/*
  * Helper function to match a single command.
  */
 static void
@@ -245,22 +293,14 @@ db_cmd_search(name, table, cmdp)
 	struct command	**cmdp;	/* out */
 {
 	struct command	*cmd;
-	struct command	**aux_cmdp;
 	int		result = CMD_NONE;
 
-	for (cmd = table->table; cmd->name != 0; cmd++) {
-		db_cmd_match(name, cmd, cmdp, &result);
+	LIST_FOREACH(cmd, table, next) {
+		db_cmd_match(name,cmd,cmdp,&result);
 		if (result == CMD_UNIQUE)
-			return (CMD_UNIQUE);
+			break;
 	}
-	if (table->aux_tablep != NULL)
-		for (aux_cmdp = table->aux_tablep;
-		     aux_cmdp < table->aux_tablep_end;
-		     aux_cmdp++) {
-			db_cmd_match(name, *aux_cmdp, cmdp, &result);
-			if (result == CMD_UNIQUE)
-				return (CMD_UNIQUE);
-		}
+
 	if (result == CMD_NONE) {
 		/* check for 'help' */
 		if (name[0] == 'h' && name[1] == 'e'
@@ -274,19 +314,11 @@ static void
 db_cmd_list(table)
 	struct command_table *table;
 {
-	register struct command *cmd;
-	register struct command **aux_cmdp;
+	register struct command	*cmd;
 
-	for (cmd = table->table; cmd->name != 0; cmd++) {
-	    db_printf("%-12s", cmd->name);
-	    db_end_line(12);
-	}
-	if (table->aux_tablep == NULL)
-	    return;
-	for (aux_cmdp = table->aux_tablep; aux_cmdp < table->aux_tablep_end;
-	     aux_cmdp++) {
-	    db_printf("%-12s", (*aux_cmdp)->name);
-	    db_end_line(12);
+	LIST_FOREACH(cmd, table, next) {
+		db_printf("%-12s", cmd->name);
+		db_end_line(12);
 	}
 }
 
@@ -296,7 +328,7 @@ db_command(last_cmdp, cmd_table, dopager
 	struct command_table *cmd_table;
 	int dopager;
 {
-	struct command	*cmd;
+	struct command	*cmd = NULL;
 	int		t;
 	char		modif[TOK_STRING_SIZE];
 	db_expr_t	addr, count;
@@ -463,7 +495,7 @@ db_command_loop()
 	    db_printf("db> ");
 	    (void) db_read_line();
 
-	    db_command(&db_last_command, &db_command_table, /* dopager */ 1);
+	    db_command(&db_last_command, &db_cmd_table, /* dopager */ 1);
 	}
 }
 
@@ -481,7 +513,7 @@ db_command_script(const char *command)
 {
 	db_prev = db_next = db_dot;
 	db_inject_line(command);
-	db_command(&db_last_command, &db_command_table, /* dopager */ 0);
+	db_command(&db_last_command, &db_cmd_table, /* dopager */ 0);
 }
 
 void
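
db_command_register() above keeps each command list sorted by name and refuses
duplicates, while db_command_init() seeds the lists from the static arrays.  A
minimal userland sketch of the same sorted-insert idea, using the <sys/queue.h>
LIST macros (the struct and function names here are illustrative only, not part
of the commit):

#include <sys/queue.h>
#include <stdio.h>
#include <string.h>

struct cmd {
	const char	*name;
	LIST_ENTRY(cmd)	next;
};
LIST_HEAD(cmd_table, cmd);

/* Insert 'cmd' keeping 'list' sorted lexicographically; drop duplicates. */
static void
cmd_register(struct cmd_table *list, struct cmd *cmd)
{
	struct cmd *c, *last = NULL;

	LIST_FOREACH(c, list, next) {
		int n = strcmp(cmd->name, c->name);

		if (n == 0)
			return;		/* already registered */
		if (n < 0) {
			LIST_INSERT_BEFORE(c, cmd, next);
			return;
		}
		last = c;
	}
	if (last == NULL)
		LIST_INSERT_HEAD(list, cmd, next);
	else
		LIST_INSERT_AFTER(last, cmd, next);
}

int
main(void)
{
	static struct cmd trace = { "trace" }, show = { "show" }, bt = { "bt" };
	struct cmd_table table = LIST_HEAD_INITIALIZER(table);
	struct cmd *c;

	cmd_register(&table, &trace);
	cmd_register(&table, &show);
	cmd_register(&table, &bt);
	LIST_FOREACH(c, &table, next)
		printf("%s\n", c->name);	/* prints: bt, show, trace */
	return (0);
}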

Modified: user/kmacy/releng_7_2_fcs/sys/ddb/db_command.h
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/ddb/db_command.h	Wed May 20 07:11:46 2009	(r192430)
+++ user/kmacy/releng_7_2_fcs/sys/ddb/db_command.h	Wed May 20 07:31:11 2009	(r192431)
@@ -37,6 +37,7 @@
  * Command loop declarations.
  */
 
+void	db_command_init(void);
 void	db_command_loop(void);
 void	db_command_script(const char *command);
 

Modified: user/kmacy/releng_7_2_fcs/sys/ddb/db_main.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/ddb/db_main.c	Wed May 20 07:11:46 2009	(r192430)
+++ user/kmacy/releng_7_2_fcs/sys/ddb/db_main.c	Wed May 20 07:31:11 2009	(r192431)
@@ -172,6 +172,7 @@ db_init(void)
 	uintptr_t symtab, strtab;
 	Elf_Size tabsz, strsz;
 
+	db_command_init();
 	if (ksym_end > ksym_start && ksym_start != 0) {
 		symtab = ksym_start;
 		tabsz = *((Elf_Size*)symtab);

Modified: user/kmacy/releng_7_2_fcs/sys/ddb/ddb.h
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/ddb/ddb.h	Wed May 20 07:11:46 2009	(r192430)
+++ user/kmacy/releng_7_2_fcs/sys/ddb/ddb.h	Wed May 20 07:31:11 2009	(r192431)
@@ -43,6 +43,9 @@ SYSCTL_DECL(_debug_ddb);
 
 #include <machine/db_machdep.h>		/* type definitions */
 
+#include <sys/queue.h>			/* LIST_* */
+#include <sys/kernel.h>			/* SYSINIT */
+
 #ifndef DB_MAXARGS
 #define	DB_MAXARGS	10
 #endif
@@ -73,36 +76,97 @@ SYSCTL_DECL(_debug_ddb);
 int	DB_CALL(db_expr_t, db_expr_t *, int, db_expr_t[]);
 #endif
 
+/*
+ * There are three "command tables":
+ * - One for simple commands; a list of these is displayed
+ *   by typing 'help' at the debugger prompt.
+ * - One for sub-commands of 'show'; to see this type 'show'
+ *   without any arguments.
+ * - The last one for sub-commands of 'show all'; type 'show all'
+ *   without any argument to get a list.
+ */
+struct command;
+LIST_HEAD(command_table, command);
+extern struct command_table db_cmd_table;
+extern struct command_table db_show_table;
+extern struct command_table db_show_all_table;
+
+/*
+ * Type signature for a function implementing a ddb command.
+ */
 typedef void db_cmdfcn_t(db_expr_t addr, boolean_t have_addr, db_expr_t count,
 	    char *modif);
 
-#define DB_COMMAND(cmd_name, func_name) \
-	DB_FUNC(cmd_name, func_name, db_cmd_set, 0, NULL)
-#define DB_SHOW_COMMAND(cmd_name, func_name) \
-	DB_FUNC(cmd_name, func_name, db_show_cmd_set, 0, NULL)
-#define DB_SHOW_ALL_COMMAND(cmd_name, func_name) \
-	DB_FUNC(cmd_name, func_name, db_show_all_cmd_set, 0, NULL)
+/*
+ * Command table entry.
+ */
+struct command {
+	char *	name;		/* command name */
+	db_cmdfcn_t *fcn;	/* function to call */
+	int	flag;		/* extra info: */
+#define	CS_OWN		0x1	/* non-standard syntax */
+#define	CS_MORE		0x2	/* standard syntax, but may have other words
+				 * at end */
+#define	CS_SET_DOT	0x100	/* set dot after command */
+	struct command_table *more; /* another level of command */
+	LIST_ENTRY(command) next; /* next entry in the command table */
+};
 
-#define	DB_SET(cmd_name, func_name, set, flag, more)		\
-static const struct command __CONCAT(cmd_name,_cmd) = {		\
-	__STRING(cmd_name),					\
-	func_name,						\
-	flag,							\
-	more							\
+/*
+ * Arrange for the specified ddb command to be defined and
+ * bound to the specified function.  Commands can be defined
+ * in modules in which case they will be available only when
+ * the module is loaded.
+ */
+#define _DB_SET(_suffix, _name, _func, list, _flag, _more)	\
+static struct command __CONCAT(_name,_suffix) = {		\
+	.name	= __STRING(_name),				\
+	.fcn	= _func,					\
+	.flag	= _flag,					\
+	.more	= _more						\
 };								\
-TEXT_SET(set, __CONCAT(cmd_name,_cmd))
+static void __CONCAT(__CONCAT(_name,_suffix),_add)(void *arg __unused) \
+    { db_command_register(&list, &__CONCAT(_name,_suffix)); }	\
+SYSINIT(__CONCAT(_name,_suffix), SI_SUB_KLD, SI_ORDER_ANY,	\
+    __CONCAT(__CONCAT(_name,_suffix),_add), NULL);		\
+static void __CONCAT(__CONCAT(_name,_suffix),_del)(void *arg __unused) \
+    { db_command_unregister(&list, &__CONCAT(_name,_suffix)); }	\
+SYSUNINIT(__CONCAT(_name,_suffix), SI_SUB_KLD, SI_ORDER_ANY,	\
+    __CONCAT(__CONCAT(_name,_suffix),_del), NULL);
 
-#define DB_FUNC(cmd_name, func_name, set, flag, more)		\
-static db_cmdfcn_t	func_name;				\
-								\
-DB_SET(cmd_name, func_name, set, flag, more);			\
-								\
+/*
+ * Like _DB_SET but also create the function declaration which
+ * must be followed immediately by the body; e.g.
+ *   _DB_FUNC(_cmd, panic, db_panic, db_cmd_table, 0, NULL)
+ *   {
+ *	...panic implementation...
+ *   }
+ *
+ * This macro is mostly used to define commands placed in one of
+ * the ddb command tables; see DB_COMMAND, etc. below.
+ */
+#define _DB_FUNC(_suffix, _name, _func, list, _flag, _more)	\
+static db_cmdfcn_t _func;					\
+_DB_SET(_suffix, _name, _func, list, _flag, _more);		\
 static void							\
-func_name(addr, have_addr, count, modif)			\
-	db_expr_t addr;						\
-	boolean_t have_addr;					\
-	db_expr_t count;					\
-	char *modif;
+_func(db_expr_t addr, boolean_t have_addr, db_expr_t count, char *modif)
+
+/* common idiom provided for backwards compatibility */
+#define DB_FUNC(_name, _func, list, _flag, _more)		\
+	_DB_FUNC(_cmd, _name, _func, list, _flag, _more)
+
+#define DB_COMMAND(cmd_name, func_name) \
+	_DB_FUNC(_cmd, cmd_name, func_name, db_cmd_table, 0, NULL)
+#define DB_ALIAS(alias_name, func_name) \
+	_DB_SET(_cmd, alias_name, func_name, db_cmd_table, 0, NULL)
+#define DB_SHOW_COMMAND(cmd_name, func_name) \
+	_DB_FUNC(_show, cmd_name, func_name, db_show_table, 0, NULL)
+#define DB_SHOW_ALIAS(alias_name, func_name) \
+	_DB_SET(_show, alias_name, func_name, db_show_table, 0, NULL)
+#define DB_SHOW_ALL_COMMAND(cmd_name, func_name) \
+	_DB_FUNC(_show_all, cmd_name, func_name, db_show_all_table, 0, NULL)
+#define DB_SHOW_ALL_ALIAS(alias_name, func_name) \
+	_DB_SET(_show_all, alias_name, func_name, db_show_all_table, 0, NULL)
 
 extern db_expr_t db_maxoff;
 extern int db_indent;
@@ -150,6 +214,8 @@ void		db_trace_self(void);
 int		db_trace_thread(struct thread *, int);
 int		db_value_of_name(const char *name, db_expr_t *valuep);
 int		db_write_bytes(vm_offset_t addr, size_t size, char *data);
+void		db_command_register(struct command_table *, struct command *);
+void		db_command_unregister(struct command_table *, struct command *);
 
 db_cmdfcn_t	db_breakpoint_cmd;
 db_cmdfcn_t	db_capture_cmd;
@@ -179,28 +245,6 @@ db_cmdfcn_t	db_watchpoint_cmd;
 db_cmdfcn_t	db_write_cmd;
 
 /*
- * Command table.
- */
-struct command;
-
-struct command_table {
-	struct command *table;
-	struct command **aux_tablep;
-	struct command **aux_tablep_end;
-};
-
-struct command {
-	char *	name;		/* command name */
-	db_cmdfcn_t *fcn;	/* function to call */
-	int	flag;		/* extra info: */
-#define	CS_OWN		0x1	/* non-standard syntax */
-#define	CS_MORE		0x2	/* standard syntax, but may have other words
-				 * at end */
-#define	CS_SET_DOT	0x100	/* set dot after command */
-	struct command_table *more; /* another level of command */
-};
-
-/*
  * Interface between DDB and the DDB output capture facility.
  */
 struct dumperinfo;
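
With the _DB_SET()/_DB_FUNC() machinery above, DB_COMMAND() and friends no
longer depend on linker sets; each use generates a SYSINIT/SYSUNINIT pair, so a
command defined in a kld is added to the table when the module loads and
removed when it unloads.  A minimal sketch of a module-side consumer, assuming
the post-merge <ddb/ddb.h> (the command name, function name, and message are
hypothetical):

#include <sys/param.h>
#include <sys/kernel.h>
#include <ddb/ddb.h>

/*
 * Defines a "show foo" debugger command; _DB_FUNC supplies the
 * db_cmdfcn_t prototype, so the body follows immediately.
 */
DB_SHOW_COMMAND(foo, db_show_foo)
{

	db_printf("foo: addr=%jx have_addr=%d count=%jd modif=\"%s\"\n",
	    (uintmax_t)addr, have_addr, (intmax_t)count, modif);
}

In the debugger the command is then reachable as "show foo", and unloading the
module removes it from db_show_table via the generated SYSUNINIT.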

Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_mutex.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_mutex.c	Wed May 20 07:11:46 2009	(r192430)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_mutex.c	Wed May 20 07:31:11 2009	(r192431)
@@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$");
 
 #define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
 
+static void	assert_mtx(struct lock_object *lock, int what);
 #ifdef DDB
 static void	db_show_mtx(struct lock_object *lock);
 #endif
@@ -98,6 +99,7 @@ static int	unlock_spin(struct lock_objec
 struct lock_class lock_class_mtx_sleep = {
 	.lc_name = "sleep mutex",
 	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
+	.lc_assert = assert_mtx,
 #ifdef DDB
 	.lc_ddb_show = db_show_mtx,
 #endif
@@ -107,6 +109,7 @@ struct lock_class lock_class_mtx_sleep =
 struct lock_class lock_class_mtx_spin = {
 	.lc_name = "spin mutex",
 	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
+	.lc_assert = assert_mtx,
 #ifdef DDB
 	.lc_ddb_show = db_show_mtx,
 #endif
@@ -120,19 +123,12 @@ struct lock_class lock_class_mtx_spin = 
 struct mtx blocked_lock;
 struct mtx Giant;
 
-#ifdef LOCK_PROFILING
-static inline void lock_profile_init(void)
+void
+assert_mtx(struct lock_object *lock, int what)
 {
-        int i;
-        /* Initialize the mutex profiling locks */
-        for (i = 0; i < LPROF_LOCK_SIZE; i++) {
-                mtx_init(&lprof_locks[i], "mprof lock",
-                    NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
-        }
+
+	mtx_assert((struct mtx *)lock, what);
 }
-#else
-static inline void lock_profile_init(void) {;}
-#endif
 
 void
 lock_mtx(struct lock_object *lock, int how)
@@ -181,7 +177,7 @@ _mtx_lock_flags(struct mtx *m, int opts,
 	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
 	    file, line));
 	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
-	    file, line);
+	    file, line, NULL);
 
 	_get_sleep_lock(m, curthread, opts, file, line);
 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
@@ -225,7 +221,7 @@ _mtx_lock_spin_flags(struct mtx *m, int 
 	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
 		    m->lock_object.lo_name, file, line));
 	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
-	    file, line);
+	    file, line, NULL);
 	_get_spin_lock(m, curthread, opts, file, line);
 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
 	    line);
@@ -258,9 +254,12 @@ _mtx_unlock_spin_flags(struct mtx *m, in
 int
 _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
 {
-	int rval, contested = 0;
+#ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
-	
+	int contested = 0;
+#endif
+	int rval;
+
 	MPASS(curthread != NULL);
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
@@ -300,16 +299,18 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
     int line)
 {
 	struct turnstile *ts;
+	uintptr_t v;
 #ifdef ADAPTIVE_MUTEXES
 	volatile struct thread *owner;
 #endif
 #ifdef KTR
 	int cont_logged = 0;
 #endif
+#ifdef LOCK_PROFILING
 	int contested = 0;
 	uint64_t waittime = 0;
-	uintptr_t v;
-	
+#endif
+
 	if (mtx_owned(m)) {
 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
 	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
@@ -328,7 +329,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
 		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
 		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
 
-	while (!_obtain_lock(m, tid)) { 
+	while (!_obtain_lock(m, tid)) {
 #ifdef ADAPTIVE_MUTEXES
 		/*
 		 * If the owner is running on another CPU, spin until the
@@ -337,11 +338,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
 		v = m->mtx_lock;
 		if (v != MTX_UNOWNED) {
 			owner = (struct thread *)(v & ~MTX_FLAGMASK);
-#ifdef ADAPTIVE_GIANT
 			if (TD_IS_RUNNING(owner)) {
-#else
-			if (m != &Giant && TD_IS_RUNNING(owner)) {
-#endif
 				if (LOCK_LOG_TEST(&m->lock_object, 0))
 					CTR3(KTR_LOCK,
 					    "%s: spinning on %p held by %p",
@@ -375,11 +372,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
 		 * CPU quit the hard path and try to spin.
 		 */
 		owner = (struct thread *)(v & ~MTX_FLAGMASK);
-#ifdef ADAPTIVE_GIANT
 		if (TD_IS_RUNNING(owner)) {
-#else
-		if (m != &Giant && TD_IS_RUNNING(owner)) {
-#endif
 			turnstile_cancel(ts);
 			cpu_spinwait();
 			continue;
@@ -426,8 +419,8 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
 		    m->lock_object.lo_name, (void *)tid, file, line);
 	}
 #endif
-	lock_profile_obtain_lock_success(&m->lock_object, contested,	
-	    waittime, (file), (line));					
+	lock_profile_obtain_lock_success(&m->lock_object, contested,
+	    waittime, file, line);
 }
 
 static void
@@ -460,9 +453,12 @@ void
 _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
     int line)
 {
-	int i = 0, contested = 0;
+	int i = 0;
+#ifdef LOCK_PROFILING
+	int contested = 0;
 	uint64_t waittime = 0;
-	
+#endif
+
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
 
@@ -488,7 +484,7 @@ _mtx_lock_spin(struct mtx *m, uintptr_t 
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
 
-	lock_profile_obtain_lock_success(&m->lock_object, contested,	
+	lock_profile_obtain_lock_success(&m->lock_object, contested,
 	    waittime, (file), (line));
 }
 #endif /* SMP */
@@ -498,11 +494,13 @@ _thread_lock_flags(struct thread *td, in
 {
 	struct mtx *m;
 	uintptr_t tid;
-	int i, contested;
-	uint64_t waittime;
+	int i;
+#ifdef LOCK_PROFILING
+	int contested = 0;
+	uint64_t waittime = 0;
+#endif
 
-	contested = i = 0;
-	waittime = 0;
+	i = 0;
 	tid = (uintptr_t)curthread;
 	for (;;) {
 retry:
@@ -518,13 +516,14 @@ retry:
 	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
 			    m->lock_object.lo_name, file, line));
 		WITNESS_CHECKORDER(&m->lock_object,
-		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line);
+		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
 		while (!_obtain_lock(m, tid)) {
 			if (m->mtx_lock == tid) {
 				m->mtx_recurse++;
 				break;
 			}
-			lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
+			lock_profile_obtain_lock_failed(&m->lock_object,
+			    &contested, &waittime);
 			/* Give interrupts a chance while we spin. */
 			spinlock_exit();
 			while (m->mtx_lock != MTX_UNOWNED) {
@@ -545,8 +544,9 @@ retry:
 			break;
 		_rel_spin_lock(m);	/* does spinlock_exit() */
 	}
-	lock_profile_obtain_lock_success(&m->lock_object, contested,	
-	    waittime, (file), (line));
+	if (m->mtx_recurse == 0)
+		lock_profile_obtain_lock_success(&m->lock_object, contested,
+		    waittime, (file), (line));
 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
 	    line);
 	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
@@ -614,10 +614,10 @@ _mtx_unlock_sleep(struct mtx *m, int opt
 	ts = turnstile_lookup(&m->lock_object);
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
-
 	MPASS(ts != NULL);
 	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
 	_release_lock_quick(m);
+
 	/*
 	 * This turnstile is now no longer associated with the mutex.  We can
 	 * unlock the chain lock so a new turnstile may take it's place.
@@ -807,8 +807,6 @@ mutex_init(void)
 	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
 	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
 	mtx_lock(&Giant);
-	
-	lock_profile_init();
 }
 
 #ifdef DDB
@@ -846,7 +844,7 @@ db_show_mtx(struct lock_object *lock)
 	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
 		td = mtx_owner(m);
 		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
-		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
+		    td->td_tid, td->td_proc->p_pid, td->td_name);
 		if (mtx_recursed(m))
 			db_printf(" recursed: %d\n", m->mtx_recurse);
 	}
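
The assert_mtx() callback added above fills in the new lc_assert member of
struct lock_class, so generic code that only holds a struct lock_object pointer
(such as the sleepqueue and turnstile code touched elsewhere in this merge) can
assert lock state without knowing the concrete lock type.  A hedged sketch of
such a caller, assuming the lc_assert/LA_* support this change merges into
<sys/lock.h> (the helper name is hypothetical):

#include <sys/param.h>
#include <sys/lock.h>

#ifdef INVARIANTS
/* Assert that the lock behind an opaque lock_object is currently held. */
static void
assert_lock_held(struct lock_object *lock)
{

	LOCK_CLASS(lock)->lc_assert(lock, LA_LOCKED);
}
#endif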

Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_rmlock.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_rmlock.c	Wed May 20 07:11:46 2009	(r192430)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_rmlock.c	Wed May 20 07:31:11 2009	(r192431)
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
+__FBSDID("$FreeBSD: head/sys/kern/kern_rmlock.c 191539 2009-04-26 21:16:03Z rwatson $");
 
 #include "opt_ddb.h"
 
@@ -69,12 +69,14 @@ static __inline void compiler_memory_bar
 	__asm __volatile("":::"memory");
 }
 
+static void	assert_rm(struct lock_object *lock, int what);
 static void	lock_rm(struct lock_object *lock, int how);
 static int	unlock_rm(struct lock_object *lock);
 
 struct lock_class lock_class_rm = {
 	.lc_name = "rm",
 	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
+	.lc_assert = assert_rm,
 #if 0
 #ifdef DDB
 	.lc_ddb_show = db_show_rwlock,
@@ -85,6 +87,13 @@ struct lock_class lock_class_rm = {
 };
 
 static void
+assert_rm(struct lock_object *lock, int what)
+{
+
+	panic("assert_rm called");
+}
+
+static void
 lock_rm(struct lock_object *lock, int how)
 {
 
@@ -403,7 +412,7 @@ void _rm_wlock_debug(struct rmlock *rm, 
 {
 
 	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
-	    file, line);
+	    file, line, NULL);
 
 	_rm_wlock(rm);
 
@@ -430,7 +439,7 @@ _rm_rlock_debug(struct rmlock *rm, struc
     const char *file, int line)
 {
 
-	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line);
+	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);
 
 	_rm_rlock(rm, tracker);
 

Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_rwlock.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_rwlock.c	Wed May 20 07:11:46 2009	(r192430)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_rwlock.c	Wed May 20 07:31:11 2009	(r192431)
@@ -39,10 +39,12 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/ktr.h>
+#include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
+#include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/turnstile.h>
 
@@ -54,17 +56,27 @@ CTASSERT((RW_RECURSE & LO_CLASSFLAGS) ==
 #define	ADAPTIVE_RWLOCKS
 #endif
 
+#ifdef ADAPTIVE_RWLOCKS
+static int rowner_retries = 10;
+static int rowner_loops = 10000;
+SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL, "rwlock debugging");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
+#endif
+
 #ifdef DDB
 #include <ddb/ddb.h>
 
 static void	db_show_rwlock(struct lock_object *lock);
 #endif
+static void	assert_rw(struct lock_object *lock, int what);
 static void	lock_rw(struct lock_object *lock, int how);
 static int	unlock_rw(struct lock_object *lock);
 
 struct lock_class lock_class_rw = {
 	.lc_name = "rw",
 	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
+	.lc_assert = assert_rw,
 #ifdef DDB
 	.lc_ddb_show = db_show_rwlock,
 #endif
@@ -103,6 +115,13 @@ struct lock_class lock_class_rw = {
 #endif
 
 void
+assert_rw(struct lock_object *lock, int what)
+{
+
+	rw_assert((struct rwlock *)lock, what);
+}
+
+void
 lock_rw(struct lock_object *lock, int how)
 {
 	struct rwlock *rw;
@@ -172,6 +191,14 @@ rw_sysinit(void *arg)
 	rw_init(args->ra_rw, args->ra_desc);
 }
 
+void
+rw_sysinit_flags(void *arg)
+{
+	struct rw_args_flags *args = arg;
+
+	rw_init_flags(args->ra_rw, args->ra_desc, args->ra_flags);
+}
+
 int
 rw_wowned(struct rwlock *rw)
 {
@@ -187,7 +214,7 @@ _rw_wlock(struct rwlock *rw, const char 
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
-	    line);
+	    line, NULL);
 	__rw_wlock(rw, curthread, file, line);
 	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
 	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
@@ -234,6 +261,17 @@ _rw_wunlock(struct rwlock *rw, const cha
 		lock_profile_release_lock(&rw->lock_object);
 	__rw_wunlock(rw, curthread, file, line);
 }
+/*
+ * Determines whether a new reader can acquire a lock.  Succeeds if the
+ * reader already owns a read lock and the lock is locked for read to
+ * prevent deadlock from reader recursion.  Also succeeds if the lock
+ * is unlocked and has no writer waiters or spinners.  Failing otherwise
+ * prioritizes writers before readers.
+ */
+#define	RW_CAN_READ(_rw)						\
+    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
+    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
+    RW_LOCK_READ)
 
 void
 _rw_rlock(struct rwlock *rw, const char *file, int line)
@@ -241,31 +279,22 @@ _rw_rlock(struct rwlock *rw, const char 
 	struct turnstile *ts;
 #ifdef ADAPTIVE_RWLOCKS
 	volatile struct thread *owner;
+	int spintries = 0;
+	int i;
 #endif
-#ifdef LOCK_PROFILING_SHARED
+#ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
 	int contested = 0;
 #endif
-	uintptr_t x;
+	uintptr_t v;
 
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
 	KASSERT(rw_wowner(rw) != curthread,
 	    ("%s (%s): wlock already held @ %s:%d", __func__,
 	    rw->lock_object.lo_name, file, line));
-	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);
+	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
 
-	/*
-	 * Note that we don't make any attempt to try to block read
-	 * locks once a writer has blocked on the lock.  The reason is
-	 * that we currently allow for read locks to recurse and we
-	 * don't keep track of all the holders of read locks.  Thus, if
-	 * we were to block readers once a writer blocked and a reader
-	 * tried to recurse on their reader lock after a writer had
-	 * blocked we would end up in a deadlock since the reader would
-	 * be blocked on the writer, and the writer would be blocked
-	 * waiting for the reader to release its original read lock.
-	 */
 	for (;;) {
 		/*
 		 * Handle the easy case.  If no other thread has a write
@@ -277,33 +306,27 @@ _rw_rlock(struct rwlock *rw, const char 
 		 * completely unlocked rwlock since such a lock is encoded
 		 * as a read lock with no waiters.
 		 */
-		x = rw->rw_lock;
-		if (x & RW_LOCK_READ) {
-
+		v = rw->rw_lock;
+		if (RW_CAN_READ(v)) {
 			/*
 			 * The RW_LOCK_READ_WAITERS flag should only be set
-			 * if another thread currently holds a write lock,
-			 * and in that case RW_LOCK_READ should be clear.
+			 * if the lock has been unlocked and write waiters
+			 * were present.
 			 */
-			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
-			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
-			    x + RW_ONE_READER)) {
-#ifdef LOCK_PROFILING_SHARED
-				if (RW_READERS(x) == 0)
-					lock_profile_obtain_lock_success(
-					    &rw->lock_object, contested,
-					    waittime, file, line);
-#endif
+			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
+			    v + RW_ONE_READER)) {
 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
 					CTR4(KTR_LOCK,
 					    "%s: %p succeed %p -> %p", __func__,
-					    rw, (void *)x,
-					    (void *)(x + RW_ONE_READER));
+					    rw, (void *)v,
+					    (void *)(v + RW_ONE_READER));
 				break;
 			}
 			cpu_spinwait();
 			continue;
 		}
+		lock_profile_obtain_lock_failed(&rw->lock_object,
+		    &contested, &waittime);
 
 #ifdef ADAPTIVE_RWLOCKS
 		/*
@@ -311,36 +334,45 @@ _rw_rlock(struct rwlock *rw, const char 
 		 * the owner stops running or the state of the lock
 		 * changes.
 		 */
-		owner = (struct thread *)RW_OWNER(x);
-		if (TD_IS_RUNNING(owner)) {
-			if (LOCK_LOG_TEST(&rw->lock_object, 0))
-				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
-				    __func__, rw, owner);
-#ifdef LOCK_PROFILING_SHARED
-			lock_profile_obtain_lock_failed(&rw->lock_object,
-			    &contested, &waittime);
-#endif
-			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
-			    TD_IS_RUNNING(owner))
+		if ((v & RW_LOCK_READ) == 0) {
+			owner = (struct thread *)RW_OWNER(v);
+			if (TD_IS_RUNNING(owner)) {
+				if (LOCK_LOG_TEST(&rw->lock_object, 0))
+					CTR3(KTR_LOCK,
+					    "%s: spinning on %p held by %p",
+					    __func__, rw, owner);
+				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
+				    owner && TD_IS_RUNNING(owner))
+					cpu_spinwait();
+				continue;
+			}
+		} else if (spintries < rowner_retries) {
+			spintries++;
+			for (i = 0; i < rowner_loops; i++) {
+				v = rw->rw_lock;
+				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
+					break;
 				cpu_spinwait();
-			continue;
+			}
+			if (i != rowner_loops)
+				continue;
 		}
 #endif
 
 		/*
 		 * Okay, now it's the hard case.  Some other thread already
-		 * has a write lock, so acquire the turnstile lock so we can
-		 * begin the process of blocking.
+		 * has a write lock or there are write waiters present,
+		 * acquire the turnstile lock so we can begin the process
+		 * of blocking.
 		 */
 		ts = turnstile_trywait(&rw->lock_object);
 
 		/*
 		 * The lock might have been released while we spun, so
-		 * recheck its state and restart the loop if there is no
-		 * longer a write lock.
+		 * recheck its state and restart the loop if needed.
 		 */
-		x = rw->rw_lock;
-		if (x & RW_LOCK_READ) {
+		v = rw->rw_lock;
+		if (RW_CAN_READ(v)) {
 			turnstile_cancel(ts);
 			cpu_spinwait();
 			continue;
@@ -351,23 +383,30 @@ _rw_rlock(struct rwlock *rw, const char 
 		 * If the current owner of the lock is executing on another
 		 * CPU quit the hard path and try to spin.
 		 */
-		owner = (struct thread *)RW_OWNER(x);
-		if (TD_IS_RUNNING(owner)) {
-			turnstile_cancel(ts);
-			cpu_spinwait();
-			continue;
+		if ((v & RW_LOCK_READ) == 0) {
+			owner = (struct thread *)RW_OWNER(v);
+			if (TD_IS_RUNNING(owner)) {
+				turnstile_cancel(ts);
+				cpu_spinwait();
+				continue;
+			}
 		}
 #endif
 
 		/*
-		 * Ok, it's still a write lock.  If the RW_LOCK_READ_WAITERS
-		 * flag is already set, then we can go ahead and block.  If
-		 * it is not set then try to set it.  If we fail to set it
-		 * drop the turnstile lock and restart the loop.
-		 */
-		if (!(x & RW_LOCK_READ_WAITERS)) {

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
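
The kern_rwlock.c diff is cut off above, but the visible hunks add
RW_CAN_READ() (readers now defer to waiting writers unless they already hold a
read lock), adaptive spinning tunable via the debug.rwlock.retry and
debug.rwlock.loops sysctls, and an rw_sysinit_flags() helper.  A hedged sketch
of how a statically allocated rwlock could use that helper through SYSINIT (all
names are hypothetical; the rw_args_flags field names come from the hunk above,
while the struct itself lives in the truncated sys/rwlock.h diff):

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

static struct rwlock example_lock;
static struct rw_args_flags example_args = {
	.ra_rw		= &example_lock,
	.ra_desc	= "example lock",
	.ra_flags	= RW_RECURSE,
};
/* Initialize the lock early in boot, before its first consumer runs. */
SYSINIT(example_lock_init, SI_SUB_LOCK, SI_ORDER_MIDDLE,
    rw_sysinit_flags, &example_args);

Presumably sys/rwlock.h also wraps this pattern in an RW_SYSINIT_FLAGS()
convenience macro mirroring the existing RW_SYSINIT(), though that part of the
diff is not shown here.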

