PERFORCE change 126329 for review

Kip Macy kmacy at FreeBSD.org
Wed Sep 12 01:31:20 PDT 2007


http://perforce.freebsd.org/chv.cgi?CH=126329

Change 126329 by kmacy@kmacy_home:ethng on 2007/09/12 08:30:25

	Add callout_init_rwlock() so a callout can be associated with an
	rwlock instead of a mutex; softclock() runs the handler with the
	rwlock write-locked, analogous to callout_init_mtx().

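	A minimal usage sketch of the new interface (the foo_* names are
	hypothetical, for illustration only, and are not part of this
	change):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/kernel.h>
	#include <sys/lock.h>
	#include <sys/rwlock.h>
	#include <sys/callout.h>

	/* Hypothetical driver state. */
	struct foo_softc {
		struct rwlock	sc_lock;
		struct callout	sc_timer;
	};

	static void
	foo_tick(void *arg)
	{
		struct foo_softc *sc = arg;

		/* softclock() entered us with sc_lock write-locked. */
		/* ... periodic work protected by sc_lock ... */
		callout_reset(&sc->sc_timer, hz, foo_tick, sc);
		/* Without CALLOUT_RETURNUNLOCKED_RW, softclock() drops
		   sc_lock after we return. */
	}

	static void
	foo_attach(struct foo_softc *sc)
	{
		rw_init(&sc->sc_lock, "foo softc lock");
		callout_init_rwlock(&sc->sc_timer, &sc->sc_lock, 0);

		rw_wlock(&sc->sc_lock);
		callout_reset(&sc->sc_timer, hz, foo_tick, sc);
		rw_wunlock(&sc->sc_lock);
	}
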
Affected files ...

.. //depot/projects/ethng/src/sys/kern/kern_timeout.c#3 edit
.. //depot/projects/ethng/src/sys/sys/callout.h#2 edit

Differences ...

==== //depot/projects/ethng/src/sys/kern/kern_timeout.c#3 (text+ko) ====

@@ -45,6 +45,7 @@
 #include <sys/ktr.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
+#include <sys/rwlock.h>
 #include <sys/proc.h>
 #include <sys/sleepqueue.h>
 #include <sys/sysctl.h>
@@ -58,6 +59,9 @@
 static int avg_mtxcalls;
 SYSCTL_INT(_debug, OID_AUTO, to_avg_mtxcalls, CTLFLAG_RD, &avg_mtxcalls, 0,
     "Average number of mtx callouts made per softclock call. Units = 1/1000");
+static int avg_rwcalls;
+SYSCTL_INT(_debug, OID_AUTO, to_avg_rwcalls, CTLFLAG_RD, &avg_rwcalls, 0,
+    "Average number of rw callouts made per softclock call. Units = 1/1000");
 static int avg_mpcalls;
 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
     "Average number of MP callouts made per softclock call. Units = 1/1000");
@@ -171,6 +175,7 @@
 	int depth;
 	int mpcalls;
 	int mtxcalls;
+	int rwcalls;
 	int gcalls;
 #ifdef DIAGNOSTIC
 	struct bintime bt1, bt2;
@@ -185,6 +190,7 @@
 
 	mpcalls = 0;
 	mtxcalls = 0;
+	rwcalls = 0;
 	gcalls = 0;
 	depth = 0;
 	steps = 0;
@@ -216,6 +222,7 @@
 				void (*c_func)(void *);
 				void *c_arg;
 				struct mtx *c_mtx;
+				struct rwlock *c_rwlock;
 				int c_flags;
 
 				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
@@ -223,6 +230,7 @@
 				c_func = c->c_func;
 				c_arg = c->c_arg;
 				c_mtx = c->c_mtx;
+				c_rwlock = c->c_rwlock;
 				c_flags = c->c_flags;
 				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
 					c->c_func = NULL;
@@ -237,7 +245,19 @@
 				}
 				curr_cancelled = 0;
 				mtx_unlock_spin(&callout_lock);
-				if (c_mtx != NULL) {
+				if (c_rwlock != NULL) {
+					rw_wlock(c_rwlock);
+					if (curr_cancelled) {
+						rw_wunlock(c_rwlock);
+						goto skip;
+					}
+					curr_cancelled = 1;
+					rwcalls++;
+
+					CTR3(KTR_CALLOUT, "callout rwlock"
+					    " %p func %p arg %p",
+					    c, c_func, c_arg);
+				} else if (c_mtx != NULL) {
 					if (c_flags & CALLOUT_NETGIANT) {
 						mtx_lock(&Giant);
 						gcalls++;
@@ -297,8 +317,10 @@
 					lastfunc = c_func;
 				}
 #endif
-				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
+				if (c_mtx != NULL && (c_flags & CALLOUT_RETURNUNLOCKED) == 0)
 					mtx_unlock(c_mtx);
+				if (c_rwlock != NULL && (c_flags & CALLOUT_RETURNUNLOCKED_RW) == 0)
+					rw_wunlock(c_rwlock);
 				if (c_flags & CALLOUT_NETGIANT)
 					mtx_unlock(&Giant);
 			skip:
@@ -322,6 +344,7 @@
 	avg_depth += (depth * 1000 - avg_depth) >> 8;
 	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
 	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
+	avg_rwcalls += (rwcalls * 1000 - avg_rwcalls) >> 8;
 	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
 	nextsoftcheck = NULL;
 	mtx_unlock_spin(&callout_lock);
@@ -623,7 +646,6 @@
 {
 	bzero(c, sizeof *c);
 	if (mpsafe) {
-		c->c_mtx = NULL;
 		c->c_flags = CALLOUT_RETURNUNLOCKED;
 	} else {
 		c->c_mtx = &Giant;
@@ -639,6 +661,7 @@
 {
 	bzero(c, sizeof *c);
 	c->c_mtx = mtx;
+	c->c_rwlock = NULL;
 	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED|CALLOUT_NETGIANT)) == 0,
 	    ("callout_init_mtx: bad flags %d", flags));
 	/* CALLOUT_RETURNUNLOCKED makes no sense without a mutex. */
@@ -647,6 +670,17 @@
 	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED|CALLOUT_NETGIANT);
 }
 
+void
+callout_init_rwlock(struct callout *c, struct rwlock *rw, int flags)
+{
+	bzero(c, sizeof *c);
+	c->c_mtx = NULL;
+	c->c_rwlock = rw;
+	KASSERT((flags & ~CALLOUT_RETURNUNLOCKED_RW) == 0,
+	    ("callout_init_rwlock: bad flags %d", flags));
+	c->c_flags = flags & CALLOUT_RETURNUNLOCKED_RW;
+}
+
 #ifdef APM_FIXUP_CALLTODO
 /* 
  * Adjust the kernel calltodo timeout list.  This routine is used after 

==== //depot/projects/ethng/src/sys/sys/callout.h#2 (text+ko) ====

@@ -54,6 +54,7 @@
 	void	*c_arg;				/* function argument */
 	void	(*c_func)(void *);	/* function to call */
 	struct mtx *c_mtx;			/* mutex to lock */
+	struct rwlock *c_rwlock;		/* rwlock to lock */
 	int	c_flags;			/* state of this entry */
 };
 
@@ -63,6 +64,7 @@
 #define	CALLOUT_MPSAFE		0x0008 /* callout handler is mp safe */
 #define	CALLOUT_RETURNUNLOCKED	0x0010 /* handler returns with mtx unlocked */
 #define	CALLOUT_NETGIANT	0x0020 /* XXX: obtain Giant before mutex */
+#define	CALLOUT_RETURNUNLOCKED_RW	0x0040 /* handler returns with rwlock unlocked */
 
 struct callout_handle {
 	struct callout *callout;
@@ -81,6 +83,7 @@
 #define	callout_drain(c)	_callout_stop_safe(c, 1)
 void	callout_init(struct callout *, int);
 void	callout_init_mtx(struct callout *, struct mtx *, int);
+void	callout_init_rwlock(struct callout *, struct rwlock *, int);
 #define	callout_pending(c)	((c)->c_flags & CALLOUT_PENDING)
 int	callout_reset(struct callout *, int, void (*)(void *), void *);
 #define	callout_stop(c)		_callout_stop_safe(c, 0)
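
An illustrative sketch of the CALLOUT_RETURNUNLOCKED_RW case (bar_* names
are hypothetical; same headers as the sketch above): the handler drops the
write lock itself before returning, and the softclock() change above then
skips its rw_wunlock() on that path.

	struct bar_softc {
		struct rwlock	sc_lock;
		struct callout	sc_timer;
	};

	static void
	bar_tick(void *arg)
	{
		struct bar_softc *sc = arg;

		/* Entered with sc_lock write-locked by softclock(). */
		/* ... work that needs sc_lock ... */
		rw_wunlock(&sc->sc_lock);	/* handler releases the lock itself */
		/* ... work that must run without sc_lock held ... */
	}

	/* Paired with:
	 *	callout_init_rwlock(&sc->sc_timer, &sc->sc_lock,
	 *	    CALLOUT_RETURNUNLOCKED_RW);
	 */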

