svn commit: r204831 - stable/8/sys/kern

Luigi Rizzo luigi at FreeBSD.org
Sun Mar 7 14:29:12 UTC 2010


Author: luigi
Date: Sun Mar  7 14:29:12 2010
New Revision: 204831
URL: http://svn.freebsd.org/changeset/base/204831

Log:
  MFC r197137 and r200510, which fix a problem in 8.0
  with callouts firing one tick too late. See the logs for
  the original patch for details.
  RELENG_7 is not affected by the problem.

Modified:
  stable/8/sys/kern/kern_timeout.c

Modified: stable/8/sys/kern/kern_timeout.c
==============================================================================
--- stable/8/sys/kern/kern_timeout.c	Sun Mar  7 14:23:44 2010	(r204830)
+++ stable/8/sys/kern/kern_timeout.c	Sun Mar  7 14:29:12 2010	(r204831)
@@ -82,6 +82,23 @@ SYSCTL_INT(_debug, OID_AUTO, to_avg_mpca
  */
 int callwheelsize, callwheelbits, callwheelmask;
 
+/*
+ * There is one struct callout_cpu per cpu, holding all relevant
+ * state for the callout processing thread on the individual CPU.
+ * In particular:
+ *	cc_ticks is incremented once per tick in callout_tick().
+ *	It tracks the global 'ticks' but in a way that the individual
+ *	threads should not worry about races in the order in which
+ *	hardclock() and hardclock_cpu() run on the various CPUs.
+ *	cc_softticks is advanced in callout_tick() to point to the
+ *	first entry in cc_callwheel that may need handling. In turn,
+ *	a softclock() is scheduled so it can serve the various entries i
+ *	such that cc_softticks <= i <= cc_ticks .
+ *	XXX maybe cc_softclock and cc_ticks should be volatile ?
+ *
+ *	cc_ticks is also used in callout_reset_cpu() to determine
+ *	when the callout should be served.
+ */
 struct callout_cpu {
 	struct mtx		cc_lock;
 	struct callout		*cc_callout;
@@ -90,6 +107,7 @@ struct callout_cpu {
 	struct callout		*cc_next;
 	struct callout		*cc_curr;
 	void			*cc_cookie;
+	int 			cc_ticks;
 	int 			cc_softticks;
 	int			cc_cancel;
 	int			cc_waiting;
@@ -244,7 +262,8 @@ callout_tick(void)
 	need_softclock = 0;
 	cc = CC_SELF();
 	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
-	for (; (cc->cc_softticks - ticks) < 0; cc->cc_softticks++) {
+	cc->cc_ticks++;
+	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
 		bucket = cc->cc_softticks & callwheelmask;
 		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
 			need_softclock = 1;
@@ -323,7 +342,7 @@ softclock(void *arg)
 	steps = 0;
 	cc = (struct callout_cpu *)arg;
 	CC_LOCK(cc);
-	while (cc->cc_softticks != ticks) {
+	while (cc->cc_softticks - 1 != cc->cc_ticks) {
 		/*
 		 * cc_softticks may be modified by hard clock, so cache
 		 * it while we work on a given bucket.
@@ -622,7 +641,7 @@ retry:
 	c->c_arg = arg;
 	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
 	c->c_func = ftn;
-	c->c_time = ticks + to_ticks;
+	c->c_time = cc->cc_ticks + to_ticks;
 	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask], 
 			  c, c_links.tqe);
 	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",


More information about the svn-src-all mailing list