svn commit: r246413 - in projects/calloutng/sys: dev/random dev/syscons kern netinet sys

Davide Italiano davide at FreeBSD.org
Wed Feb 6 15:23:59 UTC 2013


Author: davide
Date: Wed Feb  6 15:23:55 2013
New Revision: 246413
URL: http://svnweb.freebsd.org/changeset/base/246413

Log:
   Move callout(9) from struct bintime to a new 64-bit type, sbintime_t.
  There are quite a few advantages in using sbintime_t over struct bintime,
  they've been widely discussed on -current. See archives for more
  information on the topic.
  
  Suggested by:	phk
  Reviewed by:	mav

Modified:
  projects/calloutng/sys/dev/random/randomdev_soft.c
  projects/calloutng/sys/dev/syscons/syscons.c
  projects/calloutng/sys/kern/kern_condvar.c
  projects/calloutng/sys/kern/kern_event.c
  projects/calloutng/sys/kern/kern_resource.c
  projects/calloutng/sys/kern/kern_synch.c
  projects/calloutng/sys/kern/kern_tc.c
  projects/calloutng/sys/kern/kern_time.c
  projects/calloutng/sys/kern/kern_timeout.c
  projects/calloutng/sys/kern/subr_log.c
  projects/calloutng/sys/kern/subr_param.c
  projects/calloutng/sys/kern/subr_sleepqueue.c
  projects/calloutng/sys/kern/sys_generic.c
  projects/calloutng/sys/netinet/tcp_timer.c
  projects/calloutng/sys/sys/_callout.h
  projects/calloutng/sys/sys/callout.h
  projects/calloutng/sys/sys/condvar.h
  projects/calloutng/sys/sys/mutex.h
  projects/calloutng/sys/sys/rwlock.h
  projects/calloutng/sys/sys/sleepqueue.h
  projects/calloutng/sys/sys/sx.h
  projects/calloutng/sys/sys/systm.h
  projects/calloutng/sys/sys/time.h

Modified: projects/calloutng/sys/dev/random/randomdev_soft.c
==============================================================================
--- projects/calloutng/sys/dev/random/randomdev_soft.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/dev/random/randomdev_soft.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -282,9 +282,8 @@ random_kthread(void *arg __unused)
 			random_kthread_control = 0;
 
 		/* Work done, so don't belabour the issue */
-		msleep_spin_bt(&random_kthread_control, &harvest_mtx,
-		    "-", ticks2bintime(hz / 10), zero_bt,
-		    C_PREL(1) | C_HARDCLOCK);
+		msleep_spin_sbt(&random_kthread_control, &harvest_mtx,
+		    "-", (SBT_1S / 10), 0, C_PREL(1));
 
 	}
 	mtx_unlock_spin(&harvest_mtx);

Modified: projects/calloutng/sys/dev/syscons/syscons.c
==============================================================================
--- projects/calloutng/sys/dev/syscons/syscons.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/dev/syscons/syscons.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -1899,8 +1899,8 @@ done:
 	    rate = 2;
 	else
 	    rate = 30;
-	callout_reset_bt(&sc->ctimeout, ticks2bintime(hz / rate), zero_bt,
-	    scrn_timer, sc, C_PREL(1) | C_HARDCLOCK);
+	callout_reset_sbt(&sc->ctimeout, (SBT_1S / rate), 0,
+	    scrn_timer, sc, C_PREL(1));
     }
 }
 
@@ -3845,8 +3845,8 @@ blink_screen(void *arg)
 	(*scp->rndr->draw)(scp, 0, scp->xsize*scp->ysize, 
 			   scp->sc->blink_in_progress & 1);
 	scp->sc->blink_in_progress--;
-	callout_reset_bt(&scp->sc->cblink, ticks2bintime(hz / 15), zero_bt,
-	    blink_screen, scp, C_PREL(0) | C_HARDCLOCK);
+	callout_reset_sbt(&scp->sc->cblink, (SBT_1S / 15), 0,
+	    blink_screen, scp, C_PREL(0));
     }
 }
 

Modified: projects/calloutng/sys/kern/kern_condvar.c
==============================================================================
--- projects/calloutng/sys/kern/kern_condvar.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/kern/kern_condvar.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -274,8 +274,8 @@ _cv_wait_sig(struct cv *cvp, struct lock
  * cv_signal or cv_broadcast, EWOULDBLOCK if the timeout expires.
  */
 int
-_cv_timedwait_bt(struct cv *cvp, struct lock_object *lock, struct bintime bt,
-    struct bintime pr, int flags)
+_cv_timedwait_sbt(struct cv *cvp, struct lock_object *lock, sbintime_t sbt,
+    sbintime_t pr, int flags)
 {
 	WITNESS_SAVE_DECL(lock_witness);
 	struct lock_class *class;
@@ -311,7 +311,7 @@ _cv_timedwait_bt(struct cv *cvp, struct 
 	DROP_GIANT();
 
 	sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR, 0);
-	sleepq_set_timeout_bt(cvp, bt, pr, flags);
+	sleepq_set_timeout_sbt(cvp, sbt, pr, flags);
 	if (lock != &Giant.lock_object) {
 		if (class->lc_flags & LC_SLEEPABLE)
 			sleepq_release(cvp);
@@ -342,8 +342,8 @@ _cv_timedwait_bt(struct cv *cvp, struct 
  * or ERESTART if a signal was caught.
  */
 int
-_cv_timedwait_sig_bt(struct cv *cvp, struct lock_object *lock,
-    struct bintime bt, struct bintime pr, int flags)
+_cv_timedwait_sig_sbt(struct cv *cvp, struct lock_object *lock,
+    sbintime_t sbt, sbintime_t pr, int flags)
 {
 	WITNESS_SAVE_DECL(lock_witness);
 	struct lock_class *class;
@@ -380,7 +380,7 @@ _cv_timedwait_sig_bt(struct cv *cvp, str
 
 	sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR |
 	    SLEEPQ_INTERRUPTIBLE, 0);
-	sleepq_set_timeout_bt(cvp, bt, pr, flags);
+	sleepq_set_timeout_sbt(cvp, sbt, pr, flags);
 	if (lock != &Giant.lock_object) {
 		if (class->lc_flags & LC_SLEEPABLE)
 			sleepq_release(cvp);

Modified: projects/calloutng/sys/kern/kern_event.c
==============================================================================
--- projects/calloutng/sys/kern/kern_event.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/kern/kern_event.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -517,14 +517,11 @@ knote_fork(struct knlist *list, int pid)
  * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
  * interval timer support code.
  */
-static struct bintime
-timer2bintime(intptr_t data)
+static __inline sbintime_t 
+timer2sbintime(intptr_t data)
 {
-	struct bintime bt;
 
-	bt.sec = data / 1000;
-	bt.frac = (data % 1000) * (((uint64_t)1 << 63) / 500);
-	return bt;
+	return (SBT_1MS * data); 
 }
 
 static void
@@ -546,8 +543,8 @@ filt_timerexpire(void *knx)
 	 */
 	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
 		calloutp = (struct callout *)kn->kn_hook;
-		callout_reset_bt_on(calloutp,
-		    timer2bintime(kn->kn_sdata), zero_bt /* 1ms? */,
+		callout_reset_sbt_on(calloutp,
+		    timer2sbintime(kn->kn_sdata), 0 /* 1ms? */,
 		    filt_timerexpire, kn, PCPU_GET(cpuid), 0);
 	}
 }
@@ -572,8 +569,8 @@ filt_timerattach(struct knote *kn)
 	calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
 	callout_init(calloutp, CALLOUT_MPSAFE);
 	kn->kn_hook = calloutp;
-	callout_reset_bt_on(calloutp,
-	    timer2bintime(kn->kn_sdata), zero_bt /* 1ms? */,
+	callout_reset_sbt_on(calloutp,
+	    timer2sbintime(kn->kn_sdata), 0 /* 1ms? */,
 	    filt_timerexpire, kn, PCPU_GET(cpuid), 0);
 
 	return (0);
@@ -1319,7 +1316,7 @@ kqueue_scan(struct kqueue *kq, int maxev
     const struct timespec *tsp, struct kevent *keva, struct thread *td)
 {
 	struct kevent *kevp;
-	struct bintime abt, rbt;
+	sbintime_t asbt, rsbt;
 	struct knote *kn, *marker;
 	int count, nkev, error, influx;
 	int haskqglobal, touch;
@@ -1339,19 +1336,15 @@ kqueue_scan(struct kqueue *kq, int maxev
 			goto done_nl;
 		}
 		if (timespecisset(tsp)) {
-			timespec2bintime(tsp, &rbt);
-			if (TIMESEL(&abt, &rbt))
-				bintime_add(&abt, &tc_tick_bt);
-			bintime_add(&abt, &rbt);
-			bintime_shift(&rbt, -tc_timeexp);
-		} else {
-			abt.sec = -1;
-			abt.frac = 0;
-		}
-	} else {
-		abt.sec = 0;
-		abt.frac = 0;
-	}
+			rsbt = timespec2sbintime(*tsp);
+			if (TIMESEL(&asbt, rsbt))
+				asbt += tc_tick_sbt;
+			asbt += rsbt;
+			rsbt >>= tc_precexp;
+		} else
+			asbt = -1;
+	} else
+		asbt = 0;
 	marker = knote_alloc(1);
 	if (marker == NULL) {
 		error = ENOMEM;
@@ -1363,12 +1356,12 @@ kqueue_scan(struct kqueue *kq, int maxev
 retry:
 	kevp = keva;
 	if (kq->kq_count == 0) {
-		if (abt.sec < 0) {
+		if (asbt == -1) {
 			error = EWOULDBLOCK;
 		} else {
 			kq->kq_state |= KQ_SLEEP;
-			error = msleep_bt(kq, &kq->kq_lock, PSOCK | PCATCH,
-			    "kqread", abt, rbt, C_ABSOLUTE);
+			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
+			    "kqread", asbt, rsbt, C_ABSOLUTE);
 		}
 		if (error == 0)
 			goto retry;

Modified: projects/calloutng/sys/kern/kern_resource.c
==============================================================================
--- projects/calloutng/sys/kern/kern_resource.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/kern/kern_resource.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -645,7 +645,7 @@ lim_cb(void *arg)
 		}
 	}
 	if ((p->p_flag & P_WEXIT) == 0)
-		callout_reset_bt(&p->p_limco, ticks2bintime(hz), zero_bt,
+		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
 		    lim_cb, p, C_PREL(1) | C_HARDCLOCK);
 }
 
@@ -698,7 +698,7 @@ kern_proc_setrlimit(struct thread *td, s
 	case RLIMIT_CPU:
 		if (limp->rlim_cur != RLIM_INFINITY &&
 		    p->p_cpulimit == RLIM_INFINITY)
-			callout_reset_bt(&p->p_limco, ticks2bintime(hz), zero_bt,
+			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
 			    lim_cb, p, C_PREL(1) | C_HARDCLOCK);
 		p->p_cpulimit = limp->rlim_cur;
 		break;
@@ -1139,7 +1139,7 @@ lim_fork(struct proc *p1, struct proc *p
 	p2->p_limit = lim_hold(p1->p_limit);
 	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
 	if (p1->p_cpulimit != RLIM_INFINITY)
-		callout_reset_bt(&p2->p_limco, ticks2bintime(hz), zero_bt,
+		callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
 		    lim_cb, p2, C_PREL(1) | C_HARDCLOCK);
 }
 

Modified: projects/calloutng/sys/kern/kern_synch.c
==============================================================================
--- projects/calloutng/sys/kern/kern_synch.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/kern/kern_synch.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -146,7 +146,7 @@ sleepinit(void)
  */
 int
 _sleep(void *ident, struct lock_object *lock, int priority,
-    const char *wmesg, struct bintime bt, struct bintime pr, int flags)
+    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
 {
 	struct thread *td;
 	struct proc *p;
@@ -162,7 +162,7 @@ _sleep(void *ident, struct lock_object *
 #endif
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
 	    "Sleeping on \"%s\"", wmesg);
-	KASSERT(bintime_isset(&bt) || mtx_owned(&Giant) || lock != NULL,
+	KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL,
 	    ("sleeping without a lock"));
 	KASSERT(p != NULL, ("msleep1"));
 	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
@@ -232,17 +232,17 @@ _sleep(void *ident, struct lock_object *
 	 * return from cursig().
 	 */
 	sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
-	if (bintime_isset(&bt))
-		sleepq_set_timeout_bt(ident, bt, pr, flags);
+	if (sbt != 0)
+		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
 	if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
 		sleepq_release(ident);
 		WITNESS_SAVE(lock, lock_witness);
 		lock_state = class->lc_unlock(lock);
 		sleepq_lock(ident);
 	}
-	if (bintime_isset(&bt) && catch)
+	if (sbt != 0 && catch)
 		rval = sleepq_timedwait_sig(ident, pri);
-	else if (bintime_isset(&bt))
+	else if (sbt != 0)
 		rval = sleepq_timedwait(ident, pri);
 	else if (catch)
 		rval = sleepq_wait_sig(ident, pri);
@@ -263,8 +263,8 @@ _sleep(void *ident, struct lock_object *
 }
 
 int
-msleep_spin_bt(void *ident, struct mtx *mtx, const char *wmesg,
-    struct bintime bt, struct bintime pr, int flags)
+msleep_spin_sbt(void *ident, struct mtx *mtx, const char *wmesg,
+    sbintime_t sbt, sbintime_t pr, int flags)
 {
 	struct thread *td;
 	struct proc *p;
@@ -302,8 +302,8 @@ msleep_spin_bt(void *ident, struct mtx *
 	 * We put ourselves on the sleep queue and start our timeout.
 	 */
 	sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
-	if (bintime_isset(&bt))
-		sleepq_set_timeout_bt(ident, bt, pr, flags);
+	if (sbt != 0)
+		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
 
 	/*
 	 * Can't call ktrace with any spin locks held so it can lock the
@@ -325,7 +325,7 @@ msleep_spin_bt(void *ident, struct mtx *
 	    wmesg);
 	sleepq_lock(ident);
 #endif
-	if (bintime_isset(&bt))
+	if (sbt != 0)
 		rval = sleepq_timedwait(ident, 0);
 	else {
 		sleepq_wait(ident, 0);
@@ -349,28 +349,30 @@ msleep_spin_bt(void *ident, struct mtx *
  * to a "timo" value of one.
  */
 int
-pause_bt(const char *wmesg, struct bintime bt, struct bintime pr, int flags)
+pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
 {
+	int sbt_sec;
 
-	KASSERT(bt.sec >= 0, ("pause: timo must be >= 0"));
+	sbt_sec = sbintime_getsec(sbt);	
+	KASSERT(sbt_sec >= 0, ("pause: timo must be >= 0"));
 
 	/* silently convert invalid timeouts */
-	if (!bintime_isset(&bt))
-		bt = tick_bt;
+	if (sbt == 0)
+		sbt = tick_sbt;
 
 	if (cold) {
 		/*
 		 * We delay one second at a time to avoid overflowing the
 		 * system specific DELAY() function(s):
 		 */
-		while (bt.sec > 0) {
+		while (sbt_sec > 0) {
 			DELAY(1000000);
-			bt.sec--;
+			sbt_sec--;
 		}
-		DELAY(bt.frac >> 44);
+		DELAY(sbt / SBT_1US);
 		return (0);
 	}
-	return (_sleep(&pause_wchan, NULL, 0, wmesg, bt, pr, flags));
+	return (_sleep(&pause_wchan, NULL, 0, wmesg, sbt, pr, flags));
 }
 
 /*
@@ -561,8 +563,8 @@ loadav(void *arg)
 	 * random variation to avoid synchronisation with processes that
 	 * run at regular intervals.
 	 */
-	callout_reset_bt(&loadav_callout,
-	    ticks2bintime(hz * 4 + (int)(random() % (hz * 2 + 1))), zero_bt,
+	callout_reset_sbt(&loadav_callout,
+	    tick_sbt * (hz * 4 + (int)(random() % (hz * 2 + 1))), 0,
 	    loadav, NULL, C_DIRECT_EXEC | C_HARDCLOCK);
 }
 

Modified: projects/calloutng/sys/kern/kern_tc.c
==============================================================================
--- projects/calloutng/sys/kern/kern_tc.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/kern/kern_tc.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -122,8 +122,11 @@ SYSCTL_INT(_kern_timecounter, OID_AUTO, 
 
 struct bintime bt_timethreshold;
 struct bintime bt_tickthreshold;
+sbintime_t sbt_timethreshold;
+sbintime_t sbt_tickthreshold;
 struct bintime tc_tick_bt;
-int tc_timeexp;
+sbintime_t tc_tick_sbt;
+int tc_precexp;
 int tc_timepercentage = TC_DEFAULTPERC;
 TUNABLE_INT("kern.timecounter.alloweddeviation", &tc_timepercentage);
 static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
@@ -347,6 +350,16 @@ binuptime(struct bintime *bt)
 }
 
 void
+sbinuptime(sbintime_t *sbt)
+{
+	/* XXX: We need a real implementation, but tomorrow */
+	struct bintime bt;
+
+	binuptime(&bt);
+	*sbt = bintime2sbintime(bt);
+}
+
+void
 nanouptime(struct timespec *tsp)
 {
 	struct bintime bt;
@@ -404,6 +417,16 @@ getbinuptime(struct bintime *bt)
 }
 
 void
+getsbinuptime(sbintime_t *sbt)
+{
+	/* XXX: We need a real implementation, but tomorrow */ 
+	struct bintime bt;
+
+	getbinuptime(&bt);
+	*sbt = bintime2sbintime(bt);
+}
+
+void
 getnanouptime(struct timespec *tsp)
 {
 	struct timehands *th;
@@ -896,6 +919,16 @@ binuptime(struct bintime *bt)
 }
 
 void
+sbinuptime(sbintime_t *sbt)
+{
+	/* XXX: We need a real implementation, but tomorrow */ 
+	struct bintime bt;
+
+	binuptime(&bt);
+	*sbt = bintime2sbintime(bt);
+} 
+
+void
 nanouptime(struct timespec *tsp)
 {
 
@@ -938,6 +971,16 @@ getbinuptime(struct bintime *bt)
 }
 
 void
+getsbinuptime(sbintime_t *sbt)
+{
+	/* XXX: We need a real implementation, but tomorrow */ 
+	struct bintime bt;
+
+	getbinuptime(&bt);
+	*sbt = bintime2sbintime(bt);
+}
+
+void
 getnanouptime(struct timespec *tsp)
 {
 
@@ -1725,17 +1768,19 @@ tc_adjprecision(void)
 
 	if (tc_timepercentage > 0) {
 		t = (99 + tc_timepercentage) / tc_timepercentage;
-		tc_timeexp = fls(t + (t >> 1)) - 1;
+		tc_precexp = fls(t + (t >> 1)) - 1;
 		FREQ2BT(hz / tc_tick, &bt_timethreshold);
 		FREQ2BT(hz, &bt_tickthreshold);
-		bintime_shift(&bt_timethreshold, tc_timeexp);
-		bintime_shift(&bt_tickthreshold, tc_timeexp);
+		bintime_shift(&bt_timethreshold, tc_precexp);
+		bintime_shift(&bt_tickthreshold, tc_precexp);
 	} else {
-		tc_timeexp = 31;
+		tc_precexp = 31;
 		bt_timethreshold.sec = INT_MAX;
 		bt_timethreshold.frac = ~(uint64_t)0;
 		bt_tickthreshold = bt_timethreshold;
 	}
+	sbt_timethreshold = bintime2sbintime(bt_timethreshold);
+	sbt_tickthreshold = bintime2sbintime(bt_tickthreshold);
 }
 
 static int
@@ -1772,8 +1817,10 @@ inittimecounter(void *dummy)
 		tc_tick = 1;
 	tc_adjprecision();
 	FREQ2BT(hz, &tick_bt);
+	tick_sbt = bintime2sbintime(tick_bt);
 	tick_rate = hz / tc_tick;
 	FREQ2BT(tick_rate, &tc_tick_bt);
+	tc_tick_sbt = bintime2sbintime(tc_tick_bt);
 	p = (tc_tick * 1000000) / hz;
 	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
 

Modified: projects/calloutng/sys/kern/kern_time.c
==============================================================================
--- projects/calloutng/sys/kern/kern_time.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/kern/kern_time.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -483,34 +483,32 @@ int
 kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
 {
 	struct timespec ts;
-	struct bintime bt, btt, bt_prec, tmp;
+	sbintime_t sbt, sbtt, prec, tmp;
 	int error;
 
 	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
 		return (EINVAL);
 	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
 		return (0);
-	timespec2bintime(rqt, &tmp);
-	bt_prec = tmp;
-	bintime_shift(&bt_prec, -tc_timeexp);
-	if (TIMESEL(&bt, &tmp))
-		bintime_add(&bt, &tc_tick_bt);
-	bintime_add(&bt, &tmp);
-	error = tsleep_bt(&nanowait, PWAIT | PCATCH, "nanslp", bt, bt_prec,
+	tmp = timespec2sbintime(*rqt);
+	prec = tmp;
+	prec >>= tc_precexp;
+	if (TIMESEL(&sbt, tmp))
+		sbt += tc_tick_sbt;
+	sbt += tmp;
+	error = tsleep_sbt(&nanowait, PWAIT | PCATCH, "nanslp", sbt, prec,
 	    C_ABSOLUTE);
 	if (error != EWOULDBLOCK) {
 		if (error == ERESTART)
 			error = EINTR;
-		TIMESEL(&btt, &tmp);
+		TIMESEL(&sbtt, tmp);
 		if (rmt != NULL) {
-			tmp = bt;
-			bintime_sub(&tmp, &btt);
-			bintime2timespec(&tmp, &ts);
+			ts = sbintime2timespec(sbt - sbtt);
 			if (ts.tv_sec < 0)
 				timespecclear(&ts);
 			*rmt = ts;
 		}
-		if (bintime_cmp(&btt, &bt, >=))
+		if (sbtt >= sbt)
 			return (0);
 		return (error);
 	}

Modified: projects/calloutng/sys/kern/kern_timeout.c
==============================================================================
--- projects/calloutng/sys/kern/kern_timeout.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/kern/kern_timeout.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -121,7 +121,7 @@ struct cc_exec {
 	void			(*ce_migration_func)(void *);
 	void			*ce_migration_arg;
 	int			ce_migration_cpu;
-	struct bintime		ce_migration_time;
+	sbintime_t		ce_migration_time;
 #endif
 	int			cc_cancel;
 	int			cc_waiting;
@@ -138,8 +138,8 @@ struct callout_cpu {
 	struct callout_tailq	*cc_callwheel;
 	struct callout_tailq	cc_expireq;
 	struct callout_list	cc_callfree;
-	struct bintime 		cc_firstevent;
-	struct bintime 		cc_lastscan;
+	sbintime_t		cc_firstevent;
+	sbintime_t		cc_lastscan;
 	void			*cc_cookie;
 };
 
@@ -217,7 +217,7 @@ cc_cme_cleanup(struct callout_cpu *cc, i
 	cc->cc_exec_entity[direct].cc_waiting = 0;
 #ifdef SMP
 	cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK;
-	bintime_clear(&cc->cc_exec_entity[direct].ce_migration_time);
+	cc->cc_exec_entity[direct].ce_migration_time = 0;
 	cc->cc_exec_entity[direct].ce_migration_func = NULL;
 	cc->cc_exec_entity[direct].ce_migration_arg = NULL;
 #endif
@@ -368,30 +368,29 @@ SYSINIT(start_softclock, SI_SUB_SOFTINTR
 #define	CC_HASH_SHIFT	10
 
 static inline int
-callout_hash(struct bintime *bt)
+callout_hash(sbintime_t sbt)
 {
-
-	return (int) ((bt->sec << CC_HASH_SHIFT) +
-	    (bt->frac >> (64 - CC_HASH_SHIFT)));
+	
+	return (int)(sbt >> (32 - CC_HASH_SHIFT));
 }
 
 static inline int
-get_bucket(struct bintime *bt)
+callout_get_bucket(sbintime_t sbt)
 {
 
-	return callout_hash(bt) & callwheelmask;
+	return callout_hash(sbt) & callwheelmask;
 }
 
 void
 callout_process(struct bintime *now)
 {
-	struct bintime first, last, max, tmp_max;
 	struct callout *tmp, *tmpn;
 	struct callout_cpu *cc;
 	struct callout_tailq *sc;
 	uint64_t lookahead;
-	int depth_dir, firstb, mpcalls_dir, lastb, nowb, lockcalls_dir,
-	    need_softclock, exit_allowed, exit_wanted;
+	sbintime_t first, last, max, now_sbt, tmp_max;
+	int depth_dir, firstb, lastb, mpcalls_dir, nowb,
+	    lockcalls_dir, need_softclock, exit_allowed, exit_wanted;
 
 	need_softclock = 0;
 	depth_dir = 0;
@@ -401,22 +400,23 @@ callout_process(struct bintime *now)
 	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
 
 	/* Compute the buckets of the last scan and present times. */
-	firstb = callout_hash(&cc->cc_lastscan);
-	cc->cc_lastscan = *now;
-	nowb = callout_hash(now);
+	firstb = callout_hash(cc->cc_lastscan);
+	now_sbt = bintime2sbintime(*now);
+	cc->cc_lastscan = now_sbt;
+	nowb = callout_hash(now_sbt);
 
 	/* Compute the last bucket and minimum time of the bucket after it. */
 	if (nowb == firstb)
-		lookahead = 1LLU << 60;		/* 1/16s */
+		lookahead = (SBT_1S / 16);
 	else if (nowb - firstb == 1)
-		lookahead = 1LLU << 61;		/* 1/8s */
+		lookahead = (SBT_1S / 8);
 	else
-		lookahead = 1LLU << 63;		/* 1/2s */
-	first = last = *now;
-	bintime_addx(&first, lookahead / 2);
-	bintime_addx(&last, lookahead);
-	last.frac &= (0xffffffffffffffffLLU << (64 - CC_HASH_SHIFT));
-	lastb = callout_hash(&last) - 1;
+		lookahead = (SBT_1S / 2);
+	first = last = now_sbt;
+	first += (lookahead / 2);
+	last += lookahead;
+	last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
+	lastb = callout_hash(last) - 1;
 	max = last;
 
 	/*
@@ -438,7 +438,7 @@ callout_process(struct bintime *now)
 		tmp = TAILQ_FIRST(sc);
 		while (tmp != NULL) {
 			/* Run the callout if present time within allowed. */
-			if (bintime_cmp(&tmp->c_time, now, <=)) {
+			if (tmp->c_time <= now_sbt) {
 				/*
 				 * Consumer told us the callout may be run
 				 * directly from hardware interrupt context.
@@ -464,22 +464,22 @@ callout_process(struct bintime *now)
 				continue;
 			}
 			/* Skip events from distant future. */
-			if (bintime_cmp(&tmp->c_time, &max, >=))
+			if (tmp->c_time >= max)
 				goto next;
 			/*
 			 * Event minimal time is bigger than present maximal
 			 * time, so it cannot be aggregated.
 			 */
-			if (bintime_cmp(&tmp->c_time, &last, >)) {
+			if (tmp->c_time > last) {
 				exit_wanted = 1;
 				goto next;
 			}
 			/* Update first and last time, respecting this event. */
-			if (bintime_cmp(&tmp->c_time, &first, <))
+			if (tmp->c_time < first)
 				first = tmp->c_time;
 			tmp_max = tmp->c_time;
-			bintime_add(&tmp_max, &tmp->c_precision);
-			if (bintime_cmp(&tmp_max, &last, <))
+			tmp_max += tmp->c_precision;
+			if (tmp_max < last)
 				last = tmp_max;
 next:
 			tmp = TAILQ_NEXT(tmp, c_links.tqe);
@@ -500,7 +500,8 @@ next:
 	}
 	cc->cc_exec_next_dir = NULL;
 	if (callout_new_inserted != NULL)
-		(*callout_new_inserted)(curcpu, last, first);
+		(*callout_new_inserted)(curcpu, sbintime2bintime(last), 
+		    sbintime2bintime(first));
 	cc->cc_firstevent = last;
 #ifdef CALLOUT_PROFILING
 	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
@@ -542,39 +543,38 @@ callout_lock(struct callout *c)
 
 static void
 callout_cc_add(struct callout *c, struct callout_cpu *cc,
-    struct bintime to_bintime, struct bintime precision, void (*func)(void *),
+    sbintime_t sbt, sbintime_t precision, void (*func)(void *),
     void *arg, int cpu, int flags)
 {
-	struct bintime last;
+	sbintime_t last;
 	int bucket;
 
 	CC_LOCK_ASSERT(cc);
-	if (bintime_cmp(&to_bintime, &cc->cc_lastscan, <))
-		to_bintime = cc->cc_lastscan;
+	if (sbt < cc->cc_lastscan)
+		sbt = cc->cc_lastscan;
 	c->c_arg = arg;
 	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
 	if (flags & C_DIRECT_EXEC)
 		c->c_flags |= CALLOUT_DIRECT;
 	c->c_flags &= ~CALLOUT_PROCESSED;
 	c->c_func = func;
-	c->c_time = to_bintime;
+	c->c_time = sbt;
 	c->c_precision = precision;
-	CTR4(KTR_CALLOUT, "precision set for %p: %d.%08x%08x",
-	    c, c->c_precision.sec, (u_int) (c->c_precision.frac >> 32),
-	    (u_int) (c->c_precision.frac & 0xffffffff));
-	bucket = get_bucket(&c->c_time);
+	bucket = callout_get_bucket(c->c_time);
+	CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
+	    c, (int)(c->c_precision >> 32), 
+	    (u_int)(c->c_precision & 0xffffffff));
 	TAILQ_INSERT_TAIL(&cc->cc_callwheel[bucket], c, c_links.tqe);
 	/*
 	 * Inform the eventtimers(4) subsystem there's a new callout
 	 * that has been inserted, but only if really required.
 	 */
-	last = c->c_time;
-	bintime_add(&last, &c->c_precision);
-	if (callout_new_inserted != NULL &&
-	    (bintime_cmp(&last, &cc->cc_firstevent, <) ||
-	    !bintime_isset(&cc->cc_firstevent))) {
+	last = c->c_time + c->c_precision;
+	if (callout_new_inserted != NULL && ((last < cc->cc_firstevent) ||
+	    (cc->cc_firstevent == 0))) {
 		cc->cc_firstevent = last;
-		(*callout_new_inserted)(cpu, last, c->c_time);
+		(*callout_new_inserted)(cpu, sbintime2bintime(last), 
+		    sbintime2bintime(c->c_time));
 	}
 }
 
@@ -602,7 +602,7 @@ softclock_call_cc(struct callout *c, str
 	void (*new_func)(void *);
 	void *new_arg;
 	int flags, new_cpu;
-	struct bintime new_time;
+	sbintime_t new_time;
 #endif
 #ifdef DIAGNOSTIC
 	struct bintime bt1, bt2;
@@ -896,27 +896,28 @@ DPCPU_DECLARE(struct bintime, hardclockt
  * callout_deactivate() - marks the callout as having been serviced
  */
 int
-callout_reset_bt_on(struct callout *c, struct bintime bt, struct bintime pr,
+callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
     void (*ftn)(void *), void *arg, int cpu, int flags)
 {
-	struct bintime to_bt, pr1;
+	sbintime_t to_sbt, pr;
+	struct bintime to_bt;
 	struct callout_cpu *cc;
 	int bucket, cancelled, direct;
 
 	cancelled = 0;
 	if (flags & C_ABSOLUTE) {
-		to_bt = bt;
+		to_sbt = sbt;
 	} else {
-		if ((flags & C_HARDCLOCK) && bintime_cmp(&bt, &tick_bt, <))
-			bt = tick_bt;
+		if ((flags & C_HARDCLOCK) && (sbt < tick_sbt))
+			sbt = tick_sbt;
 		if ((flags & C_HARDCLOCK) ||
 #ifdef NO_EVENTTIMERS
-		    bintime_cmp(&bt, &bt_timethreshold, >=)) {
-			getbinuptime(&to_bt);
+		    sbt >= sbt_timethreshold) {
+			getsbinuptime(&to_sbt);
 			/* Add safety belt for the case of hz > 1000. */
-			bintime_addx(&to_bt, tc_tick_bt.frac - tick_bt.frac);
+			to_sbt += (tc_tick_dur - tick_dur);
 #else
-		    bintime_cmp(&bt, &bt_tickthreshold, >=)) {
+		    sbt >= sbt_tickthreshold) {
 			/*
 			 * Obtain the time of the last hardclock() call on
 			 * this CPU directly from the kern_clocksource.c.
@@ -925,20 +926,18 @@ callout_reset_bt_on(struct callout *c, s
 			 */
 			spinlock_enter();
 			to_bt = DPCPU_GET(hardclocktime);
+			to_sbt = bintime2sbintime(to_bt);
 			spinlock_exit();
 #endif
 			if ((flags & C_HARDCLOCK) == 0)
-				bintime_addx(&to_bt, tick_bt.frac);
+				to_sbt += tick_sbt;
 		} else
-			binuptime(&to_bt);
-		bintime_add(&to_bt, &bt);
-		pr1 = bt;
-		if (C_PRELGET(flags) < 0)
-			bintime_shift(&pr1, -tc_timeexp);
-		else
-			bintime_shift(&pr1, -C_PRELGET(flags));
-		if (bintime_cmp(&pr1, &pr, >))
-			pr = pr1;
+			sbinuptime(&to_sbt);
+		to_sbt += sbt;
+		pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
+		    sbt >> C_PRELGET(flags));
+		if (pr > precision)
+			precision = pr;
 	}
 	/*
 	 * Don't allow migration of pre-allocated callouts lest they
@@ -975,7 +974,7 @@ callout_reset_bt_on(struct callout *c, s
 			if (cc->cc_exec_next_dir == c)
 				cc->cc_exec_next_dir = TAILQ_NEXT(c,
 				    c_links.tqe);
-			bucket = get_bucket(&c->c_time);
+			bucket = callout_get_bucket(c->c_time);
 			TAILQ_REMOVE(&cc->cc_callwheel[bucket], c,
 			    c_links.tqe);
 		} else
@@ -994,14 +993,14 @@ callout_reset_bt_on(struct callout *c, s
 		if (cc->cc_exec_entity[direct].cc_curr == c) {
 			cc->cc_exec_entity[direct].ce_migration_cpu = cpu;
 			cc->cc_exec_entity[direct].ce_migration_time
-			    = to_bt;
+			    = to_sbt;
 			cc->cc_exec_entity[direct].ce_migration_func = ftn;
 			cc->cc_exec_entity[direct].ce_migration_arg = arg;
 			c->c_flags |= CALLOUT_DFRMIGRATION;
 			CTR6(KTR_CALLOUT,
 		    "migration of %p func %p arg %p in %d.%08x to %u deferred",
-			    c, c->c_func, c->c_arg, (int)(to_bt.sec),
-			    (u_int)(to_bt.frac >> 32), cpu);
+			    c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
+			    (u_int)(to_sbt & 0xffffffff), cpu);
 			CC_UNLOCK(cc);
 			return (cancelled);
 		}
@@ -1009,10 +1008,10 @@ callout_reset_bt_on(struct callout *c, s
 	}
 #endif
 
-	callout_cc_add(c, cc, to_bt, pr, ftn, arg, cpu, flags);
+	callout_cc_add(c, cc, to_sbt, pr, ftn, arg, cpu, flags);
 	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
-	    cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_bt.sec),
-	    (u_int)(to_bt.frac >> 32));
+	    cancelled ? "re" : "", c, c->c_func, c->c_arg,(int)(to_sbt >> 32),
+	    (u_int)(to_sbt & 0xffffffff));
 	CC_UNLOCK(cc);
 
 	return (cancelled);
@@ -1197,7 +1196,7 @@ again:
 	if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
 		if (cc->cc_exec_next_dir == c)
 			cc->cc_exec_next_dir = TAILQ_NEXT(c, c_links.tqe);
-		bucket = get_bucket(&c->c_time);
+		bucket = callout_get_bucket(c->c_time);
 		TAILQ_REMOVE(&cc->cc_callwheel[bucket], c,
 		    c_links.tqe);
 	} else

Modified: projects/calloutng/sys/kern/subr_log.c
==============================================================================
--- projects/calloutng/sys/kern/subr_log.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/kern/subr_log.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -117,9 +117,8 @@ logopen(struct cdev *dev, int flags, int
 		return (EBUSY);
 	}
 	log_open = 1;
-	callout_reset_bt(&logsoftc.sc_callout,
-	    ticks2bintime(hz / log_wakeups_per_second), zero_bt,
-	    logtimeout, NULL, C_PREL(1) | C_HARDCLOCK);
+	callout_reset_sbt(&logsoftc.sc_callout,
+	    (SBT_1S / log_wakeups_per_second), 0, logtimeout, NULL, C_PREL(1));
 	mtx_unlock(&msgbuf_lock);
 
 	fsetown(td->td_proc->p_pid, &logsoftc.sc_sigio);	/* signal process only */
@@ -247,9 +246,8 @@ done:
 		printf("syslog wakeup is less than one.  Adjusting to 1.\n");
 		log_wakeups_per_second = 1;
 	}
-	callout_reset_bt(&logsoftc.sc_callout,
-	    ticks2bintime(hz / log_wakeups_per_second), zero_bt,
-	    logtimeout, NULL, C_PREL(1) | C_HARDCLOCK);
+	callout_reset_sbt(&logsoftc.sc_callout,
+	    (SBT_1S / log_wakeups_per_second), 0, logtimeout, NULL, C_PREL(1));
 }
 
 /*ARGSUSED*/

Modified: projects/calloutng/sys/kern/subr_param.c
==============================================================================
--- projects/calloutng/sys/kern/subr_param.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/kern/subr_param.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -84,7 +84,8 @@ static int sysctl_kern_vm_guest(SYSCTL_H
 int	hz;				/* system clock's frequency */
 int	tick;				/* usec per tick (1000000 / hz) */
 struct bintime tick_bt;			/* bintime per tick (1s / hz) */
-struct bintime zero_bt = { 0, 0 };	/* bintime per tick (1s / hz) */
+struct bintime zero_bt = { 0, 0 };     /* bintime per tick (1s / hz) */
+sbintime_t tick_sbt;
 int	maxusers;			/* base tunable */
 int	maxproc;			/* maximum # of processes */
 int	maxprocperuid;			/* max # of procs per user */
@@ -224,6 +225,7 @@ init_param1(void)
 		hz = vm_guest > VM_GUEST_NO ? HZ_VM : HZ;
 	tick = 1000000 / hz;
 	FREQ2BT(hz, &tick_bt);
+	tick_sbt = bintime2sbintime(tick_bt);
 
 #ifdef VM_SWZONE_SIZE_MAX
 	maxswzone = VM_SWZONE_SIZE_MAX;

Modified: projects/calloutng/sys/kern/subr_sleepqueue.c
==============================================================================
--- projects/calloutng/sys/kern/subr_sleepqueue.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/kern/subr_sleepqueue.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -363,7 +363,7 @@ sleepq_add(void *wchan, struct lock_obje
  * sleep queue after timo ticks if the thread has not already been awakened.
  */
 void
-sleepq_set_timeout_bt(void *wchan, struct bintime bt, struct bintime pr,
+sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
     int flags)
 {
 
@@ -376,7 +376,7 @@ sleepq_set_timeout_bt(void *wchan, struc
 	MPASS(TD_ON_SLEEPQ(td));
 	MPASS(td->td_sleepqueue == NULL);
 	MPASS(wchan != NULL);
-	callout_reset_bt_on(&td->td_slpcallout, bt, pr,
+	callout_reset_sbt_on(&td->td_slpcallout, sbt, pr,
 	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_DIRECT_EXEC);
 }
 

Modified: projects/calloutng/sys/kern/sys_generic.c
==============================================================================
--- projects/calloutng/sys/kern/sys_generic.c	Wed Feb  6 15:18:46 2013	(r246412)
+++ projects/calloutng/sys/kern/sys_generic.c	Wed Feb  6 15:23:55 2013	(r246413)
@@ -102,7 +102,7 @@ static int	dofilewrite(struct thread *, 
 		    off_t, int);
 static void	doselwakeup(struct selinfo *, int);
 static void	seltdinit(struct thread *);
-static int	seltdwait(struct thread *, struct bintime, struct bintime);
+static int	seltdwait(struct thread *, sbintime_t, sbintime_t);
 static void	seltdclear(struct thread *);
 
 /*
@@ -903,7 +903,7 @@ kern_select(struct thread *td, int nd, f
 	 */
 	fd_mask s_selbits[howmany(2048, NFDBITS)];
 	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
-	struct bintime abt, precision, rbt;
+	sbintime_t asbt, precision, rsbt;
 	struct timeval rtv;
 	int error, lf, ndu;
 	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
@@ -1003,23 +1003,21 @@ kern_select(struct thread *td, int nd, f
 			error = EINVAL;
 			goto done;
 		}
-		timeval2bintime(&rtv, &rbt);
-		precision = rbt;
-		bintime_shift(&precision, -tc_timeexp);
-		if (TIMESEL(&abt, &rbt))
-			bintime_add(&abt, &tc_tick_bt);
-		bintime_add(&abt, &rbt);
-	} else {
-		abt.sec = (time_t)-1;
-		abt.frac = 0;
-	}
+		rsbt = timeval2sbintime(rtv);
+		precision = rsbt;
+		precision >>= tc_precexp;
+		if (TIMESEL(&asbt, rsbt))
+			asbt += tc_tick_sbt;
+		asbt += rsbt; 
+	} else
+		asbt = -1;
 	seltdinit(td);
 	/* Iterate until the timeout expires or descriptors become ready. */
 	for (;;) {
 		error = selscan(td, ibits, obits, nd);
 		if (error || td->td_retval[0] != 0)
 			break;
-		error = seltdwait(td, abt, precision);
+		error = seltdwait(td, asbt, precision);
 		if (error)
 			break;
 		error = selrescan(td, ibits, obits);
@@ -1251,7 +1249,7 @@ sys_poll(td, uap)
 {
 	struct pollfd *bits;
 	struct pollfd smallbits[32];
-	struct bintime abt, precision, rbt;
+	sbintime_t asbt, precision, rsbt;
 	int error;
 	u_int nfds;
 	size_t ni;
@@ -1272,24 +1270,21 @@ sys_poll(td, uap)
 			error = EINVAL;
 			goto done;
 		}
-		rbt.sec = uap->timeout / 1000;
-		rbt.frac = (uap->timeout % 1000) * (((uint64_t)1 << 63) / 500);
-		precision = rbt;
-		bintime_shift(&precision, -tc_timeexp);
-		if (TIMESEL(&abt, &rbt))
-			bintime_add(&abt, &tc_tick_bt);
-		bintime_add(&abt, &rbt);
-	} else {
-		abt.sec = (time_t)-1;
-		abt.frac = 0;
-	}
+		rsbt = SBT_1MS * uap->timeout; 
+		precision = rsbt;
+		precision >>= tc_precexp;
+		if (TIMESEL(&asbt, rsbt))
+			asbt += tc_tick_sbt;
+		asbt += rsbt;
+	} else
+		asbt = -1;
 	seltdinit(td);
 	/* Iterate until the timeout expires or descriptors become ready. */
 	for (;;) {
 		error = pollscan(td, bits, nfds);
 		if (error || td->td_retval[0] != 0)
 			break;
-		error = seltdwait(td, abt, precision);
+		error = seltdwait(td, asbt, precision);
 		if (error)
 			break;
 		error = pollrescan(td);
@@ -1631,7 +1626,7 @@ out:
 }
 
 static int
-seltdwait(struct thread *td, struct bintime bt, struct bintime precision)
+seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
 {
 	struct seltd *stp;
 	int error;
@@ -1650,11 +1645,11 @@ seltdwait(struct thread *td, struct bint
 		mtx_unlock(&stp->st_mtx);
 		return (0);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


More information about the svn-src-projects mailing list