MFC 7.0 calcru changes

David O'Brien obrien at freebsd.org
Thu Aug 23 19:23:15 PDT 2007


This is a patch to MFC what I believe are all of the calcru-related
changes made since 6-STABLE was branched and HEAD moved on toward 7.0.

If anyone spots any changes I missed, please yell out.

-- 
-- David  (obrien at FreeBSD.org)

Index: amd64/amd64/mp_machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/amd64/amd64/mp_machdep.c,v
retrieving revision 1.260.2.11
diff -u -p -r1.260.2.11 mp_machdep.c
--- amd64/amd64/mp_machdep.c	5 May 2007 21:13:16 -0000	1.260.2.11
+++ amd64/amd64/mp_machdep.c	23 Aug 2007 18:05:54 -0000
@@ -577,7 +577,7 @@ init_secondary(void)
 	spinlock_exit();
 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
 
-	binuptime(PCPU_PTR(switchtime));
+	PCPU_SET(switchtime, cpu_ticks());
 	PCPU_SET(switchticks, ticks);
 
 	cpu_throw(NULL, choosethread());	/* doesn't return */
Index: amd64/amd64/trap.c
===================================================================
RCS file: /home/ncvs/src/sys/amd64/amd64/trap.c,v
retrieving revision 1.289.2.6
diff -u -p -r1.289.2.6 trap.c
--- amd64/amd64/trap.c	14 Aug 2007 19:42:51 -0000	1.289.2.6
+++ amd64/amd64/trap.c	23 Aug 2007 20:28:33 -0000
@@ -162,7 +162,7 @@ trap(frame)
 {
 	struct thread *td = curthread;
 	struct proc *p = td->td_proc;
-	u_int sticks = 0, type;
+	u_int type;
 	int i = 0, ucode = 0, code;
 
 	PCPU_LAZY_INC(cnt.v_trap);
@@ -242,7 +242,7 @@ trap(frame)
         if (ISPL(frame.tf_cs) == SEL_UPL) {
 		/* user trap */
 
-		sticks = td->td_sticks;
+		td->td_pticks = 0;
 		td->td_frame = &frame;
 		if (td->td_ucred != p->p_ucred) 
 			cred_update_thread(td);
@@ -488,7 +488,7 @@ trap(frame)
 #endif
 
 user:
-	userret(td, &frame, sticks);
+	userret(td, &frame);
 	mtx_assert(&Giant, MA_NOTOWNED);
 userout:
 out:
@@ -703,7 +703,6 @@ syscall(frame)
 	struct thread *td = curthread;
 	struct proc *p = td->td_proc;
 	register_t orig_tf_rflags;
-	u_int sticks;
 	int error;
 	int narg;
 	register_t args[8];
@@ -728,7 +727,7 @@ syscall(frame)
 
 	reg = 0;
 	regcnt = 6;
-	sticks = td->td_sticks;
+	td->td_pticks = 0;
 	td->td_frame = &frame;
 	if (td->td_ucred != p->p_ucred) 
 		cred_update_thread(td);
@@ -851,7 +850,7 @@ syscall(frame)
 	/*
 	 * Handle reschedule and other end-of-syscall issues
 	 */
-	userret(td, &frame, sticks);
+	userret(td, &frame);
 
 	CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td,
 	    td->td_proc->p_pid, td->td_proc->p_comm, code);
Index: amd64/amd64/tsc.c
===================================================================
RCS file: /home/ncvs/src/sys/amd64/amd64/tsc.c,v
retrieving revision 1.205
diff -u -p -r1.205 tsc.c
--- amd64/amd64/tsc.c	17 Nov 2003 08:58:13 -0000	1.205
+++ amd64/amd64/tsc.c	23 Aug 2007 20:32:25 -0000
@@ -77,6 +77,7 @@ init_TSC(void)
 	tsc_freq = tscval[1] - tscval[0];
 	if (bootverbose)
 		printf("TSC clock: %lu Hz\n", tsc_freq);
+	set_cputicker(rdtsc, tsc_freq, 1);
 }
 
 
Index: amd64/ia32/ia32_syscall.c
===================================================================
RCS file: /home/ncvs/src/sys/amd64/ia32/ia32_syscall.c,v
retrieving revision 1.8.2.2
diff -u -p -r1.8.2.2 ia32_syscall.c
--- amd64/ia32/ia32_syscall.c	11 Aug 2006 18:42:48 -0000	1.8.2.2
+++ amd64/ia32/ia32_syscall.c	23 Aug 2007 18:11:09 -0000
@@ -96,7 +96,6 @@ ia32_syscall(struct trapframe frame)
 	struct thread *td = curthread;
 	struct proc *p = td->td_proc;
 	register_t orig_tf_rflags;
-	u_int sticks;
 	int error;
 	int narg;
 	u_int32_t args[8];
@@ -109,7 +108,7 @@ ia32_syscall(struct trapframe frame)
 	 */
 	PCPU_LAZY_INC(cnt.v_syscall);
 
-	sticks = td->td_sticks;
+	td->td_pticks = 0;
 	td->td_frame = &frame;
 	if (td->td_ucred != p->p_ucred) 
 		cred_update_thread(td);
@@ -239,7 +238,7 @@ ia32_syscall(struct trapframe frame)
 	/*
 	 * Handle reschedule and other end-of-syscall issues
 	 */
-	userret(td, &frame, sticks);
+	userret(td, &frame);
 
 	CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td,
 	    td->td_proc->p_pid, td->td_proc->p_comm, code);
Index: arm/arm/trap.c
===================================================================
RCS file: /home/ncvs/src/sys/arm/arm/trap.c,v
retrieving revision 1.17.2.3
diff -u -p -r1.17.2.3 trap.c
--- arm/arm/trap.c	26 Feb 2007 20:38:31 -0000	1.17.2.3
+++ arm/arm/trap.c	23 Aug 2007 20:27:08 -0000
@@ -230,7 +230,6 @@ data_abort_handler(trapframe_t *tf)
 	vm_prot_t ftype;
 	void *onfault;
 	vm_offset_t va;
-	u_int sticks = 0;
 	int error = 0;
 	struct ksig ksig;
 	struct proc *p;
@@ -257,7 +256,8 @@ data_abort_handler(trapframe_t *tf)
 	user = TRAP_USERMODE(tf);
 
 	if (user) {
-		sticks = td->td_sticks;                                                         td->td_frame = tf;		
+		td->td_pticks = 0;
+		td->td_frame = tf;		
 		if (td->td_ucred != td->td_proc->p_ucred)
 			cred_update_thread(td);
 		if (td->td_pflags & TDP_SA)
@@ -463,7 +463,7 @@ do_trapsignal:
 out:
 	/* If returning to user mode, make sure to invoke userret() */
 	if (user)
-		userret(td, tf, sticks);
+		userret(td, tf);
 }
 
 /*
@@ -705,7 +705,6 @@ prefetch_abort_handler(trapframe_t *tf)
 	struct vm_map *map;
 	vm_offset_t fault_pc, va;
 	int error = 0;
-	u_int sticks = 0;
 	struct ksig ksig;
 
 
@@ -755,7 +754,7 @@ prefetch_abort_handler(trapframe_t *tf)
 	/* Prefetch aborts cannot happen in kernel mode */
 	if (__predict_false(!TRAP_USERMODE(tf)))
 		dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig);
-	sticks = td->td_sticks;
+	td->td_pticks = 0;
 
 
 	/* Ok validate the address, can only execute in USER space */
@@ -810,7 +809,7 @@ do_trapsignal:
 	call_trapsignal(td, ksig.signb, ksig.code);
 
 out:
-	userret(td, tf, sticks);
+	userret(td, tf);
 
 }
 
@@ -872,10 +871,9 @@ syscall(struct thread *td, trapframe_t *
 	register_t *ap, *args, copyargs[MAXARGS];
 	struct sysent *callp;
 	int locked = 0;
-	u_int sticks = 0;
 
 	PCPU_LAZY_INC(cnt.v_syscall);
-	sticks = td->td_sticks;
+	td->td_pticks = 0;
 	if (td->td_ucred != td->td_proc->p_ucred)
 		cred_update_thread(td);
 	switch (insn & SWI_OS_MASK) {
@@ -884,11 +882,11 @@ syscall(struct thread *td, trapframe_t *
 		break;
 	default:
 		trapsignal(td, SIGILL, 0);
-		userret(td, frame, td->td_sticks);
+		userret(td, frame);
 		return;
 	}
 	code = insn & 0x000fffff;                
-	sticks = td->td_sticks;
+	td->td_pticks = 0;
 	ap = &frame->tf_r0;
 	if (code == SYS_syscall) {
 		code = *ap++;
@@ -974,7 +972,7 @@ bad:
 		mtx_unlock(&Giant);
 	
 	
-	userret(td, frame, sticks);
+	userret(td, frame);
 	CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td,
 	    td->td_proc->p_pid, td->td_proc->p_comm, code);
 	
@@ -996,6 +994,7 @@ swi_handler(trapframe_t *frame)
 
 	td->td_frame = frame;
 	
+	td->td_pticks = 0;
 	if (td->td_proc->p_flag & P_SA)
 		thread_user_enter(td);
 	/*
@@ -1004,7 +1003,7 @@ swi_handler(trapframe_t *frame)
 	 */
 	if (__predict_false(((frame->tf_pc - INSN_SIZE) & 3) != 0)) {
 		trapsignal(td, SIGILL, 0);
-		userret(td, frame, td->td_sticks);
+		userret(td, frame);
 		return;
 	}
 	insn = *(u_int32_t *)(frame->tf_pc - INSN_SIZE);
Index: i386/i386/geode.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/geode.c,v
retrieving revision 1.5.8.2
diff -u -p -r1.5.8.2 geode.c
--- i386/i386/geode.c	30 Mar 2007 19:17:37 -0000	1.5.8.2
+++ i386/i386/geode.c	23 Aug 2007 20:32:25 -0000
@@ -110,6 +110,20 @@ static struct timecounter geode_timecoun
 	1000
 };
 
+static uint64_t
+geode_cputicks(void)
+{
+	unsigned c;
+	static unsigned last;
+	static uint64_t offset;
+
+	c = inl(geode_counter);
+	if (c < last)
+		offset += (1LL << 32);
+	last = c;
+	return (offset | c);
+}
+
 /*
  * The GEODE watchdog runs from a 32kHz frequency.  One period of that is
  * 31250 nanoseconds which we round down to 2^14 nanoseconds.  The watchdog
@@ -176,6 +190,7 @@ geode_probe(device_t self)
 			tc_init(&geode_timecounter);
 			EVENTHANDLER_REGISTER(watchdog_list, geode_watchdog,
 			    NULL, 0);
+			set_cputicker(geode_cputicks, 27000000, 0);
 		}
 	} else if (pci_get_devid(self) == 0x0510100b) {
 		gpio = pci_read_config(self, PCIR_BAR(0), 4);
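
A note for anyone reviewing the geode.c hunk: geode_cputicks() turns the
32-bit Geode counter into a monotonic 64-bit tick count by watching for
wraparound, and the same idiom shows up again in tc_cpu_ticks() further
down.  Here is a rough standalone sketch of it (plain C; fake_counter and
read_hw_counter are made up for illustration and are not in the tree):

#include <stdint.h>
#include <stdio.h>

/* Pretend 32-bit hardware counter; wraps modulo 2^32. */
static uint32_t fake_counter;

static uint32_t
read_hw_counter(void)
{
	return (fake_counter);
}

/*
 * Extend a wrapping 32-bit counter to 64 bits: whenever the new reading
 * is smaller than the previous one the counter must have wrapped, so add
 * 2^32 to the running offset.  This only works if the counter is read
 * more often than once per wrap period (about 159 seconds at 27 MHz).
 */
static uint64_t
extend_counter(void)
{
	static uint32_t last;
	static uint64_t offset;
	uint32_t c;

	c = read_hw_counter();
	if (c < last)
		offset += (uint64_t)1 << 32;
	last = c;
	return (offset | c);
}

int
main(void)
{
	fake_counter = 0xfffffff0u;
	printf("%ju\n", (uintmax_t)extend_counter());
	fake_counter = 0x10;			/* wrapped around */
	printf("%ju\n", (uintmax_t)extend_counter());
	return (0);
}
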
Index: i386/i386/mp_machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/mp_machdep.c,v
retrieving revision 1.252.2.11
diff -u -p -r1.252.2.11 mp_machdep.c
--- i386/i386/mp_machdep.c	2 May 2007 16:16:57 -0000	1.252.2.11
+++ i386/i386/mp_machdep.c	23 Aug 2007 18:05:54 -0000
@@ -650,7 +650,7 @@ init_secondary(void)
 	spinlock_exit();
 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
 
-	binuptime(PCPU_PTR(switchtime));
+	PCPU_SET(switchtime, cpu_ticks());
 	PCPU_SET(switchticks, ticks);
 
 	cpu_throw(NULL, choosethread());	/* doesn't return */
Index: i386/i386/trap.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/trap.c,v
retrieving revision 1.277.2.6
diff -u -p -r1.277.2.6 trap.c
--- i386/i386/trap.c	14 Aug 2007 19:42:52 -0000	1.277.2.6
+++ i386/i386/trap.c	23 Aug 2007 20:27:51 -0000
@@ -176,7 +176,7 @@ trap(frame)
 {
 	struct thread *td = curthread;
 	struct proc *p = td->td_proc;
-	u_int sticks = 0, type;
+	u_int type;
 	int i = 0, ucode = 0, code;
 	vm_offset_t eva;
 #ifdef POWERFAIL_NMI
@@ -275,7 +275,7 @@ trap(frame)
 		!(PCPU_GET(curpcb)->pcb_flags & PCB_VM86CALL))) {
 		/* user trap */
 
-		sticks = td->td_sticks;
+		td->td_pticks = 0;
 		td->td_frame = &frame;
 		if (td->td_ucred != p->p_ucred) 
 			cred_update_thread(td);
@@ -650,7 +650,7 @@ trap(frame)
 #endif
 
 user:
-	userret(td, &frame, sticks);
+	userret(td, &frame);
 	mtx_assert(&Giant, MA_NOTOWNED);
 userout:
 out:
@@ -881,7 +881,6 @@ syscall(frame)
 	struct thread *td = curthread;
 	struct proc *p = td->td_proc;
 	register_t orig_tf_eflags;
-	u_int sticks;
 	int error;
 	int narg;
 	int args[8];
@@ -902,7 +901,7 @@ syscall(frame)
 	}
 #endif
 
-	sticks = td->td_sticks;
+	td->td_pticks = 0;
 	td->td_frame = &frame;
 	if (td->td_ucred != p->p_ucred) 
 		cred_update_thread(td);
@@ -1032,7 +1031,7 @@ syscall(frame)
 	/*
 	 * Handle reschedule and other end-of-syscall issues
 	 */
-	userret(td, &frame, sticks);
+	userret(td, &frame);
 
 	CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td,
 	    td->td_proc->p_pid, td->td_proc->p_comm, code);
Index: i386/i386/tsc.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/tsc.c,v
retrieving revision 1.204
diff -u -p -r1.204 tsc.c
--- i386/i386/tsc.c	21 Oct 2003 18:28:34 -0000	1.204
+++ i386/i386/tsc.c	23 Aug 2007 20:32:25 -0000
@@ -86,6 +86,7 @@ init_TSC(void)
 	tsc_freq = tscval[1] - tscval[0];
 	if (bootverbose)
 		printf("TSC clock: %ju Hz\n", (intmax_t)tsc_freq);
+	set_cputicker(rdtsc, tsc_freq, 1);
 }
 
 
Index: ia64/ia32/ia32_trap.c
===================================================================
RCS file: /home/ncvs/src/sys/ia64/ia32/ia32_trap.c,v
retrieving revision 1.5.2.3
diff -u -p -r1.5.2.3 ia32_trap.c
--- ia64/ia32/ia32_trap.c	19 Sep 2006 12:38:08 -0000	1.5.2.3
+++ ia64/ia32/ia32_trap.c	23 Aug 2007 20:29:46 -0000
@@ -217,7 +217,6 @@ ia32_trap(int vector, struct trapframe *
 	struct thread *td;
 	uint64_t ucode;
 	int sig;
-	u_int sticks;
 
 	KASSERT(TRAPF_USERMODE(tf), ("%s: In kernel mode???", __func__));
 
@@ -226,7 +225,7 @@ ia32_trap(int vector, struct trapframe *
 
 	td = curthread;
 	td->td_frame = tf;
-	sticks = td->td_sticks;
+	td->td_pticks = 0;
 	p = td->td_proc;
 	if (td->td_ucred != p->p_ucred)
 		cred_update_thread(td);
@@ -304,7 +303,7 @@ ia32_trap(int vector, struct trapframe *
 	trapsignal(td, sig, ucode);
 
 out:
-	userret(td, tf, sticks);
+	userret(td, tf);
 	mtx_assert(&Giant, MA_NOTOWNED);
 	do_ast(tf);
 }
Index: ia64/ia64/mp_machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/ia64/ia64/mp_machdep.c,v
retrieving revision 1.55.2.2
diff -u -p -r1.55.2.2 mp_machdep.c
--- ia64/ia64/mp_machdep.c	14 Feb 2006 03:40:49 -0000	1.55.2.2
+++ ia64/ia64/mp_machdep.c	23 Aug 2007 18:05:54 -0000
@@ -136,7 +136,7 @@ ia64_ap_startup(void)
 
 	mtx_lock_spin(&sched_lock);
 
-	binuptime(PCPU_PTR(switchtime));
+	PCPU_SET(switchtime, cpu_ticks());
 	PCPU_SET(switchticks, ticks);
 
 	ia64_set_tpr(0);
Index: ia64/ia64/trap.c
===================================================================
RCS file: /home/ncvs/src/sys/ia64/ia64/trap.c,v
retrieving revision 1.113.2.3
diff -u -p -r1.113.2.3 trap.c
--- ia64/ia64/trap.c	19 Sep 2006 12:38:08 -0000	1.113.2.3
+++ ia64/ia64/trap.c	23 Aug 2007 20:29:30 -0000
@@ -362,7 +362,6 @@ trap(int vector, struct trapframe *tf)
 	struct thread *td;
 	uint64_t ucode;
 	int error, sig, user;
-	u_int sticks;
 
 	user = TRAPF_USERMODE(tf) ? 1 : 0;
 
@@ -374,12 +373,11 @@ trap(int vector, struct trapframe *tf)
 
 	if (user) {
 		ia64_set_fpsr(IA64_FPSR_DEFAULT);
-		sticks = td->td_sticks;
+		td->td_pticks = 0;
 		td->td_frame = tf;
 		if (td->td_ucred != p->p_ucred)
 			cred_update_thread(td);
 	} else {
-		sticks = 0;		/* XXX bogus -Wuninitialized warning */
 		KASSERT(cold || td->td_ucred != NULL,
 		    ("kernel trap doesn't have ucred"));
 #ifdef KDB
@@ -875,7 +873,7 @@ trap(int vector, struct trapframe *tf)
 
 out:
 	if (user) {
-		userret(td, tf, sticks);
+		userret(td, tf);
 		mtx_assert(&Giant, MA_NOTOWNED);
 		do_ast(tf);
 	}
@@ -941,7 +939,6 @@ syscall(struct trapframe *tf)
 	struct thread *td;
 	uint64_t *args;
 	int code, error;
-	u_int sticks;
 
 	ia64_set_fpsr(IA64_FPSR_DEFAULT);
 
@@ -954,7 +951,7 @@ syscall(struct trapframe *tf)
 	td->td_frame = tf;
 	p = td->td_proc;
 
-	sticks = td->td_sticks;
+	td->td_pticks = 0;
 	if (td->td_ucred != p->p_ucred)
 		cred_update_thread(td);
 	if (p->p_flag & P_SA)
@@ -1035,7 +1032,7 @@ syscall(struct trapframe *tf)
 	/*
 	 * Handle reschedule and other end-of-syscall issues
 	 */
-	userret(td, tf, sticks);
+	userret(td, tf);
 
 	CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td,
 	    td->td_proc->p_pid, td->td_proc->p_comm, code);
Index: kern/init_main.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/init_main.c,v
retrieving revision 1.256.2.5
diff -u -p -r1.256.2.5 init_main.c
--- kern/init_main.c	28 Sep 2006 18:27:01 -0000	1.256.2.5
+++ kern/init_main.c	23 Aug 2007 18:05:54 -0000
@@ -460,11 +460,10 @@ proc0_post(void *dummy __unused)
 	sx_slock(&allproc_lock);
 	LIST_FOREACH(p, &allproc, p_list) {
 		microuptime(&p->p_stats->p_start);
-		p->p_rux.rux_runtime.sec = 0;
-		p->p_rux.rux_runtime.frac = 0;
+		p->p_rux.rux_runtime = 0;
 	}
 	sx_sunlock(&allproc_lock);
-	binuptime(PCPU_PTR(switchtime));
+	PCPU_SET(switchtime, cpu_ticks());
 	PCPU_SET(switchticks, ticks);
 
 	/*
Index: kern/kern_clock.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_clock.c,v
retrieving revision 1.178.2.4
diff -u -p -r1.178.2.4 kern_clock.c
--- kern/kern_clock.c	20 Feb 2007 22:56:28 -0000	1.178.2.4
+++ kern/kern_clock.c	23 Aug 2007 20:32:25 -0000
@@ -421,7 +421,7 @@ statclock(frame)
 		 */
 		if (p->p_flag & P_SA)
 			thread_statclock(1);
-		p->p_rux.rux_uticks++;
+		td->td_uticks++;
 		if (p->p_nice > NZERO)
 			cp_time[CP_NICE]++;
 		else
@@ -441,13 +441,13 @@ statclock(frame)
 		 */
 		if ((td->td_pflags & TDP_ITHREAD) ||
 		    td->td_intr_nesting_level >= 2) {
-			p->p_rux.rux_iticks++;
+			td->td_iticks++;
 			cp_time[CP_INTR]++;
 		} else {
 			if (p->p_flag & P_SA)
 				thread_statclock(0);
+			td->td_pticks++;
 			td->td_sticks++;
-			p->p_rux.rux_sticks++;
 			if (td != PCPU_GET(idlethread))
 				cp_time[CP_SYS]++;
 			else
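
The statclock() hunk is the other half of the accounting change: statclock
ticks are now charged to per-thread counters and only folded into p_rux at
mi_switch()/thread_exit()/calcru() time.  A minimal sketch of that
accumulate-then-fold pattern (standalone C with simplified stand-in
structs, not the real kernel types):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for struct thread and struct rusage_ext. */
struct thr {
	unsigned uticks, sticks, iticks;	/* ticks since the last fold */
};

struct rux {
	uint64_t uticks, sticks, iticks;	/* process-wide totals */
};

/* statclock()-style hit: charge the tick to the thread only. */
static void
stat_hit_user(struct thr *td)
{
	td->uticks++;
}

/* mi_switch()/calcru()-style fold: move thread counts into the process. */
static void
fold(struct rux *p, struct thr *td)
{
	p->uticks += td->uticks; td->uticks = 0;
	p->sticks += td->sticks; td->sticks = 0;
	p->iticks += td->iticks; td->iticks = 0;
}

int
main(void)
{
	struct thr td = { 0, 0, 0 };
	struct rux p = { 0, 0, 0 };

	stat_hit_user(&td);
	stat_hit_user(&td);
	fold(&p, &td);
	printf("process uticks %ju, thread uticks %u\n",
	    (uintmax_t)p.uticks, td.uticks);
	return (0);
}
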
Index: kern/kern_fork.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_fork.c,v
retrieving revision 1.252.2.6
diff -u -p -r1.252.2.6 kern_fork.c
--- kern/kern_fork.c	19 Jul 2007 18:46:32 -0000	1.252.2.6
+++ kern/kern_fork.c	23 Aug 2007 18:11:10 -0000
@@ -856,7 +856,7 @@ fork_return(td, frame)
 	struct trapframe *frame;
 {
 
-	userret(td, frame, 0);
+	userret(td, frame);
 #ifdef KTRACE
 	if (KTRPOINT(td, KTR_SYSRET))
 		ktrsysret(SYS_fork, 0, 0);
Index: kern/kern_proc.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_proc.c,v
retrieving revision 1.230.2.5
diff -u -p -r1.230.2.5 kern_proc.c
--- kern/kern_proc.c	29 Sep 2006 07:41:24 -0000	1.230.2.5
+++ kern/kern_proc.c	23 Aug 2007 20:32:25 -0000
@@ -621,7 +621,6 @@ fill_kinfo_proc_only(struct proc *p, str
 	struct thread *td0;
 	struct tty *tp;
 	struct session *sp;
-	struct timeval tv;
 	struct ucred *cred;
 	struct sigacts *ps;
 
@@ -692,8 +691,7 @@ fill_kinfo_proc_only(struct proc *p, str
 	kp->ki_swtime = p->p_swtime;
 	kp->ki_pid = p->p_pid;
 	kp->ki_nice = p->p_nice;
-	bintime2timeval(&p->p_rux.rux_runtime, &tv);
-	kp->ki_runtime = tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;
+	kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
 	mtx_unlock_spin(&sched_lock);
 	if ((p->p_sflag & PS_INMEM) && p->p_stats != NULL) {
 		kp->ki_start = p->p_stats->p_start;
Index: kern/kern_resource.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_resource.c,v
retrieving revision 1.148.2.3
diff -u -p -r1.148.2.3 kern_resource.c
--- kern/kern_resource.c	19 Jul 2007 18:46:32 -0000	1.148.2.3
+++ kern/kern_resource.c	23 Aug 2007 21:25:37 -0000
@@ -694,125 +694,132 @@ getrlimit(td, uap)
 }
 
 /*
- * Transform the running time and tick information in proc p into user,
- * system, and interrupt time usage.
+ * Transform the running time and tick information for children of proc p
+ * into user and system time usage.
  */
 void
-calcru(p, up, sp)
+calccru(p, up, sp)
 	struct proc *p;
 	struct timeval *up;
 	struct timeval *sp;
 {
-	struct bintime bt;
+
+	PROC_LOCK_ASSERT(p, MA_OWNED);
+	calcru1(p, &p->p_crux, up, sp);
+}
+
+/*
+ * Transform the running time and tick information in proc p into user
+ * and system time usage.  If appropriate, include the current time slice
+ * on this CPU.
+ */
+void
+calcru(struct proc *p, struct timeval *up, struct timeval *sp)
+{
 	struct rusage_ext rux;
 	struct thread *td;
-	int bt_valid;
+	uint64_t u;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	mtx_assert(&sched_lock, MA_NOTOWNED);
-	bt_valid = 0;
 	mtx_lock_spin(&sched_lock);
-	rux = p->p_rux;
-	FOREACH_THREAD_IN_PROC(p, td) {
-		if (TD_IS_RUNNING(td)) {
-			/*
-			 * Adjust for the current time slice.  This is
-			 * actually fairly important since the error here is
-			 * on the order of a time quantum which is much
-			 * greater than the precision of binuptime().
-			 */
-			KASSERT(td->td_oncpu != NOCPU,
-			    ("%s: running thread has no CPU", __func__));
-			if (!bt_valid) {
-				binuptime(&bt);
-				bt_valid = 1;
-			}
-			bintime_add(&rux.rux_runtime, &bt);
-			bintime_sub(&rux.rux_runtime,
-			    &pcpu_find(td->td_oncpu)->pc_switchtime);
-		}
+
+	/*
+	 * If we are getting stats for the current process, then add in the
+	 * stats that this thread has accumulated in its current time slice.
+	 * We reset the thread and CPU state as if we had performed a context
+	 * switch right here.
+	 */
+	if (curthread->td_proc == p) {
+		td = curthread;
+		u = cpu_ticks();
+		p->p_rux.rux_runtime += u - PCPU_GET(switchtime);
+		PCPU_SET(switchtime, u);
+		p->p_rux.rux_uticks += td->td_uticks;
+		td->td_uticks = 0;
+		p->p_rux.rux_iticks += td->td_iticks;
+		td->td_iticks = 0;
+		p->p_rux.rux_sticks += td->td_sticks;
+		td->td_sticks = 0;
 	}
+	/* Work on a copy of p_rux so we can let go of sched_lock */
+	rux = p->p_rux;
 	mtx_unlock_spin(&sched_lock);
 	calcru1(p, &rux, up, sp);
+	/* Update the result from the p_rux copy */
 	p->p_rux.rux_uu = rux.rux_uu;
 	p->p_rux.rux_su = rux.rux_su;
-	p->p_rux.rux_iu = rux.rux_iu;
-}
-
-void
-calccru(p, up, sp)
-	struct proc *p;
-	struct timeval *up;
-	struct timeval *sp;
-{
-
-	PROC_LOCK_ASSERT(p, MA_OWNED);
-	calcru1(p, &p->p_crux, up, sp);
+	p->p_rux.rux_tu = rux.rux_tu;
 }
 
 static void
-calcru1(p, ruxp, up, sp)
-	struct proc *p;
-	struct rusage_ext *ruxp;
-	struct timeval *up;
-	struct timeval *sp;
+calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
+    struct timeval *sp)
 {
-	struct timeval tv;
-	/* {user, system, interrupt, total} {ticks, usec}; previous tu: */
-	u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;
+	/* {user, system, interrupt, total} {ticks, usec}: */
+	u_int64_t ut, uu, st, su, it, tt, tu;
 
 	ut = ruxp->rux_uticks;
 	st = ruxp->rux_sticks;
 	it = ruxp->rux_iticks;
 	tt = ut + st + it;
 	if (tt == 0) {
+		/* Avoid divide by zero */
 		st = 1;
 		tt = 1;
 	}
-	bintime2timeval(&ruxp->rux_runtime, &tv);
-	tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
-	ptu = ruxp->rux_uu + ruxp->rux_su + ruxp->rux_iu;
-	if (tu < ptu) {
-		printf(
-"calcru: runtime went backwards from %ju usec to %ju usec for pid %d (%s)\n",
-		    (uintmax_t)ptu, (uintmax_t)tu, p->p_pid, p->p_comm);
-		tu = ptu;
-	}
+	tu = cputick2usec(ruxp->rux_runtime);
 	if ((int64_t)tu < 0) {
+		/* XXX: this should be an assert /phk */
 		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
 		    (intmax_t)tu, p->p_pid, p->p_comm);
-		tu = ptu;
+		tu = ruxp->rux_tu;
 	}
 
-	/* Subdivide tu. */
-	uu = (tu * ut) / tt;
-	su = (tu * st) / tt;
-	iu = tu - uu - su;
-
-	/* Enforce monotonicity. */
-	if (uu < ruxp->rux_uu || su < ruxp->rux_su || iu < ruxp->rux_iu) {
+	if (tu >= ruxp->rux_tu) {
+		/*
+		 * The normal case, time increased.
+		 * Enforce monotonicity of bucketed numbers.
+		 */
+		uu = (tu * ut) / tt;
 		if (uu < ruxp->rux_uu)
 			uu = ruxp->rux_uu;
-		else if (uu + ruxp->rux_su + ruxp->rux_iu > tu)
-			uu = tu - ruxp->rux_su - ruxp->rux_iu;
-		if (st == 0)
+		su = (tu * st) / tt;
+		if (su < ruxp->rux_su)
 			su = ruxp->rux_su;
-		else {
-			su = ((tu - uu) * st) / (st + it);
-			if (su < ruxp->rux_su)
-				su = ruxp->rux_su;
-			else if (uu + su + ruxp->rux_iu > tu)
-				su = tu - uu - ruxp->rux_iu;
-		}
-		KASSERT(uu + su + ruxp->rux_iu <= tu,
-		    ("calcru: monotonisation botch 1"));
-		iu = tu - uu - su;
-		KASSERT(iu >= ruxp->rux_iu,
-		    ("calcru: monotonisation botch 2"));
+	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
+		/* 
+		 * When we calibrate the cputicker, it is not uncommon to
+		 * see the presumably fixed frequency increase slightly over
+		 * time as a result of thermal stabilization and NTP
+		 * discipline (of the reference clock).  We therefore ignore
+		 * a bit of backwards slop because we expect to catch up
+		 * shortly.  We use a 3 microsecond limit to catch low
+		 * counts and a 1% limit for high counts.
+		 */
+		uu = ruxp->rux_uu;
+		su = ruxp->rux_su;
+		tu = ruxp->rux_tu;
+	} else { /* tu < ruxp->rux_tu */
+		/*
+		 * What happened here was likely that a laptop, which ran at
+		 * a reduced clock frequency at boot, kicked into high gear.
+		 * The wisdom of spamming this message in that case is
+		 * dubious, but it might also be indicative of something
+		 * serious, so let's keep it and hope laptops can be made
+		 * more truthful about their CPU speed via ACPI.
+		 */
+		printf("calcru: runtime went backwards from %ju usec "
+		    "to %ju usec for pid %d (%s)\n",
+		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
+		    p->p_pid, p->p_comm);
+		uu = (tu * ut) / tt;
+		su = (tu * st) / tt;
 	}
+
 	ruxp->rux_uu = uu;
 	ruxp->rux_su = su;
-	ruxp->rux_iu = iu;
+	ruxp->rux_tu = tu;
 
 	up->tv_sec = uu / 1000000;
 	up->tv_usec = uu % 1000000;
@@ -883,13 +890,13 @@ ruadd(ru, rux, ru2, rux2)
 	register long *ip, *ip2;
 	register int i;
 
-	bintime_add(&rux->rux_runtime, &rux2->rux_runtime);
+	rux->rux_runtime += rux2->rux_runtime;
 	rux->rux_uticks += rux2->rux_uticks;
 	rux->rux_sticks += rux2->rux_sticks;
 	rux->rux_iticks += rux2->rux_iticks;
 	rux->rux_uu += rux2->rux_uu;
 	rux->rux_su += rux2->rux_su;
-	rux->rux_iu += rux2->rux_iu;
+	rux->rux_tu += rux2->rux_tu;
 	if (ru->ru_maxrss < ru2->ru_maxrss)
 		ru->ru_maxrss = ru2->ru_maxrss;
 	ip = &ru->ru_first;
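
To make the reworked calcru1() easier to review: the total runtime (now
taken from the cputicker and converted with cputick2usec()) is split
between user and system time in proportion to the statclock tick counts,
the per-bucket results are clamped so they never go backwards, and small
backwards steps in total runtime (3 usec or 1%) are ignored entirely.
A tiny worked example with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Made-up sample: 60 user, 30 system, 10 interrupt statclock ticks. */
	uint64_t ut = 60, st = 30, it = 10;
	uint64_t tt = ut + st + it;		/* 100 ticks in total */
	uint64_t tu = 1000000;			/* 1 second of runtime, in usec */
	uint64_t prev_uu = 650000;		/* user usec reported last time */
	uint64_t uu, su;

	uu = (tu * ut) / tt;			/* 600000 usec of user time */
	su = (tu * st) / tt;			/* 300000 usec of system time */

	/* Monotonicity: never report less than was already reported. */
	if (uu < prev_uu)
		uu = prev_uu;			/* clamps to 650000 here */

	printf("user %ju usec, system %ju usec\n",
	    (uintmax_t)uu, (uintmax_t)su);
	return (0);
}
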
Index: kern/kern_synch.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_synch.c,v
retrieving revision 1.270.2.6
diff -u -p -r1.270.2.6 kern_synch.c
--- kern/kern_synch.c	6 Jul 2006 08:32:50 -0000	1.270.2.6
+++ kern/kern_synch.c	23 Aug 2007 20:32:25 -0000
@@ -337,7 +337,7 @@ wakeup_one(ident)
 void
 mi_switch(int flags, struct thread *newtd)
 {
-	struct bintime new_switchtime;
+	uint64_t new_switchtime;
 	struct thread *td;
 	struct proc *p;
 
@@ -366,9 +366,14 @@ mi_switch(int flags, struct thread *newt
 	 * Compute the amount of time during which the current
 	 * process was running, and add that to its total so far.
 	 */
-	binuptime(&new_switchtime);
-	bintime_add(&p->p_rux.rux_runtime, &new_switchtime);
-	bintime_sub(&p->p_rux.rux_runtime, PCPU_PTR(switchtime));
+	new_switchtime = cpu_ticks();
+	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
+	p->p_rux.rux_uticks += td->td_uticks;
+	td->td_uticks = 0;
+	p->p_rux.rux_iticks += td->td_iticks;
+	td->td_iticks = 0;
+	p->p_rux.rux_sticks += td->td_sticks;
+	td->td_sticks = 0;
 
 	td->td_generation++;	/* bump preempt-detect counter */
 
@@ -387,7 +392,7 @@ mi_switch(int flags, struct thread *newt
 	 * it reaches the max, arrange to kill the process in ast().
 	 */
 	if (p->p_cpulimit != RLIM_INFINITY &&
-	    p->p_rux.rux_runtime.sec >= p->p_cpulimit) {
+	    p->p_rux.rux_runtime >= p->p_cpulimit * cpu_tickrate()) {
 		p->p_sflag |= PS_XCPU;
 		td->td_flags |= TDF_ASTPENDING;
 	}
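
Note that the RLIMIT_CPU checks in mi_switch() and ast() now compare raw
tick counts, so the limit in seconds gets scaled by cpu_tickrate() first.
A trivial illustration, assuming a made-up fixed 2 GHz ticker:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t tickrate = 2000000000ULL;	/* pretend 2 GHz cputicker */
	uint64_t cpulimit = 3600;		/* RLIMIT_CPU: 3600 seconds */
	uint64_t runtime  = 7300000000000ULL;	/* ticks consumed so far */

	/* Same shape as the mi_switch()/ast() checks: compare in ticks. */
	if (runtime >= cpulimit * tickrate)	/* budget is 7.2e12 ticks */
		printf("over the CPU limit\n");
	else
		printf("under the CPU limit\n");
	return (0);
}
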
Index: kern/kern_tc.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_tc.c,v
retrieving revision 1.164
diff -u -p -r1.164 kern_tc.c
--- kern/kern_tc.c	26 Mar 2005 20:04:28 -0000	1.164
+++ kern/kern_tc.c	23 Aug 2007 20:32:25 -0000
@@ -116,6 +116,7 @@ TC_STATS(nsetclock);
 #undef TC_STATS
 
 static void tc_windup(void);
+static void cpu_tick_calibrate(int);
 
 static int
 sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
@@ -131,6 +132,7 @@ sysctl_kern_boottime(SYSCTL_HANDLER_ARGS
 #endif
 		return SYSCTL_OUT(req, &boottime, sizeof(boottime));
 }
+
 /*
  * Return the difference between the timehands' counter value now and what
  * was when we copied it to the timehands' offset_count.
@@ -363,6 +365,7 @@ tc_setclock(struct timespec *ts)
 	struct timespec ts2;
 	struct bintime bt, bt2;
 
+	cpu_tick_calibrate(1);
 	nsetclock++;
 	binuptime(&bt2);
 	timespec2bintime(ts, &bt);
@@ -379,6 +382,7 @@ tc_setclock(struct timespec *ts)
 		    (intmax_t)ts2.tv_sec, ts2.tv_nsec,
 		    (intmax_t)ts->tv_sec, ts->tv_nsec);
 	}
+	cpu_tick_calibrate(1);
 }
 
 /*
@@ -475,8 +479,8 @@ tc_windup(void)
 	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
 	 *
 	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
-	 * we can only multiply by about 850 without overflowing, but that
-	 * leaves suitably precise fractions for multiply before divide.
+	 * we can only multiply by about 850 without overflowing, that
+	 * leaves no suitably precise fractions for multiply before divide.
 	 *
 	 * Divide before multiply with a fraction of 2199/512 results in a
 	 * systematic undercompensation of 10PPM of th_adjustment.  On a
@@ -749,11 +753,16 @@ void
 tc_ticktock(void)
 {
 	static int count;
+	static time_t last_calib;
 
 	if (++count < tc_tick)
 		return;
 	count = 0;
 	tc_windup();
+	if (time_uptime != last_calib && !(time_uptime & 0xf)) {
+		cpu_tick_calibrate(0);
+		last_calib = time_uptime;
+	}
 }
 
 static void
@@ -782,3 +791,143 @@ inittimecounter(void *dummy)
 }
 
 SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL)
+
+/* Cpu tick handling -------------------------------------------------*/
+
+static int cpu_tick_variable;
+static uint64_t	cpu_tick_frequency;
+
+static
+uint64_t
+tc_cpu_ticks(void)
+{
+	static uint64_t base;
+	static unsigned last;
+	unsigned u;
+	struct timecounter *tc;
+
+	tc = timehands->th_counter;
+	u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
+	if (u < last)
+		base += tc->tc_counter_mask + 1;
+	last = u;
+	return (u + base);
+}
+
+/*
+ * This function gets called every 16 seconds on only one designated
+ * CPU in the system from hardclock() via tc_ticktock().
+ *
+ * Whenever the real time clock is stepped we get called with reset=1
+ * to make sure we handle suspend/resume and similar events correctly.
+ */
+
+static void
+cpu_tick_calibrate(int reset)
+{
+	static uint64_t c_last;
+	uint64_t c_this, c_delta;
+	static struct bintime  t_last;
+	struct bintime t_this, t_delta;
+
+	if (reset) {
+		/* The clock was stepped, abort & reset */
+		t_last.sec = 0;
+		return;
+	}
+
+	/* we don't calibrate fixed rate cputicks */
+	if (!cpu_tick_variable)
+		return;
+
+	getbinuptime(&t_this);
+	c_this = cpu_ticks();
+	if (t_last.sec != 0) {
+		c_delta = c_this - c_last;
+		t_delta = t_this;
+		bintime_sub(&t_delta, &t_last);
+		if (0 && bootverbose) {
+			struct timespec ts;
+			bintime2timespec(&t_delta, &ts);
+			printf("%ju  %ju.%016jx %ju.%09ju",
+			    (uintmax_t)c_delta >> 4,
+			    (uintmax_t)t_delta.sec, (uintmax_t)t_delta.frac,
+			    (uintmax_t)ts.tv_sec, (uintmax_t)ts.tv_nsec);
+		}
+		/*
+		 * Validate that 16 +/- 1/256 seconds passed. 
+		 * After division by 16 this gives us a precision of
+		 * roughly 250PPM which is sufficient
+		 */
+		if (t_delta.sec > 16 || (
+		    t_delta.sec == 16 && t_delta.frac >= (0x01LL << 56))) {
+			/* too long */
+			if (0 && bootverbose)
+				printf("\ttoo long\n");
+		} else if (t_delta.sec < 15 ||
+		    (t_delta.sec == 15 && t_delta.frac <= (0xffLL << 56))) {
+			/* too short */
+			if (0 && bootverbose)
+				printf("\ttoo short\n");
+		} else {
+			/* just right */
+			c_delta >>= 4;
+			if (c_delta  > cpu_tick_frequency) {
+				if (0 && bootverbose)
+					printf("\thigher\n");
+				cpu_tick_frequency = c_delta;
+			} else {
+				if (0 && bootverbose)
+					printf("\tlower\n");
+			}
+		}
+	}
+	c_last = c_this;
+	t_last = t_this;
+}
+
+void
+set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
+{
+
+	if (func == NULL) {
+		cpu_ticks = tc_cpu_ticks;
+	} else {
+		cpu_tick_frequency = freq;
+		cpu_tick_variable = var;
+		cpu_ticks = func;
+	}
+}
+
+uint64_t
+cpu_tickrate(void)
+{
+
+	if (cpu_ticks == tc_cpu_ticks) 
+		return (tc_getfrequency());
+	return (cpu_tick_frequency);
+}
+
+/*
+ * We need to be slightly careful converting cputicks to microseconds.
+ * There is plenty of margin in 64 bits of microseconds (half a million
+ * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
+ * before divide conversion (to retain precision) we find that the
+ * margin shrinks to 1.5 hours (one millionth of 146y).
+ * With a three-prong approach we never lose significant bits, no
+ * matter what the cputick rate or the length of the time interval is.
+ */
+
+uint64_t
+cputick2usec(uint64_t tick)
+{
+
+	if (tick > 18446744073709551LL)		/* floor(2^64 / 1000) */
+		return (tick / (cpu_tickrate() / 1000000LL));
+	else if (tick > 18446744073709LL)	/* floor(2^64 / 1000000) */
+		return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
+	else
+		return ((tick * 1000000LL) / cpu_tickrate());
+}
+
+cpu_tick_f	*cpu_ticks = tc_cpu_ticks;
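
Two bits of arithmetic in the kern_tc.c hunk are worth spelling out.  The
thresholds in cputick2usec() are floor(2^64/1000) and floor(2^64/1000000):
above them a multiply-before-divide would overflow 64 bits, so precision is
traded for headroom in three steps.  And the 0x01LL << 56 in
cpu_tick_calibrate() is simply 1/256 of a second expressed in bintime's
2^-64 fractional units.  Here is a standalone copy of the conversion with
a quick sanity check (the fixed 4 GHz rate is an assumption for
illustration only):

#include <stdint.h>
#include <stdio.h>

static uint64_t
tickrate(void)
{
	return (4000000000ULL);		/* assume a fixed 4 GHz cputicker */
}

/* Same three-prong conversion as cputick2usec(), with a fixed rate. */
static uint64_t
ticks2usec(uint64_t tick)
{
	if (tick > 18446744073709551ULL)	/* floor(2^64 / 1000) */
		return (tick / (tickrate() / 1000000ULL));
	else if (tick > 18446744073709ULL)	/* floor(2^64 / 1000000) */
		return ((tick * 1000ULL) / (tickrate() / 1000ULL));
	else
		return ((tick * 1000000ULL) / tickrate());
}

int
main(void)
{
	/* ~146 years of 4 GHz ticks: tick * 1000000 would overflow here. */
	uint64_t big = 18400000000000000000ULL;

	printf("%ju usec\n", (uintmax_t)ticks2usec(big));
	printf("%ju usec\n", (uintmax_t)ticks2usec(4000000ULL));  /* 1000 */
	return (0);
}
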
Index: kern/kern_thread.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_thread.c,v
retrieving revision 1.216.2.6
diff -u -p -r1.216.2.6 kern_thread.c
--- kern/kern_thread.c	2 Sep 2006 17:29:57 -0000	1.216.2.6
+++ kern/kern_thread.c	23 Aug 2007 18:05:54 -0000
@@ -448,7 +448,7 @@ thread_free(struct thread *td)
 void
 thread_exit(void)
 {
-	struct bintime new_switchtime;
+	uint64_t new_switchtime;
 	struct thread *td;
 	struct proc *p;
 	struct ksegrp	*kg;
@@ -488,13 +488,14 @@ thread_exit(void)
 	/*
 	 * The thread is exiting. scheduler can release its stuff
 	 * and collect stats etc.
+	 * XXX this is not very right, since PROC_UNLOCK may still
+	 * need scheduler stuff.
 	 */
 	sched_thread_exit(td);
 
 	/* Do the same timestamp bookkeeping that mi_switch() would do. */
-	binuptime(&new_switchtime);
-	bintime_add(&p->p_rux.rux_runtime, &new_switchtime);
-	bintime_sub(&p->p_rux.rux_runtime, PCPU_PTR(switchtime));
+	new_switchtime = cpu_ticks();
+	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
 	PCPU_SET(switchtime, new_switchtime);
 	PCPU_SET(switchticks, ticks);
 	cnt.v_swtch++;
Index: kern/subr_trap.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/subr_trap.c,v
retrieving revision 1.281.2.1
diff -u -p -r1.281.2.1 subr_trap.c
--- kern/subr_trap.c	6 Sep 2006 21:43:59 -0000	1.281.2.1
+++ kern/subr_trap.c	23 Aug 2007 18:11:10 -0000
@@ -74,10 +74,7 @@ __FBSDID("$FreeBSD: src/sys/kern/subr_tr
  * MPSAFE
  */
 void
-userret(td, frame, oticks)
-	struct thread *td;
-	struct trapframe *frame;
-	u_int oticks;
+userret(struct thread *td, struct trapframe *frame)
 {
 	struct proc *p = td->td_proc;
 
@@ -127,10 +124,8 @@ userret(td, frame, oticks)
 	 * Charge system time if profiling.
 	 */
 	if (p->p_flag & P_PROFIL) {
-		quad_t ticks;
 
-		ticks = td->td_sticks - oticks;
-		addupc_task(td, TRAPF_PC(frame), (u_int)ticks * psratio);
+		addupc_task(td, TRAPF_PC(frame), td->td_pticks * psratio);
 	}
 
 	/*
@@ -153,7 +148,6 @@ ast(struct trapframe *framep)
 	struct proc *p;
 	struct ksegrp *kg;
 	struct rlimit rlim;
-	u_int sticks;
 	int sflag;
 	int flags;
 	int sig;
@@ -172,7 +166,7 @@ ast(struct trapframe *framep)
 	mtx_assert(&Giant, MA_NOTOWNED);
 	mtx_assert(&sched_lock, MA_NOTOWNED);
 	td->td_frame = framep;
-	sticks = td->td_sticks;
+	td->td_pticks = 0;
 
 	if ((p->p_flag & P_SA) && (td->td_mailbox == NULL))
 		thread_user_enter(td);
@@ -234,7 +228,7 @@ ast(struct trapframe *framep)
 		PROC_LOCK(p);
 		lim_rlimit(p, RLIMIT_CPU, &rlim);
 		mtx_lock_spin(&sched_lock);
-		if (p->p_rux.rux_runtime.sec >= rlim.rlim_max) {
+		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
 			mtx_unlock_spin(&sched_lock);
 			killproc(p, "exceeded maximum CPU limit");
 		} else {
@@ -272,6 +266,6 @@ ast(struct trapframe *framep)
 		PROC_UNLOCK(p);
 	}
 
-	userret(td, framep, sticks);
+	userret(td, framep);
 	mtx_assert(&Giant, MA_NOTOWNED);
 }
Index: powerpc/powerpc/trap.c
===================================================================
RCS file: /home/ncvs/src/sys/powerpc/powerpc/trap.c,v
retrieving revision 1.54.2.3
diff -u -p -r1.54.2.3 trap.c
--- powerpc/powerpc/trap.c	19 Sep 2006 12:38:08 -0000	1.54.2.3
+++ powerpc/powerpc/trap.c	23 Aug 2007 20:28:18 -0000
@@ -146,7 +146,7 @@ trap(struct trapframe *frame)
 	struct thread	*td;
 	struct proc	*p;
 	int		sig, type, user;
-	u_int		sticks, ucode;
+	u_int		ucode;
 
 	PCPU_LAZY_INC(cnt.v_trap);
 
@@ -156,13 +156,12 @@ trap(struct trapframe *frame)
 	type = ucode = frame->exc;
 	sig = 0;
 	user = frame->srr1 & PSL_PR;
-	sticks = 0;
 
 	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", p->p_comm,
 	    trapname(type), user ? "user" : "kernel");
 
 	if (user) {
-		sticks = td->td_sticks;
+		td->td_pticks = 0;
 		td->td_frame = frame;
 		if (td->td_ucred != p->p_ucred)
 			cred_update_thread(td);
@@ -260,7 +259,7 @@ trap(struct trapframe *frame)
 		trapsignal(td, sig, ucode);
 	}
 
-	userret(td, frame, sticks);
+	userret(td, frame);
 	mtx_assert(&Giant, MA_NOTOWNED);
 }
 
Index: sparc64/sparc64/mp_machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/sparc64/sparc64/mp_machdep.c,v
retrieving revision 1.29.2.2
diff -u -p -r1.29.2.2 mp_machdep.c
--- sparc64/sparc64/mp_machdep.c	4 Sep 2006 18:28:32 -0000	1.29.2.2
+++ sparc64/sparc64/mp_machdep.c	23 Aug 2007 18:05:54 -0000
@@ -363,7 +363,7 @@ cpu_mp_bootstrap(struct pcpu *pc)
 	/* ok, now grab sched_lock and enter the scheduler */
 	mtx_lock_spin(&sched_lock);
 	spinlock_exit();
-	binuptime(PCPU_PTR(switchtime));
+	PCPU_SET(switchtime, cpu_ticks());
 	PCPU_SET(switchticks, ticks);
 	cpu_throw(NULL, choosethread());	/* doesn't return */
 }
Index: sparc64/sparc64/tick.c
===================================================================
RCS file: /home/ncvs/src/sys/sparc64/sparc64/tick.c,v
retrieving revision 1.16.2.1
diff -u -p -r1.16.2.1 tick.c
--- sparc64/sparc64/tick.c	31 Mar 2006 23:38:29 -0000	1.16.2.1
+++ sparc64/sparc64/tick.c	23 Aug 2007 20:32:25 -0000
@@ -64,6 +64,13 @@ SYSCTL_INT(_machdep_tick, OID_AUTO, adju
 
 static void tick_hardclock(struct clockframe *);
 
+static uint64_t
+tick_cputicks(void)
+{
+
+	return (rd(tick));
+}
+
 void
 cpu_initclocks(void)
 {
@@ -148,6 +155,8 @@ tick_init(u_long clock)
 	 * handled.
 	 */
 	tick_stop();
+
+	set_cputicker(tick_cputicks, tick_freq, 0);
 }
 
 void
Index: sparc64/sparc64/trap.c
===================================================================
RCS file: /home/ncvs/src/sys/sparc64/sparc64/trap.c,v
retrieving revision 1.74.2.2
diff -u -p -r1.74.2.2 trap.c
--- sparc64/sparc64/trap.c	29 Jan 2007 21:32:18 -0000	1.74.2.2
+++ sparc64/sparc64/trap.c	23 Aug 2007 18:11:10 -0000
@@ -234,7 +234,6 @@ trap(struct trapframe *tf)
 {
 	struct thread *td;
 	struct proc *p;
-	u_int sticks;
 	int error;
 	int sig;
 
@@ -251,7 +250,7 @@ trap(struct trapframe *tf)
 		KASSERT(td->td_proc != NULL, ("trap: curproc NULL"));
 
 		p = td->td_proc;
-		sticks = td->td_sticks;
+		td->td_pticks = 0;
 		td->td_frame = tf;
 		if (td->td_ucred != p->p_ucred)
 			cred_update_thread(td);
@@ -291,7 +290,7 @@ trap(struct trapframe *tf)
 			trapsignal(td, sig, tf->tf_type);
 		}
 
-		userret(td, tf, sticks);
+		userret(td, tf);
 		mtx_assert(&Giant, MA_NOTOWNED);
  	} else {
 		KASSERT((tf->tf_type & T_KERNEL) != 0,
@@ -500,7 +499,6 @@ syscall(struct trapframe *tf)
 	register_t args[8];
 	register_t *argp;
 	struct proc *p;
-	u_int sticks;
 	u_long code;
 	u_long tpc;
 	int reg;
@@ -521,7 +519,7 @@ syscall(struct trapframe *tf)
 	reg = 0;
 	regcnt = REG_MAXARGS;
 
-	sticks = td->td_sticks;
+	td->td_pticks = 0;
 	td->td_frame = tf;
 	if (td->td_ucred != p->p_ucred)
 		cred_update_thread(td);
@@ -646,7 +644,7 @@ syscall(struct trapframe *tf)
 	/*
 	 * Handle reschedule and other end-of-syscall issues
 	 */
-	userret(td, tf, sticks);
+	userret(td, tf);
 
 #ifdef KTRACE
 	if (KTRPOINT(td, KTR_SYSRET))
Index: sys/pcpu.h
===================================================================
RCS file: /home/ncvs/src/sys/sys/pcpu.h,v
retrieving revision 1.16
diff -u -p -r1.16 pcpu.h
--- sys/pcpu.h	26 Apr 2005 17:07:40 -0000	1.16
+++ sys/pcpu.h	23 Aug 2007 18:05:54 -0000
@@ -60,7 +60,7 @@ struct pcpu {
 	struct thread	*pc_fpcurthread;	/* Fp state owner */
 	struct thread	*pc_deadthread;		/* Zombie thread or NULL */
 	struct pcb	*pc_curpcb;		/* Current pcb */
-	struct bintime	pc_switchtime;	
+	uint64_t	pc_switchtime;	
 	int		pc_switchticks;
 	u_int		pc_cpuid;		/* This cpu number */
 	cpumask_t	pc_cpumask;		/* This cpu mask */
Index: sys/proc.h
===================================================================
RCS file: /home/ncvs/src/sys/sys/proc.h,v
retrieving revision 1.432.2.10
diff -u -p -r1.432.2.10 proc.h
--- sys/proc.h	11 Jun 2007 11:27:04 -0000	1.432.2.10
+++ sys/proc.h	23 Aug 2007 21:25:37 -0000
@@ -278,7 +278,10 @@ struct thread {
 	struct ucred	*td_ucred;	/* (k) Reference to credentials. */
 	struct thread	*td_standin;	/* (k + a) Use this for an upcall. */
 	struct kse_upcall *td_upcall;	/* (k + j) Upcall structure. */
-	u_int64_t	td_sticks;	/* (k) Statclock hits in system mode. */
+	u_int		td_pticks;	/* (k) Statclock hits for profiling */
+	u_int		td_sticks;	/* (k) Statclock hits in system mode. */
+	u_int		td_iticks;	/* (k) Statclock hits in intr mode. */
+	u_int		td_uticks;	/* (k) Statclock hits in user mode. */
 	u_int		td_uuticks;	/* (k) Statclock hits (usr), for UTS. */
 	u_int		td_usticks;	/* (k) Statclock hits (sys), for UTS. */
 	int		td_intrval;	/* (j) Return value of TDF_INTERRUPT. */
@@ -502,13 +505,13 @@ struct ksegrp {
  * Locking: (cj) means (j) for p_rux and (c) for p_crux.
  */
 struct rusage_ext {
-	struct bintime	rux_runtime;    /* (cj) Real time. */
+	u_int64_t	rux_runtime;    /* (cj) Real time. */
 	u_int64_t	rux_uticks;     /* (cj) Statclock hits in user mode. */
 	u_int64_t	rux_sticks;     /* (cj) Statclock hits in sys mode. */
 	u_int64_t	rux_iticks;     /* (cj) Statclock hits in intr mode. */
 	u_int64_t	rux_uu;         /* (c) Previous user time in usec. */
 	u_int64_t	rux_su;         /* (c) Previous sys time in usec. */
-	u_int64_t	rux_iu;         /* (c) Previous intr time in usec. */
+	u_int64_t	rux_tu;         /* (c) Previous total time in usec. */
 };
 
 /*
@@ -910,7 +913,7 @@ extern	void (*cpu_idle_hook)(void);	/* H
 void	cpu_switch(struct thread *old, struct thread *new);
 void	cpu_throw(struct thread *old, struct thread *new) __dead2;
 void	unsleep(struct thread *);
-void	userret(struct thread *, struct trapframe *, u_int);
+void	userret(struct thread *, struct trapframe *);
 
 void	cpu_exit(struct thread *);
 void	exit1(struct thread *, int) __dead2;
Index: sys/systm.h
===================================================================
RCS file: /home/ncvs/src/sys/sys/systm.h,v
retrieving revision 1.234.2.6
diff -u -p -r1.234.2.6 systm.h
--- sys/systm.h	18 Jun 2007 22:44:59 -0000	1.234.2.6
+++ sys/systm.h	23 Aug 2007 20:32:25 -0000
@@ -240,6 +240,12 @@ int	setenv(const char *name, const char 
 int	unsetenv(const char *name);
 int	testenv(const char *name);
 
+typedef uint64_t (cpu_tick_f)(void);
+void set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var);
+extern cpu_tick_f *cpu_ticks;
+uint64_t cpu_tickrate(void);
+uint64_t cputick2usec(uint64_t tick);
+
 #ifdef APM_FIXUP_CALLTODO
 struct timeval;
 void	adjust_timeout_calltodo(struct timeval *time_change);
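
For reference, the whole MI interface added to systm.h boils down to a
function pointer plus a nominal frequency that the MD code registers at
boot, the way the tsc.c, geode.c and tick.c hunks above do.  A cut-down
userland sketch (my_ticker is a stand-in for rdtsc() or rd(tick), and the
NULL-func fallback to tc_cpu_ticks() is omitted):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t (cpu_tick_f)(void);

/* Cut-down copies of the kern_tc.c additions, for illustration only. */
static cpu_tick_f *cpu_ticks;
static uint64_t cpu_tick_frequency;
static int cpu_tick_variable;

static void
set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
{
	cpu_tick_frequency = freq;
	cpu_tick_variable = var;
	cpu_ticks = func;
}

/* Stand-in for an MD ticker such as rdtsc() or rd(tick). */
static uint64_t
my_ticker(void)
{
	static uint64_t t;

	return (t += 12345);
}

int
main(void)
{
	/* What init_TSC()/tick_init()/geode_probe() do at boot. */
	set_cputicker(my_ticker, 1000000000ULL, 1);

	printf("rate %ju Hz, sample %ju, variable %d\n",
	    (uintmax_t)cpu_tick_frequency, (uintmax_t)cpu_ticks(),
	    cpu_tick_variable);
	return (0);
}
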


