svn commit: r355512 - in head/sys: kern sys

Jeff Roberson jeff at FreeBSD.org
Sun Dec 8 01:17:39 UTC 2019


Author: jeff
Date: Sun Dec  8 01:17:38 2019
New Revision: 355512
URL: https://svnweb.freebsd.org/changeset/base/355512

Log:
  Handle multiple clock interrupts simultaneously in sched_clock().
  
  statclock() can be handed a count of pending stat clock ticks; rather
  than looping over sched_clock() once per tick at the call site, pass
  the count down and let each scheduler consume the batch itself: 4BSD
  loops internally, while ULE scales its runtime, slice, and
  load-balancer accounting by the count.
  
  Reviewed by:	kib, markj, mav
  Differential Revision:	https://reviews.freebsd.org/D22625
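
In short, the interface change pushes the tick count through the clock
path instead of looping at the call site; the net effect, condensed
from the diffs below:

	/* Before: kern_clock.c drove the scheduler one tick at a time. */
	for ( ; cnt > 0; cnt--)
		sched_clock(td);

	/* After: the scheduler consumes the whole batch at once. */
	sched_clock(td, cnt);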

Modified:
  head/sys/kern/kern_clock.c
  head/sys/kern/sched_4bsd.c
  head/sys/kern/sched_ule.c
  head/sys/sys/sched.h

Modified: head/sys/kern/kern_clock.c
==============================================================================
--- head/sys/kern/kern_clock.c	Sun Dec  8 01:16:22 2019	(r355511)
+++ head/sys/kern/kern_clock.c	Sun Dec  8 01:17:38 2019	(r355512)
@@ -711,8 +711,7 @@ statclock(int cnt, int usermode)
 	td->td_incruntime += runtime;
 	PCPU_SET(switchtime, new_switchtime);
 
-	for ( ; cnt > 0; cnt--)
-		sched_clock(td);
+	sched_clock(td, cnt);
 	thread_unlock(td);
 #ifdef HWPMC_HOOKS
 	if (td->td_intr_frame != NULL)
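
For context on where cnt comes from: statclock() already took a tick
count before this change (see the hunk header above), and it can exceed
one when clock interrupts were coalesced or deferred, e.g. while the
CPU slept. A simplified, hypothetical sketch of how such a caller might
compute the batch size (names and types here are illustrative, not the
kernel's):

	#include <stdint.h>

	/*
	 * Number of stat clock periods that have elapsed by `now',
	 * counting the one currently due; greater than 1 after several
	 * periods went undelivered.
	 */
	static int
	pending_stat_ticks(int64_t now, int64_t nextstat, int64_t statperiod)
	{
		if (now < nextstat)
			return (0);
		return ((int)((now - nextstat) / statperiod) + 1);
	}

All of those pending ticks now reach the scheduler in one sched_clock()
call rather than cnt separate ones.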

Modified: head/sys/kern/sched_4bsd.c
==============================================================================
--- head/sys/kern/sched_4bsd.c	Sun Dec  8 01:16:22 2019	(r355511)
+++ head/sys/kern/sched_4bsd.c	Sun Dec  8 01:17:38 2019	(r355512)
@@ -706,8 +706,8 @@ sched_rr_interval(void)
  * favor processes which haven't run much recently, and to round-robin
  * among other processes.
  */
-void
-sched_clock(struct thread *td)
+static void
+sched_clock_tick(struct thread *td)
 {
 	struct pcpuidlestat *stat;
 	struct td_sched *ts;
@@ -734,6 +734,14 @@ sched_clock(struct thread *td)
 	stat = DPCPU_PTR(idlestat);
 	stat->oldidlecalls = stat->idlecalls;
 	stat->idlecalls = 0;
+}
+
+void
+sched_clock(struct thread *td, int cnt)
+{
+
+	for ( ; cnt > 0; cnt--)
+		sched_clock_tick(td);
 }
 
 /*
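
The 4BSD side is a pure refactor: the old body becomes the private
sched_clock_tick(), and the public sched_clock() loops over it, so
per-tick behavior is unchanged and the cost stays O(cnt) ticks. The
shape of that wrapper, as a standalone sketch:

	/* Keep the single-tick routine private; loop in the public entry. */
	static void
	tick_once(void)
	{
		/* former per-tick body */
	}

	void
	tick_many(int cnt)
	{
		for ( ; cnt > 0; cnt--)
			tick_once();
	}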

Modified: head/sys/kern/sched_ule.c
==============================================================================
--- head/sys/kern/sched_ule.c	Sun Dec  8 01:16:22 2019	(r355511)
+++ head/sys/kern/sched_ule.c	Sun Dec  8 01:17:38 2019	(r355512)
@@ -2421,7 +2421,7 @@ sched_userret_slowpath(struct thread *td)
  * threads.
  */
 void
-sched_clock(struct thread *td)
+sched_clock(struct thread *td, int cnt)
 {
 	struct tdq *tdq;
 	struct td_sched *ts;
@@ -2432,8 +2432,10 @@ sched_clock(struct thread *td)
 	/*
 	 * We run the long term load balancer infrequently on the first cpu.
 	 */
-	if (balance_tdq == tdq && smp_started != 0 && rebalance != 0) {
-		if (balance_ticks && --balance_ticks == 0)
+	if (balance_tdq == tdq && smp_started != 0 && rebalance != 0 &&
+	    balance_ticks != 0) {
+		balance_ticks -= cnt;
+		if (balance_ticks <= 0)
 			sched_balance();
 	}
 #endif
@@ -2455,14 +2457,15 @@ sched_clock(struct thread *td)
 	}
 	ts = td_get_sched(td);
 	sched_pctcpu_update(ts, 1);
-	if (td->td_pri_class & PRI_FIFO_BIT)
+	if ((td->td_pri_class & PRI_FIFO_BIT) || TD_IS_IDLETHREAD(td))
 		return;
+
 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) {
 		/*
 		 * We used a tick; charge it to the thread so
 		 * that we can compute our interactivity.
 		 */
-		td_get_sched(td)->ts_runtime += tickincr;
+		td_get_sched(td)->ts_runtime += tickincr * cnt;
 		sched_interact_update(td);
 		sched_priority(td);
 	}
@@ -2471,7 +2474,8 @@ sched_clock(struct thread *td)
 	 * Force a context switch if the current thread has used up a full
 	 * time slice (default is 100ms).
 	 */
-	if (!TD_IS_IDLETHREAD(td) && ++ts->ts_slice >= tdq_slice(tdq)) {
+	ts->ts_slice += cnt;
+	if (ts->ts_slice >= tdq_slice(tdq)) {
 		ts->ts_slice = 0;
 		td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
 	}
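
A note on the ULE arithmetic: one call with cnt ticks is meant to match
cnt single-tick calls. ts_runtime grows by tickincr * cnt; the balancer
countdown subtracts cnt and fires on <= 0, so a batch that jumps past
zero still triggers sched_balance(); and the TD_IS_IDLETHREAD() test
moves from the slice check into the early return alongside
PRI_FIFO_BIT, an equivalent but simpler structure. The batched
countdown detail as a standalone sketch (names local to this example):

	/*
	 * Subtract a batch of ticks from a countdown and report whether
	 * the deadline was crossed. Testing <= 0 rather than == 0 is
	 * what keeps a multi-tick batch from stepping over the trigger.
	 */
	static int
	countdown_batch(int *ticks, int cnt)
	{
		*ticks -= cnt;
		if (*ticks > 0)
			return (0);
		*ticks = 0;		/* the real code rearms elsewhere */
		return (1);
	}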

Modified: head/sys/sys/sched.h
==============================================================================
--- head/sys/sys/sched.h	Sun Dec  8 01:16:22 2019	(r355511)
+++ head/sys/sys/sched.h	Sun Dec  8 01:17:38 2019	(r355512)
@@ -135,7 +135,7 @@ sched_userret(struct thread *td)
  * Threads are moved on and off of run queues
  */
 void	sched_add(struct thread *td, int flags);
-void	sched_clock(struct thread *td);
+void	sched_clock(struct thread *td, int ticks);
 void	sched_preempt(struct thread *td);
 void	sched_rem(struct thread *td);
 void	sched_relinquish(struct thread *td);

