svn commit: r191760 - head/sys/dev/acpica

Alexander Motin mav at FreeBSD.org
Sat May 2 22:30:33 UTC 2009


Author: mav
Date: Sat May  2 22:30:33 2009
New Revision: 191760
URL: http://svn.freebsd.org/changeset/base/191760

Log:
  Avoid comparing negative signed values to positive unsigned ones. This was
  leading to a bug where the C-state was not reduced after a sleep shorter
  than the declared transition latency. Fixing this makes the workaround for
  broken C-states on some hardware unnecessary.
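
  As a minimal, hypothetical sketch (not part of this commit; the names
  prev_sleep and trans_lat are illustrative only), the pitfall is that a
  signed operand is converted to unsigned when compared against an unsigned
  value, so a negative sleep time looks like a huge positive one and the
  latency check always passes:

#include <stdio.h>

int
main(void)
{
    int prev_sleep = -5;            /* negative, "impossible" sleep time */
    unsigned int trans_lat = 100;   /* declared transition latency, in us */

    /* prev_sleep is promoted to unsigned: (unsigned)-5 > 100 holds. */
    if (trans_lat <= prev_sleep)
        printf("deep state kept despite the short/negative sleep\n");
    return (0);
}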
  
  While here, change the state selection logic a bit. Instead of the last
  sleep time, use a short-term average of it. The global interrupt rate in
  the system is too random a value for subsequent sleeps to correlate so
  directly.
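
  A minimal sketch of the new scheme from the diff below (assumptions only,
  not the driver code: NSTATES, the latency table and pick_state() are made
  up for illustration): cpu_prev_sleep becomes a short-term average, 3/4 of
  the old value plus 1/4 of the new sample, and a state is picked only if
  three times its transition latency fits within that average:

#include <stdio.h>

#define NSTATES 3

/* Transition latencies in microseconds for C1..C3 (made-up values). */
static unsigned int trans_lat[NSTATES] = { 1, 50, 200 };
/* Short-term average of recent sleep times, in microseconds. */
static unsigned int prev_sleep = 500;

static int
pick_state(unsigned int last_sleep)
{
    int i, next = 0;

    /* Fold the new measurement into the short-term average. */
    prev_sleep = (prev_sleep * 3 + last_sleep) / 4;

    /* Find the deepest state whose latency is small enough. */
    for (i = NSTATES - 1; i >= 0; i--) {
        if (trans_lat[i] * 3 <= prev_sleep) {
            next = i;
            break;
        }
    }
    return (next);
}

int
main(void)
{
    printf("C%d\n", pick_state(1000) + 1);  /* long sleep raises the average */
    printf("C%d\n", pick_state(20) + 1);    /* short sleeps pull it back down */
    printf("C%d\n", pick_state(20) + 1);
    return (0);
}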

Modified:
  head/sys/dev/acpica/acpi_cpu.c

Modified: head/sys/dev/acpica/acpi_cpu.c
==============================================================================
--- head/sys/dev/acpica/acpi_cpu.c	Sat May  2 22:22:00 2009	(r191759)
+++ head/sys/dev/acpica/acpi_cpu.c	Sat May  2 22:30:33 2009	(r191760)
@@ -882,43 +882,13 @@ acpi_cpu_idle()
 	return;
     }
 
-    /*
-     * If we slept 100 us or more, use the lowest Cx state.  Otherwise,
-     * find the lowest state that has a latency less than or equal to
-     * the length of our last sleep.
-     */
-    cx_next_idx = sc->cpu_cx_lowest;
-    if (sc->cpu_prev_sleep < 100) {
-	/*
-	 * If we sleep too short all the time, this system may not implement
-	 * C2/3 correctly (i.e. reads return immediately).  In this case,
-	 * back off and use the next higher level.
-	 * It seems that when you have a dual core cpu (like the Intel Core Duo)
-	 * that both cores will get out of C3 state as soon as one of them
-	 * requires it. This breaks the sleep detection logic as the sleep
-	 * counter is local to each cpu. Disable the sleep logic for now as a
-	 * workaround if there's more than one CPU. The right fix would probably
-	 * be to add quirks for system that don't really support C3 state.
-	 */
-	if (mp_ncpus < 2 && sc->cpu_prev_sleep <= 1) {
-	    sc->cpu_short_slp++;
-	    if (sc->cpu_short_slp == 1000 && sc->cpu_cx_lowest != 0) {
-		if (sc->cpu_non_c3 == sc->cpu_cx_lowest && sc->cpu_non_c3 != 0)
-		    sc->cpu_non_c3--;
-		sc->cpu_cx_lowest--;
-		sc->cpu_short_slp = 0;
-		device_printf(sc->cpu_dev,
-		    "too many short sleeps, backing off to C%d\n",
-		    sc->cpu_cx_lowest + 1);
-	    }
-	} else
-	    sc->cpu_short_slp = 0;
-
-	for (i = sc->cpu_cx_lowest; i >= 0; i--)
-	    if (sc->cpu_cx_states[i].trans_lat <= sc->cpu_prev_sleep) {
-		cx_next_idx = i;
-		break;
-	    }
+    /* Find the lowest state that has a small enough latency. */
+    cx_next_idx = 0;
+    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
+	if (sc->cpu_cx_states[i].trans_lat * 3 <= sc->cpu_prev_sleep) {
+	    cx_next_idx = i;
+	    break;
+	}
     }
 
     /*
@@ -943,10 +913,10 @@ acpi_cpu_idle()
     /*
      * Execute HLT (or equivalent) and wait for an interrupt.  We can't
      * calculate the time spent in C1 since the place we wake up is an
-     * ISR.  Assume we slept one quantum and return.
+     * ISR.  Assume we slept half of a quantum and return.
      */
     if (cx_next->type == ACPI_STATE_C1) {
-	sc->cpu_prev_sleep = 1000000 / hz;
+	sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + 500000 / hz) / 4;
 	acpi_cpu_c1();
 	return;
     }
@@ -989,9 +959,9 @@ acpi_cpu_idle()
     }
     ACPI_ENABLE_IRQS();
 
-    /* Find the actual time asleep in microseconds, minus overhead. */
+    /* Find the actual time asleep in microseconds. */
     end_time = acpi_TimerDelta(end_time, start_time);
-    sc->cpu_prev_sleep = PM_USEC(end_time) - cx_next->trans_lat;
+    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
 }
 
 /*

