(Missing) power states of an Atom N455-based netbook
Taku YAMAMOTO
taku at tackymt.homeip.net
Fri Jul 15 08:48:21 UTC 2011
On Wed, 13 Jul 2011 12:02:50 -0400
Jung-uk Kim <jkim at FreeBSD.org> wrote:
> I fixed that bug in the later patches, e.g.,
>
> http://people.freebsd.org/~jkim/acpi_cx_native.diff
I'm afraid spinning on *state == STATE_MWAIT doesn't work well for hardware-
originated interrupts.
Other than that, basically I like yours better than my dirty one.
# I'm attaching my dirty hack for the reference.
> However, it doesn't work yet. Please see below.
>
> On Wednesday 13 July 2011 10:32 am, Vitaly Magerya wrote:
> > Sure. With the above change it hangs during the boot.
> >
> > Here are the last two lines before the hang with verbose boot:
> >
> > acpi_acad0: acline initialization start
> > acpi_battery0: battery initialization start
>
> It is a known problem and the above patch hangs, too. :-(
It's quite strange...
Maybe we have to tell the BIOS that we are going to utilize _CST via SMI.
In fact, the major difference between mine and jkim's is that in mine
I took the following snippet (which we can find in acpi_cpu_startup_cx
function, living in sys/dev/acpica/acpi_cpu.c) out of #ifdef notyet.
-#ifdef notyet
/* Signal platform that we can handle _CST notification. */
if (!cpu_cx_generic && cpu_cst_cnt != 0) {
ACPI_LOCK(acpi);
AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
ACPI_UNLOCK(acpi);
}
-#endif
Vitaly, could you remove the above-mentioned #ifdef-#endif pair (to activate
the code in between) and test jkim's patch again?
> Actually, I abandoned the patches and I am thinking about rewriting it
> from scratch, e.g., refactoring MI/MD support code
> (dev/acpica/acpi_cpu.c -> machine/machdep.c & acpica/acpi_machdep.c).
> Unfortunately, I don't have much time nor hardware to test, so it
> won't happen any time soon. Sorry.
>
> If anyone wants to pick it up from here, please feel free.
>
> Jung-uk Kim
I have a laptop with a Core 2 Duo that supports MWAIT C3 (and it actually
works with my hack).
Though I don't rebuild the kernel often these days, I will happily test
your new patches next time.
--
-|-__ YAMAMOTO, Taku
| __ < <taku at tackymt.homeip.net>
- A chicken is an egg's way of producing more eggs. -
-------------- next part --------------
diff -rup --exclude='*~' --exclude='*.patch' /sys/dev/acpica/acpi_cpu.c native_cx/dev/acpica/acpi_cpu.c
--- sys/dev/acpica/acpi_cpu.c.orig 2010-11-13 02:10:12.000000000 +0900
+++ sys/dev/acpica/acpi_cpu.c 2010-11-30 16:45:50.087748111 +0900
@@ -65,6 +65,12 @@ struct acpi_cx {
uint32_t trans_lat; /* Transition latency (usec). */
uint32_t power; /* Power consumed (mW). */
int res_type; /* Resource type for p_lvlx. */
+#ifdef ACPI_USE_NATIVE_CX
+#define pseudo_RES_FFIXEDHW 0 /* FFixedHW pseudo resource */
+ uint16_t code; /* Encoded as BitWidth/BitOffset */
+ uint8_t arg1; /* Encoded as AccessWidth */
+ uint64_t arg0; /* Encoded as Address */
+#endif /* ACPI_USE_NATIVE_CX */
};
#define MAX_CX_STATES 8
@@ -336,6 +342,9 @@ acpi_cpu_attach(device_t dev)
* SMP control where each CPU can have different settings.
*/
sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3;
+#ifdef ACPI_USE_NATIVE_CX /* XXX */
+ sc->cpu_features |= ACPI_CAP_SMP_C1_NATIVE | ACPI_CAP_SMP_C3_NATIVE;
+#endif
if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) {
for (i = 0; i < drv_count; i++) {
if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
@@ -695,6 +704,7 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *s
/* Validate the state to see if we should use it. */
switch (cx_ptr->type) {
case ACPI_STATE_C1:
+#ifndef ACPI_USE_NATIVE_CX
if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
/* This is the first C1 state. Use the reserved slot. */
sc->cpu_cx_states[0] = *cx_ptr;
@@ -704,6 +714,7 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *s
sc->cpu_cx_count++;
}
continue;
+#endif
case ACPI_STATE_C2:
sc->cpu_non_c3 = i;
break;
@@ -726,6 +737,27 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *s
}
#endif
+#ifdef ACPI_USE_NATIVE_CX
+ /* Check for FFixedHW first. */
+ if (acpi_PkgGasFFH(pkg, 0, &cx_ptr->code, &cx_ptr->arg0, &cx_ptr->arg1) == 0) {
+ /* We do not increment cpu_rid, because FixedHW isn't a resource. */
+ cx_ptr->res_type = pseudo_RES_FFIXEDHW;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "acpi_cpu%d: Got C%d - %d latency\n",
+ device_get_unit(sc->cpu_dev), cx_ptr->type,
+ cx_ptr->trans_lat));
+ if (cx_ptr->type == ACPI_STATE_C1 &&
+ sc->cpu_cx_states[0].type == ACPI_STATE_C0) {
+ /* This is the first C1 state. Use the reserved slot. */
+ sc->cpu_cx_states[0] = *cx_ptr;
+ } else {
+ cx_ptr++;
+ sc->cpu_cx_count++;
+ }
+ continue;
+ }
+#endif
+
/* Allocate the control register for C2 or C3. */
acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &sc->cpu_rid,
&cx_ptr->p_lvlx, RF_SHAREABLE);
@@ -746,6 +778,10 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *s
if (cx_ptr->type == ACPI_STATE_C0) {
cx_ptr->type = ACPI_STATE_C1;
cx_ptr->trans_lat = 0;
+#ifdef ACPI_USE_NATIVE_CX
+ cx_ptr->res_type = pseudo_RES_FFIXEDHW;
+ cx_ptr->code = 0;
+#endif
}
return (0);
@@ -791,6 +827,8 @@ acpi_cpu_startup(void *arg)
if (sc->cpu_cx_count < cpu_cx_count)
cpu_cx_count = sc->cpu_cx_count;
}
+ if (cpu_cx_count <= 1)
+ return;
} else {
/*
* We are using _CST mode, remove C3 state if necessary.
@@ -871,14 +909,12 @@ acpi_cpu_startup_cx(struct acpi_cpu_soft
(void *)sc, 0, acpi_cpu_usage_sysctl, "A",
"percent usage for each Cx state");
-#ifdef notyet
/* Signal platform that we can handle _CST notification. */
if (!cpu_cx_generic && cpu_cst_cnt != 0) {
ACPI_LOCK(acpi);
AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
ACPI_UNLOCK(acpi);
}
-#endif
}
/*
@@ -952,7 +988,11 @@ acpi_cpu_idle()
* is an ISR. Assume we slept no more then half of quantum, unless
* we are called inside critical section, delaying context switch.
*/
- if (cx_next->type == ACPI_STATE_C1) {
+ if (cx_next->type == ACPI_STATE_C1
+#ifdef ACPI_USE_NATIVE_CX
+ && cx_next->code == 0
+#endif
+ ) {
AcpiHwRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
acpi_cpu_c1();
AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
@@ -982,6 +1022,11 @@ acpi_cpu_idle()
* is the only reliable time source.
*/
AcpiHwRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
+#ifdef ACPI_USE_NATIVE_CX
+ if (cx_next->res_type == pseudo_RES_FFIXEDHW)
+ acpi_cpu_cx_native(cx_next->code, cx_next->arg0, cx_next->arg1);
+ else {
+#endif
CPU_GET_REG(cx_next->p_lvlx, 1);
/*
@@ -991,6 +1036,9 @@ acpi_cpu_idle()
* margin that we are certain to have a correct value.
*/
AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
+#ifdef ACPI_USE_NATIVE_CX
+ }
+#endif
AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
/* Enable bus master arbitration and disable bus master wakeup. */
@@ -999,11 +1047,15 @@ acpi_cpu_idle()
AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
}
- ACPI_ENABLE_IRQS();
/* Find the actual time asleep in microseconds. */
- end_time = acpi_TimerDelta(end_time, start_time);
- sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
+ end_time = PM_USEC(acpi_TimerDelta(end_time, start_time));
+#ifdef notyet
+ if (cx_may_nonatomic && curthread->td_critnest == 0)
+ end_time = min(end_time, 500000 / hz);
+#endif
+ sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4;
+ ACPI_ENABLE_IRQS();
}
/*
diff -rup --exclude='*~' --exclude='*.patch' /sys/dev/acpica/acpi_package.c native_cx/dev/acpica/acpi_package.c
--- sys/dev/acpica/acpi_package.c 2010-01-22 06:14:28.000000000 +0900
+++ sys/dev/acpica/acpi_package.c 2010-10-11 06:28:24.777637413 +0900
@@ -103,11 +103,9 @@ acpi_PkgStr(ACPI_OBJECT *res, int idx, v
return (0);
}
-int
-acpi_PkgGas(device_t dev, ACPI_OBJECT *res, int idx, int *type, int *rid,
- struct resource **dst, u_int flags)
+static int
+acpi_get_PkgGas(ACPI_OBJECT *res, int idx, ACPI_GENERIC_ADDRESS *gas)
{
- ACPI_GENERIC_ADDRESS gas;
ACPI_OBJECT *obj;
obj = &res->Package.Elements[idx];
@@ -115,11 +113,48 @@ acpi_PkgGas(device_t dev, ACPI_OBJECT *r
obj->Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3)
return (EINVAL);
- memcpy(&gas, obj->Buffer.Pointer + 3, sizeof(gas));
+ memcpy(gas, obj->Buffer.Pointer + 3, sizeof(*gas));
+ return (0);
+}
+
+int
+acpi_PkgGas(device_t dev, ACPI_OBJECT *res, int idx, int *type, int *rid,
+ struct resource **dst, u_int flags)
+{
+ ACPI_GENERIC_ADDRESS gas;
+ int r;
+ r = acpi_get_PkgGas(res, idx, &gas);
+ if (r != 0)
+ return (r);
return (acpi_bus_alloc_gas(dev, type, rid, &gas, dst, flags));
}
+int
+acpi_PkgGasFFH(ACPI_OBJECT *res, int idx, uint16_t *code, UINT64 *arg0,
+ uint8_t *arg1)
+{
+ ACPI_GENERIC_ADDRESS gas;
+ int r;
+ uint8_t vendorcode, classcode;
+
+ r = acpi_get_PkgGas(res, idx, &gas);
+ if (r != 0)
+ return (r);
+
+ if (gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE)
+ return (EOPNOTSUPP);
+
+ /* Extract encoded values. */
+ vendorcode = gas.BitWidth;
+ classcode = gas.BitOffset;
+ *code = vendorcode | ((uint16_t)classcode << 8);
+ *arg1 = gas.AccessWidth;
+ *arg0 = gas.Address;
+
+ return (0);
+}
+
ACPI_HANDLE
acpi_GetReference(ACPI_HANDLE scope, ACPI_OBJECT *obj)
{
diff -rup --exclude='*~' --exclude='*.patch' /sys/dev/acpica/acpivar.h native_cx/dev/acpica/acpivar.h
--- sys/dev/acpica/acpivar.h.orig 2011-02-26 03:29:57.000000000 +0900
+++ sys/dev/acpica/acpivar.h 2011-03-09 14:35:39.477877295 +0900
@@ -454,6 +454,8 @@ int acpi_PkgInt32(ACPI_OBJECT *res, int
int acpi_PkgStr(ACPI_OBJECT *res, int idx, void *dst, size_t size);
int acpi_PkgGas(device_t dev, ACPI_OBJECT *res, int idx, int *type,
int *rid, struct resource **dst, u_int flags);
+int acpi_PkgGasFFH(ACPI_OBJECT *res, int idx, uint16_t *code,
+ UINT64 *arg0, uint8_t *arg1);
ACPI_HANDLE acpi_GetReference(ACPI_HANDLE scope, ACPI_OBJECT *obj);
/*
diff -rup --exclude='*~' --exclude='*.patch' /sys/i386/acpica/acpi_machdep.c native_cx/i386/acpica/acpi_machdep.c
--- sys/i386/acpica/acpi_machdep.c 2010-03-19 21:43:18.000000000 +0900
+++ sys/i386/acpica/acpi_machdep.c 2010-11-01 15:29:54.018653408 +0900
@@ -559,6 +559,50 @@ acpi_cpu_c1()
__asm __volatile("sti; hlt");
}
+extern void cpu_idle_mwait_hint(uint32_t); /* XXX */
+
+/*
+ * code: vendor | (classcode << 8)
+ * vendor: ACPI_GENERIC_ADDRESS.BitWidth (8-bit).
+ * classcode: ACPI_GENERIC_ADDRESS.BitOffset (8-bit).
+ * arg0: ACPI_GENERIC_ADDRESS.Address (64-bit).
+ * arg1: ACPI_GENERIC_ADDRESS.AccessWidth (8-bit).
+ */
+void
+acpi_cpu_cx_native(uint16_t code, uint64_t arg0, uint8_t arg1)
+{
+ switch (code) {
+ /* 0x##01: Intel */
+ case 0x0101:
+ /*
+ * C1 I/O then HLT.
+ * arg0: I/O port address to inb.
+ * arg1: not used (supposed to be 0).
+ */
+ __asm __volatile(
+ "inb %w0, %%al\n\t"
+ "sti; hlt"
+ :: "d" ((uint16_t)arg0) : "al");
+ break;
+
+ case 0x0201:
+ /*
+ * Native C state insn, a.k.a. MWAIT.
+ * arg0[31:0] : hint for MWAIT (through EAX).
+ * arg1[0] : H/W-coordinated.
+ * arg1[1] : Requires bus-master avoidance. (BM_CTRL)
+ */
+ cpu_idle_mwait_hint((uint32_t)arg0);
+ break;
+
+ /* More vendors? */
+
+ default:
+ /* Defaults to STI-HLT sequence. */
+ __asm __volatile("sti; hlt");
+ }
+}
+
/*
* Support for mapping ACPI tables during early boot. This abuses the
* crashdump map because the kernel cannot allocate KVA in
diff -rup --exclude='*~' --exclude='*.patch' /sys/i386/i386/machdep.c native_cx/i386/i386/machdep.c
--- sys/i386/i386/machdep.c.orig 2011-04-08 05:03:12.282748503 +0900
+++ sys/i386/i386/machdep.c 2011-04-08 05:47:54.071749599 +0900
@@ -1226,6 +1226,7 @@ cpu_halt(void)
void (*cpu_idle_hook)(void) = NULL; /* ACPI idle hook. */
static int cpu_ident_amdc1e = 0; /* AMD C1E supported. */
+static int cpu_ident_mwait_intr_ext = 0;
static int idle_mwait = 1; /* Use MONITOR/MWAIT for short idle. */
TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
@@ -1282,6 +1283,11 @@ cpu_idle_hlt(int busy)
#define MWAIT_C3 0x20
#define MWAIT_C4 0x30
+/*
+ * MWAIT extensions.
+ */
+#define MWAIT_INTR 0x01 /* an INT breaks MWAIT even if it's disabled. */
+
static void
cpu_idle_mwait(int busy)
{
@@ -1297,6 +1303,25 @@ cpu_idle_mwait(int busy)
*state = STATE_RUNNING;
}
+void cpu_idle_mwait_hint(uint32_t); /* XXX */
+void
+cpu_idle_mwait_hint(uint32_t hint)
+{
+ int *state;
+
+ state = (int *)PCPU_PTR(monitorbuf);
+ *state = STATE_MWAIT;
+ cpu_monitor(state, 0, 0);
+ if (*state == STATE_MWAIT) {
+ if (cpu_ident_mwait_intr_ext)
+ cpu_mwait(MWAIT_INTR, hint);
+ else {
+ enable_intr();
+ cpu_mwait(0, hint);
+ }
+ }
+}
+
static void
cpu_idle_spin(int busy)
{
@@ -1341,6 +1366,21 @@ cpu_probe_amdc1e(void)
}
}
+/*
+ * Check for MWAIT INTR-BREAK extension.
+ */
+static void
+cpu_probe_mwait_ext(void)
+{
+ u_int regs[4];
+
+ if (!(cpu_feature2 & CPUID2_MON))
+ return;
+ do_cpuid(5 /*CPUID_MONITOR*/, regs);
+ if ((regs[2/*ecx*/] & 0x03/*MON_EXT|MON_INTR*/) == 0x03/*both set*/)
+ cpu_ident_mwait_intr_ext = 1;
+}
+
#ifdef XEN
void (*cpu_idle_fn)(int) = cpu_idle_hlt;
#else
@@ -2747,6 +2787,7 @@ init386(first)
cpu_probe_amdc1e();
cpu_probe_cmpxchg8b();
+ cpu_probe_mwait_ext();
}
#else
@@ -3024,6 +3065,7 @@ init386(first)
cpu_probe_amdc1e();
cpu_probe_cmpxchg8b();
+ cpu_probe_mwait_ext();
}
#endif
diff -rup --exclude='*~' --exclude='*.patch' /sys/i386/include/acpica_machdep.h native_cx/i386/include/acpica_machdep.h
--- sys/i386/include/acpica_machdep.h 2009-09-24 00:42:35.000000000 +0900
+++ sys/i386/include/acpica_machdep.h 2010-11-01 14:15:16.956638758 +0900
@@ -94,9 +94,11 @@ extern int acpi_release_global_lock(uint
#define COMPILER_DEPENDENT_INT64 long long
#define COMPILER_DEPENDENT_UINT64 unsigned long long
#define ACPI_USE_NATIVE_DIVIDE
+#define ACPI_USE_NATIVE_CX
void acpi_SetDefaultIntrModel(int model);
void acpi_cpu_c1(void);
+void acpi_cpu_cx_native(uint16_t code, uint64_t arg0, uint8_t arg1);
void *acpi_map_table(vm_paddr_t pa, const char *sig);
void acpi_unmap_table(void *table);
vm_paddr_t acpi_find_table(const char *sig);
More information about the freebsd-acpi
mailing list