git: ad15eeeaba30 - main - x86/xen: fallback when VCPUOP_send_nmi is not available
Date: Mon, 17 Jan 2022 10:09:28 UTC
The branch main has been updated by royger:
URL: https://cgit.FreeBSD.org/src/commit/?id=ad15eeeaba30cdf10036b7869d27441cfc9f0674
commit ad15eeeaba30cdf10036b7869d27441cfc9f0674
Author: Roger Pau Monné <royger@FreeBSD.org>
AuthorDate: 2022-01-13 13:48:14 +0000
Commit: Roger Pau Monné <royger@FreeBSD.org>
CommitDate: 2022-01-17 10:06:40 +0000
x86/xen: fallback when VCPUOP_send_nmi is not available
It has been reported that on some AWS instances VCPUOP_send_nmi
returns -38 (ENOSYS). The hypercall is only available for HVM guests
in Xen 4.7 and newer. Add a fallback to use the native NMI sending
procedure when VCPUOP_send_nmi is not available, so that the NMI is
not lost.
Reported and Tested by: avg
MFC after: 1 week
Fixes: b2802351c162 ('xen: fix dispatching of NMIs')
Sponsored by: Citrix Systems R&D
---
sys/x86/xen/xen_apic.c | 52 +++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 41 insertions(+), 11 deletions(-)
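In outline, the patch makes send_nmi() return the hypercall status and latches over to the native LAPIC path the first time the hypercall fails. A minimal standalone sketch of that pattern follows; hypercall_send_nmi() and native_send_nmi() are hypothetical stand-ins, not the kernel's routines (Xen hypercalls return negative errno values, hence the -38 == -ENOSYS in the report):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for HYPERVISOR_vcpu_op(VCPUOP_send_nmi, ...). */
static int
hypercall_send_nmi(int dest)
{

	(void)dest;
	return (-ENOSYS);	/* pretend the hypervisor lacks the op (-38) */
}

/* Hypothetical stand-in for the native (emulated LAPIC) delivery path. */
static void
native_send_nmi(int dest)
{

	printf("NMI to %d via the emulated LAPIC\n", dest);
}

static void
send_nmi_with_fallback(int dest)
{
	/* Latch: once the hypercall has failed, never try it again. */
	static bool pvnmi = true;

	if (pvnmi) {
		int rc = hypercall_send_nmi(dest);

		if (rc == 0)
			return;
		printf("hypercall failed (%d), falling back\n", rc);
		pvnmi = false;
	}
	native_send_nmi(dest);
}

int
main(void)
{

	send_nmi_with_fallback(-1);	/* tries the hypercall, falls back */
	send_nmi_with_fallback(-1);	/* goes straight to the native path */
	return (0);
}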
diff --git a/sys/x86/xen/xen_apic.c b/sys/x86/xen/xen_apic.c
index b553e5248716..2efa5a3c2345 100644
--- a/sys/x86/xen/xen_apic.c
+++ b/sys/x86/xen/xen_apic.c
@@ -107,6 +107,14 @@ static struct xen_ipi_handler xen_ipis[] =
 };
 #endif
 
+/*
+ * Save previous (native) handler as a fallback. Xen < 4.7 doesn't support
+ * VCPUOP_send_nmi for HVM guests, and thus we need a fallback in that case:
+ *
+ * https://lists.freebsd.org/archives/freebsd-xen/2022-January/000032.html
+ */
+void (*native_ipi_vectored)(u_int, int);
+
 /*------------------------------- Per-CPU Data -------------------------------*/
 #ifdef SMP
 DPCPU_DEFINE(xen_intr_handle_t, ipi_handle[nitems(xen_ipis)]);
@@ -273,10 +281,11 @@ xen_pv_lapic_ipi_raw(register_t icrlo, u_int dest)
 }
 
 #define PCPU_ID_GET(id, field) (pcpu_find(id)->pc_##field)
-static void
+static int
 send_nmi(int dest)
 {
 	unsigned int cpu;
+	int rc = 0;
 
 	/*
 	 * NMIs are not routed over event channels, and instead delivered as on
@@ -286,24 +295,33 @@ send_nmi(int dest)
 	 */
 	switch(dest) {
 	case APIC_IPI_DEST_SELF:
-		HYPERVISOR_vcpu_op(VCPUOP_send_nmi, PCPU_GET(vcpu_id), NULL);
+		rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, PCPU_GET(vcpu_id), NULL);
 		break;
 	case APIC_IPI_DEST_ALL:
-		CPU_FOREACH(cpu)
-			HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
+		CPU_FOREACH(cpu) {
+			rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
 			    PCPU_ID_GET(cpu, vcpu_id), NULL);
+			if (rc != 0)
+				break;
+		}
 		break;
 	case APIC_IPI_DEST_OTHERS:
-		CPU_FOREACH(cpu)
-			if (cpu != PCPU_GET(cpuid))
-				HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
+		CPU_FOREACH(cpu) {
+			if (cpu != PCPU_GET(cpuid)) {
+				rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
 				    PCPU_ID_GET(cpu, vcpu_id), NULL);
+				if (rc != 0)
+					break;
+			}
+		}
 		break;
 	default:
-		HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
+		rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
 		    PCPU_ID_GET(apic_cpuid(dest), vcpu_id), NULL);
 		break;
 	}
+
+	return rc;
 }
 
 #undef PCPU_ID_GET
@@ -312,9 +330,21 @@ xen_pv_lapic_ipi_vectored(u_int vector, int dest)
 {
 	xen_intr_handle_t *ipi_handle;
 	int ipi_idx, to_cpu, self;
+	static bool pvnmi = true;
 
 	if (vector >= IPI_NMI_FIRST) {
-		send_nmi(dest);
+		if (pvnmi) {
+			int rc = send_nmi(dest);
+
+			if (rc != 0) {
+				printf(
+    "Sending NMI using hypercall failed (%d) switching to APIC\n", rc);
+				pvnmi = false;
+				native_ipi_vectored(vector, dest);
+			}
+		} else
+			native_ipi_vectored(vector, dest);
+
 		return;
 	}
 
@@ -580,8 +610,8 @@ xen_setup_cpus(void)
 		xen_cpu_ipi_init(i);
 
 	/* Set the xen pv ipi ops to replace the native ones */
-	if (xen_hvm_domain())
-		apic_ops.ipi_vectored = xen_pv_lapic_ipi_vectored;
+	native_ipi_vectored = apic_ops.ipi_vectored;
+	apic_ops.ipi_vectored = xen_pv_lapic_ipi_vectored;
 }
 
 /* Switch to using PV IPIs as soon as the vcpu_id is set. */
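The last hunk is the other half of the fix: xen_setup_cpus() now saves the native handler in native_ipi_vectored before installing the PV one, so xen_pv_lapic_ipi_vectored() always has somewhere to chain to. A minimal sketch of that save-and-interpose idiom, with illustrative names only (struct apic_ops here is a simplified stand-in, not the kernel's definition):

#include <stdio.h>

/* Simplified stand-in for the kernel's APIC ops table. */
struct apic_ops {
	void	(*ipi_vectored)(unsigned int vector, int dest);
};

static void
native_ipi(unsigned int vector, int dest)
{

	printf("native IPI: vector %u, dest %d\n", vector, dest);
}

/* Saved copy of the original pointer; the replacement chains to it. */
static void (*saved_native)(unsigned int, int);

static void
pv_ipi(unsigned int vector, int dest)
{

	/* Pretend the PV path failed and chain to the saved handler. */
	saved_native(vector, dest);
}

int
main(void)
{
	struct apic_ops ops = { .ipi_vectored = native_ipi };

	/* Save first, then interpose, mirroring the last hunk above. */
	saved_native = ops.ipi_vectored;
	ops.ipi_vectored = pv_ipi;

	ops.ipi_vectored(2, -1);	/* dispatches pv_ipi -> native_ipi */
	return (0);
}

Saving before overwriting is what matters: the previous code never kept the native pointer, so once the PV handler was installed there was no way back when the hypercall returned -38.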