git: 78a3a1e78ad8 - main - amd64/vmm: Factor vcpu_notify_event() into two functions

From: Mark Johnston <markj@FreeBSD.org>
Date: Tue, 04 Nov 2025 14:36:02 UTC
The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=78a3a1e78ad8812bcf82e30b52ba2f21c49a3407

commit 78a3a1e78ad8812bcf82e30b52ba2f21c49a3407
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2025-11-04 13:52:21 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2025-11-04 13:52:21 +0000

    amd64/vmm: Factor vcpu_notify_event() into two functions
    
    vcpu_notify_event() previously took a boolean parameter that determined
    whether the implementation should try to use a posted interrupt.  On
    arm64 and riscv, the implementation of vcpu_notify_event() is otherwise
    identical to that of amd64.
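
    For illustration, a sketch of how the old interface was used (the
    prototype appears in the vmm.h hunk below); the boolean selected the
    notification mechanism:

        /* Old: one entry point, behavior selected by a flag. */
        void vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr);

        vcpu_notify_event(vcpu, true);  /* try a posted interrupt */
        vcpu_notify_event(vcpu, false); /* plain IPI wakeup */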
    
    With the aim of deduplicating vcpu state management code, introduce a
    separate amd64-only function, vcpu_notify_lapic(), which tries to use
    posted interrupts.  This duplicates a small amount of logic from
    vcpu_notify_event_locked().
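
    The new function, as added in the vmm.c hunk below; its posted-interrupt
    fast path duplicates the "running on another host CPU" check from
    vcpu_notify_event_locked():

        void
        vcpu_notify_lapic(struct vcpu *vcpu)
        {
                vcpu_lock(vcpu);
                if (vcpu->state == VCPU_RUNNING && vcpu->hostcpu != curcpu)
                        vlapic_post_intr(vcpu->vlapic, vcpu->hostcpu,
                            vmm_ipinum);
                else
                        vcpu_notify_event_locked(vcpu);
                vcpu_unlock(vcpu);
        }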
    
    Then, fix up callers.  No functional change intended.
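
    The caller-side changes are mechanical (examples taken from the hunks
    below):

        /* Callers that requested a posted interrupt: */
        -	vcpu_notify_event(vlapic->vcpu, true);
        +	vcpu_notify_lapic(vlapic->vcpu);

        /* All other callers: */
        -	vcpu_notify_event(vcpu, false);
        +	vcpu_notify_event(vcpu);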
    
    Reviewed by:    corvink, jhb
    MFC after:      2 weeks
    Sponsored by:   The FreeBSD Foundation
    Sponsored by:   Klara, Inc.
    Differential Revision:  https://reviews.freebsd.org/D53419
---
 sys/amd64/include/vmm.h   |  3 ++-
 sys/amd64/vmm/io/vlapic.c |  2 +-
 sys/amd64/vmm/vmm.c       | 42 ++++++++++++++++++++++++------------------
 sys/amd64/vmm/vmm_lapic.c |  2 +-
 4 files changed, 28 insertions(+), 21 deletions(-)

diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index ad67510fecf3..c7baa5e4c54a 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -383,7 +383,8 @@ vcpu_should_yield(struct vcpu *vcpu)
 #endif
 
 void *vcpu_stats(struct vcpu *vcpu);
-void vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr);
+void vcpu_notify_event(struct vcpu *vcpu);
+void vcpu_notify_lapic(struct vcpu *vcpu);
 struct vm_mem *vm_mem(struct vm *vm);
 struct vatpic *vm_atpic(struct vm *vm);
 struct vatpit *vm_atpit(struct vm *vm);
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
index 9879dfa164a4..afd5045de574 100644
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -456,7 +456,7 @@ vlapic_fire_lvt(struct vlapic *vlapic, u_int lvt)
 			return (0);
 		}
 		if (vlapic_set_intr_ready(vlapic, vec, false))
-			vcpu_notify_event(vlapic->vcpu, true);
+			vcpu_notify_lapic(vlapic->vcpu);
 		break;
 	case APIC_LVT_DM_NMI:
 		vm_inject_nmi(vlapic->vcpu);
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index f2bea0d82b5c..4f3057492fc9 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -274,7 +274,7 @@ u_int vm_maxcpu;
 SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &vm_maxcpu, 0, "Maximum number of vCPUs");
 
-static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
+static void vcpu_notify_event_locked(struct vcpu *vcpu);
 
 /* global statistics */
 VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus");
@@ -1028,7 +1028,7 @@ vcpu_wait_idle(struct vcpu *vcpu)
 	KASSERT(vcpu->state != VCPU_IDLE, ("vcpu already idle"));
 
 	vcpu->reqidle = 1;
-	vcpu_notify_event_locked(vcpu, false);
+	vcpu_notify_event_locked(vcpu);
 	VMM_CTR1(vcpu, "vcpu state change from %s to "
 	    "idle requested", vcpu_state2str(vcpu->state));
 	msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
@@ -1509,7 +1509,7 @@ vm_handle_suspend(struct vcpu *vcpu, bool *retu)
 	 */
 	for (i = 0; i < vm->maxcpus; i++) {
 		if (CPU_ISSET(i, &vm->suspended_cpus)) {
-			vcpu_notify_event(vm_vcpu(vm, i), false);
+			vcpu_notify_event(vm_vcpu(vm, i));
 		}
 	}
 
@@ -1583,7 +1583,7 @@ vm_suspend(struct vm *vm, enum vm_suspend_how how)
 	 */
 	for (i = 0; i < vm->maxcpus; i++) {
 		if (CPU_ISSET(i, &vm->active_cpus))
-			vcpu_notify_event(vm_vcpu(vm, i), false);
+			vcpu_notify_event(vm_vcpu(vm, i));
 	}
 
 	return (0);
@@ -2063,7 +2063,7 @@ vm_inject_nmi(struct vcpu *vcpu)
 {
 
 	vcpu->nmi_pending = 1;
-	vcpu_notify_event(vcpu, false);
+	vcpu_notify_event(vcpu);
 	return (0);
 }
 
@@ -2090,7 +2090,7 @@ vm_inject_extint(struct vcpu *vcpu)
 {
 
 	vcpu->extint_pending = 1;
-	vcpu_notify_event(vcpu, false);
+	vcpu_notify_event(vcpu);
 	return (0);
 }
 
@@ -2261,14 +2261,14 @@ vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
 		vm->debug_cpus = vm->active_cpus;
 		for (int i = 0; i < vm->maxcpus; i++) {
 			if (CPU_ISSET(i, &vm->active_cpus))
-				vcpu_notify_event(vm_vcpu(vm, i), false);
+				vcpu_notify_event(vm_vcpu(vm, i));
 		}
 	} else {
 		if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
 			return (EINVAL);
 
 		CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
-		vcpu_notify_event(vcpu, false);
+		vcpu_notify_event(vcpu);
 	}
 	return (0);
 }
@@ -2376,7 +2376,7 @@ vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
  *   to the host_cpu to cause the vcpu to trap into the hypervisor.
  */
 static void
-vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
+vcpu_notify_event_locked(struct vcpu *vcpu)
 {
 	int hostcpu;
 
@@ -2384,12 +2384,7 @@ vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
 	if (vcpu->state == VCPU_RUNNING) {
 		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
 		if (hostcpu != curcpu) {
-			if (lapic_intr) {
-				vlapic_post_intr(vcpu->vlapic, hostcpu,
-				    vmm_ipinum);
-			} else {
-				ipi_cpu(hostcpu, vmm_ipinum);
-			}
+			ipi_cpu(hostcpu, vmm_ipinum);
 		} else {
 			/*
 			 * If the 'vcpu' is running on 'curcpu' then it must
@@ -2407,10 +2402,21 @@ vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
 }
 
 void
-vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr)
+vcpu_notify_event(struct vcpu *vcpu)
+{
+	vcpu_lock(vcpu);
+	vcpu_notify_event_locked(vcpu);
+	vcpu_unlock(vcpu);
+}
+
+void
+vcpu_notify_lapic(struct vcpu *vcpu)
 {
 	vcpu_lock(vcpu);
-	vcpu_notify_event_locked(vcpu, lapic_intr);
+	if (vcpu->state == VCPU_RUNNING && vcpu->hostcpu != curcpu)
+		vlapic_post_intr(vcpu->vlapic, vcpu->hostcpu, vmm_ipinum);
+	else
+		vcpu_notify_event_locked(vcpu);
 	vcpu_unlock(vcpu);
 }
 
@@ -2472,7 +2478,7 @@ restart:
 	 */
 	for (i = 0; i < vm->maxcpus; i++) {
 		if (CPU_ISSET(i, &dest))
-			vcpu_notify_event(vm_vcpu(vm, i), false);
+			vcpu_notify_event(vm_vcpu(vm, i));
 	}
 
 	return (vm_handle_rendezvous(vcpu));
diff --git a/sys/amd64/vmm/vmm_lapic.c b/sys/amd64/vmm/vmm_lapic.c
index 0cae01f172ec..63bdee69bb59 100644
--- a/sys/amd64/vmm/vmm_lapic.c
+++ b/sys/amd64/vmm/vmm_lapic.c
@@ -61,7 +61,7 @@ lapic_set_intr(struct vcpu *vcpu, int vector, bool level)
 
 	vlapic = vm_lapic(vcpu);
 	if (vlapic_set_intr_ready(vlapic, vector, level))
-		vcpu_notify_event(vcpu, true);
+		vcpu_notify_lapic(vcpu);
 	return (0);
 }