git: 8e1395e88b53 - stable/13 - vmm: Use struct vcpu with the vmm_stat API.

From: John Baldwin <jhb@FreeBSD.org>
Date: Thu, 26 Jan 2023 22:11:49 UTC
The branch stable/13 has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=8e1395e88b53eca8ac0862940904708e5d8e21d9

commit 8e1395e88b53eca8ac0862940904708e5d8e21d9
Author:     John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2022-11-18 18:01:18 +0000
Commit:     John Baldwin <jhb@FreeBSD.org>
CommitDate: 2023-01-26 21:46:54 +0000

    vmm: Use struct vcpu with the vmm_stat API.
    
    The function callbacks still use struct vm and a vCPU index.
    
    Reviewed by:    corvink, markj
    Differential Revision:  https://reviews.freebsd.org/D37157
    
    (cherry picked from commit 3dc3d32ad67b38ab44ed4a7cf3020a0741b47ec1)
---
 sys/amd64/include/vmm.h   |  2 +-
 sys/amd64/vmm/amd/svm.c   | 32 ++++++++++++++++----------------
 sys/amd64/vmm/intel/vmx.c | 38 +++++++++++++++++++-------------------
 sys/amd64/vmm/io/vlapic.c | 18 ++++++++----------
 sys/amd64/vmm/vmm.c       | 24 ++++++++++++------------
 sys/amd64/vmm/vmm_stat.c  |  2 +-
 sys/amd64/vmm/vmm_stat.h  | 20 ++++++++++----------
 7 files changed, 67 insertions(+), 69 deletions(-)
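
The change is mechanical at each call site: the vmm_stat helpers are now
keyed by a struct vcpu pointer rather than a (struct vm *, int vcpuid)
pair, so callers that already hold a vcpu pointer simply drop the extra
arguments. The shape of the change, summarized from the hunks below (an
svm.c call site, the vmm_stat.h inline helper, and a caller that still
starts from a vCPU index):

    /* Before: stats were looked up via the vm and a vCPU index. */
    vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_COUNT, 1);

    /* After: the vcpu object itself carries its stats. */
    vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1);

    /* The inline helper now takes the vcpu directly ... */
    static void __inline
    vmm_stat_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t x)
    {
    #ifdef VMM_KEEP_STATS
            vmm_stat_array_incr(vcpu, vst, 0, x);
    #endif
    }

    /* ... and callers holding only an index translate via vm_vcpu(). */
    vmm_stat_incr(vm_vcpu(vm, vcpuid), VMEXIT_RENDEZVOUS, 1);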

diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 7548fea76696..800114151d95 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -380,7 +380,7 @@ vcpu_should_yield(struct vm *vm, int vcpu)
 }
 #endif
 
-void *vcpu_stats(struct vm *vm, int vcpu);
+void *vcpu_stats(struct vcpu *vcpu);
 void vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr);
 struct vmspace *vm_get_vmspace(struct vm *vm);
 struct vatpic *vm_atpic(struct vm *vm);
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index 5eeef4b9a0fe..f55262faee4f 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -1010,7 +1010,7 @@ svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
 	 */
 	SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo,
 	    VMCB_EXITINTINFO_VECTOR(intinfo));
-	vmm_stat_incr(svm_sc->vm, vcpuid, VCPU_EXITINTINFO, 1);
+	vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1);
 	vm_exit_intinfo(svm_sc->vm, vcpuid, intinfo);
 }
 
@@ -1355,7 +1355,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 	vmexit->rip = state->rip;
 	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;
 
-	vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_COUNT, 1);
+	vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1);
 
 	/*
 	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
@@ -1387,18 +1387,18 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 		handled = 1;
 		break;
 	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
-		vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_VINTR, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1);
 		handled = 1;
 		break;
 	case VMCB_EXIT_INTR:	/* external interrupt */
-		vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_EXTINT, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1);
 		handled = 1;
 		break;
 	case VMCB_EXIT_NMI:	/* external NMI */
 		handled = 1;
 		break;
 	case 0x40 ... 0x5F:
-		vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_EXCEPTION, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
 		reflect = 1;
 		idtvec = code - 0x40;
 		switch (idtvec) {
@@ -1473,7 +1473,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 		retu = false;	
 
 		if (info1) {
-			vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_WRMSR, 1);
+			vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
 			val = (uint64_t)edx << 32 | eax;
 			SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val);
 			if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
@@ -1488,7 +1488,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 			}
 		} else {
 			SVM_CTR1(vcpu, "rdmsr %#x", ecx);
-			vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_RDMSR, 1);
+			vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
 			if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
 				vmexit->exitcode = VM_EXITCODE_RDMSR;
 				vmexit->u.msr.code = ecx;
@@ -1502,21 +1502,21 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 		break;
 	case VMCB_EXIT_IO:
 		handled = svm_handle_io(vcpu, vmexit);
-		vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_INOUT, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1);
 		break;
 	case VMCB_EXIT_CPUID:
-		vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_CPUID, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
 		handled = x86_emulate_cpuid(svm_sc->vm, vcpuid, &state->rax,
 		    &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx);
 		break;
 	case VMCB_EXIT_HLT:
-		vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_HLT, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
 		vmexit->exitcode = VM_EXITCODE_HLT;
 		vmexit->u.hlt.rflags = state->rflags;
 		break;
 	case VMCB_EXIT_PAUSE:
 		vmexit->exitcode = VM_EXITCODE_PAUSE;
-		vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_PAUSE, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1);
 		break;
 	case VMCB_EXIT_NPF:
 		/* EXITINFO2 contains the faulting guest physical address */
@@ -1528,13 +1528,13 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 			vmexit->exitcode = VM_EXITCODE_PAGING;
 			vmexit->u.paging.gpa = info2;
 			vmexit->u.paging.fault_type = npf_fault_type(info1);
-			vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_NESTED_FAULT, 1);
+			vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1);
 			SVM_CTR3(vcpu, "nested page fault "
 			    "on gpa %#lx/%#lx at rip %#lx",
 			    info2, info1, state->rip);
 		} else if (svm_npf_emul_fault(info1)) {
 			svm_handle_inst_emul(vmcb, info2, vmexit);
-			vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_INST_EMUL, 1);
+			vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1);
 			SVM_CTR3(vcpu, "inst_emul fault "
 			    "for gpa %#lx/%#lx at rip %#lx",
 			    info2, info1, state->rip);
@@ -1565,7 +1565,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 		handled = 1;
 		break;
 	default:
-		vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_UNKNOWN, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
 		break;
 	}	
 
@@ -1610,7 +1610,7 @@ svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
 		VMCB_EXITINTINFO_VECTOR(intinfo),
 		VMCB_EXITINTINFO_EC(intinfo),
 		VMCB_EXITINTINFO_EC_VALID(intinfo));
-	vmm_stat_incr(svm_sc->vm, vcpuid, VCPU_INTINFO_INJECTED, 1);
+	vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1);
 	SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo);
 }
 
@@ -2044,7 +2044,7 @@ svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 		 * migration should take this case into account.
 		 */
 		vcpu->lastcpu = curcpu;
-		vmm_stat_incr(vm, vcpuid, VCPU_MIGRATIONS, 1);
+		vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
 	}
 
 	svm_msr_guest_enter(svm_sc, vcpu);
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index ec1b5c80dcff..abedecbecd89 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -1314,7 +1314,7 @@ vmx_invvpid(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap, int running)
 		invvpid_desc.vpid = vmxstate->vpid;
 		invvpid_desc.linear_addr = 0;
 		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
-		vmm_stat_incr(vmx->vm, vcpu->vcpuid, VCPU_INVVPID_DONE, 1);
+		vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_DONE, 1);
 	} else {
 		/*
 		 * The invvpid can be skipped if an invept is going to
@@ -1322,7 +1322,7 @@ vmx_invvpid(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap, int running)
 		 * will invalidate combined mappings tagged with
 		 * 'vmx->eptp' for all vpids.
 		 */
-		vmm_stat_incr(vmx->vm, vcpu->vcpuid, VCPU_INVVPID_SAVED, 1);
+		vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_SAVED, 1);
 	}
 }
 
@@ -1337,7 +1337,7 @@ vmx_set_pcpu_defaults(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap)
 
 	vmxstate->lastcpu = curcpu;
 
-	vmm_stat_incr(vmx->vm, vcpu->vcpuid, VCPU_MIGRATIONS, 1);
+	vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
 
 	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
 	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
@@ -2384,7 +2384,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 	reason = vmexit->u.vmx.exit_reason;
 	vmexit->exitcode = VM_EXITCODE_BOGUS;
 
-	vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_COUNT, 1);
+	vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1);
 	SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpuid, vmexit);
 
 	/*
@@ -2495,7 +2495,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		    ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
 		break;
 	case EXIT_REASON_CR_ACCESS:
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_CR_ACCESS, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_CR_ACCESS, 1);
 		SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpuid, vmexit, qual);
 		switch (qual & 0xf) {
 		case 0:
@@ -2510,7 +2510,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		}
 		break;
 	case EXIT_REASON_RDMSR:
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_RDMSR, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
 		retu = false;
 		ecx = vmxctx->guest_rcx;
 		VMX_CTR1(vcpu, "rdmsr 0x%08x", ecx);
@@ -2528,7 +2528,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		}
 		break;
 	case EXIT_REASON_WRMSR:
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_WRMSR, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
 		retu = false;
 		eax = vmxctx->guest_rax;
 		ecx = vmxctx->guest_rcx;
@@ -2552,7 +2552,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		}
 		break;
 	case EXIT_REASON_HLT:
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_HLT, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
 		SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpuid, vmexit);
 		vmexit->exitcode = VM_EXITCODE_HLT;
 		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
@@ -2563,18 +2563,18 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 			vmexit->u.hlt.intr_status = 0;
 		break;
 	case EXIT_REASON_MTF:
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_MTRAP, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_MTRAP, 1);
 		SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpuid, vmexit);
 		vmexit->exitcode = VM_EXITCODE_MTRAP;
 		vmexit->inst_length = 0;
 		break;
 	case EXIT_REASON_PAUSE:
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_PAUSE, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1);
 		SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpuid, vmexit);
 		vmexit->exitcode = VM_EXITCODE_PAUSE;
 		break;
 	case EXIT_REASON_INTR_WINDOW:
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INTR_WINDOW, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_INTR_WINDOW, 1);
 		SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpuid, vmexit);
 		vmx_clear_int_window_exiting(vcpu);
 		return (1);
@@ -2607,7 +2607,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		 * This is special. We want to treat this as an 'handled'
 		 * VM-exit but not increment the instruction pointer.
 		 */
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_EXTINT, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1);
 		return (1);
 	case EXIT_REASON_NMI_WINDOW:
 		SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpuid, vmexit);
@@ -2615,10 +2615,10 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		if (vm_nmi_pending(vmx->vm, vcpuid))
 			vmx_inject_nmi(vmx, vcpu);
 		vmx_clear_nmi_window_exiting(vcpu);
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_NMI_WINDOW, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_NMI_WINDOW, 1);
 		return (1);
 	case EXIT_REASON_INOUT:
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INOUT, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1);
 		vmexit->exitcode = VM_EXITCODE_INOUT;
 		vmexit->u.inout.bytes = (qual & 0x7) + 1;
 		vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
@@ -2641,12 +2641,12 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpuid, vmexit);
 		break;
 	case EXIT_REASON_CPUID:
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_CPUID, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
 		SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpuid, vmexit);
 		handled = vmx_handle_cpuid(vmx->vm, vcpuid, vmxctx);
 		break;
 	case EXIT_REASON_EXCEPTION:
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_EXCEPTION, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
 		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
 		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
 		    ("VM exit interruption info invalid: %#x", intr_info));
@@ -2740,12 +2740,12 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 			vmexit->inst_length = 0;
 			vmexit->u.paging.gpa = gpa;
 			vmexit->u.paging.fault_type = ept_fault_type(qual);
-			vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_NESTED_FAULT, 1);
+			vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1);
 			SDT_PROBE5(vmm, vmx, exit, nestedfault,
 			    vmx, vcpuid, vmexit, gpa, qual);
 		} else if (ept_emulation_fault(qual)) {
 			vmexit_inst_emul(vmexit, gpa, vmcs_gla());
-			vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INST_EMUL, 1);
+			vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1);
 			SDT_PROBE4(vmm, vmx, exit, mmiofault,
 			    vmx, vcpuid, vmexit, gpa);
 		}
@@ -2821,7 +2821,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 	default:
 		SDT_PROBE4(vmm, vmx, exit, unknown,
 		    vmx, vcpuid, vmexit, reason);
-		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_UNKNOWN, 1);
+		vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
 		break;
 	}
 
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
index b3711685b70c..fdd971cc03c9 100644
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -600,7 +600,7 @@ vlapic_process_eoi(struct vlapic *vlapic)
 		}
 	}
 	VLAPIC_CTR0(vlapic, "Gratuitous EOI");
-	vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_GRATUITOUS_EOI, 1);
+	vmm_stat_incr(vlapic->vcpu, VLAPIC_GRATUITOUS_EOI, 1);
 }
 
 static __inline int
@@ -636,7 +636,7 @@ vlapic_set_error(struct vlapic *vlapic, uint32_t mask, bool lvt_error)
 		return;
 
 	if (vlapic_fire_lvt(vlapic, APIC_LVT_ERROR)) {
-		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_ERROR, 1);
+		vmm_stat_incr(vlapic->vcpu, VLAPIC_INTR_ERROR, 1);
 	}
 }
 
@@ -650,7 +650,7 @@ vlapic_fire_timer(struct vlapic *vlapic)
 
 	if (vlapic_fire_lvt(vlapic, APIC_LVT_TIMER)) {
 		VLAPIC_CTR0(vlapic, "vlapic timer fired");
-		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_TIMER, 1);
+		vmm_stat_incr(vlapic->vcpu, VLAPIC_INTR_TIMER, 1);
 	}
 }
 
@@ -662,7 +662,7 @@ vlapic_fire_cmci(struct vlapic *vlapic)
 {
 
 	if (vlapic_fire_lvt(vlapic, APIC_LVT_CMCI)) {
-		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_CMC, 1);
+		vmm_stat_incr(vlapic->vcpu, VLAPIC_INTR_CMC, 1);
 	}
 }
 
@@ -701,8 +701,8 @@ vlapic_trigger_lvt(struct vlapic *vlapic, int vector)
 	case APIC_LVT_THERMAL:
 	case APIC_LVT_CMCI:
 		if (vlapic_fire_lvt(vlapic, vector)) {
-			vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
-			    LVTS_TRIGGERRED, vector, 1);
+			vmm_stat_array_incr(vlapic->vcpu, LVTS_TRIGGERRED,
+			    vector, 1);
 		}
 		break;
 	default:
@@ -1102,8 +1102,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
 
 		CPU_FOREACH_ISSET(i, &dmask) {
 			lapic_intr_edge(vlapic->vm, i, vec);
-			vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
-			    IPIS_SENT, i, 1);
+			vmm_stat_array_incr(vlapic->vcpu, IPIS_SENT, i, 1);
 			VLAPIC_CTR2(vlapic,
 			    "vlapic sending ipi %d to vcpuid %d", vec, i);
 		}
@@ -1238,8 +1237,7 @@ vlapic_self_ipi_handler(struct vlapic *vlapic, uint64_t val)
 
 	vec = val & 0xff;
 	lapic_intr_edge(vlapic->vm, vlapic->vcpuid, vec);
-	vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid, IPIS_SENT,
-	    vlapic->vcpuid, 1);
+	vmm_stat_array_incr(vlapic->vcpu, IPIS_SENT, vlapic->vcpuid, 1);
 	VLAPIC_CTR1(vlapic, "vlapic self-ipi %d", vec);
 }
 
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 7f9072b7200d..715eaaa8ecaf 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1447,7 +1447,7 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
 		 */
 		msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
 		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
-		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
+		vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t);
 		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
 			vcpu_unlock(vcpu);
 			error = thread_check_susp(td, false);
@@ -1727,7 +1727,7 @@ vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
 	vmexit->rip = rip;
 	vmexit->inst_length = 0;
 	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
-	vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
+	vmm_stat_incr(vm_vcpu(vm, vcpuid), VMEXIT_RENDEZVOUS, 1);
 }
 
 void
@@ -1739,7 +1739,7 @@ vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip)
 	vmexit->rip = rip;
 	vmexit->inst_length = 0;
 	vmexit->exitcode = VM_EXITCODE_REQIDLE;
-	vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1);
+	vmm_stat_incr(vm_vcpu(vm, vcpuid), VMEXIT_REQIDLE, 1);
 }
 
 void
@@ -1751,7 +1751,7 @@ vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
 	vmexit->rip = rip;
 	vmexit->inst_length = 0;
 	vmexit->exitcode = VM_EXITCODE_BOGUS;
-	vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
+	vmm_stat_incr(vm_vcpu(vm, vcpuid), VMEXIT_ASTPENDING, 1);
 }
 
 int
@@ -1802,7 +1802,7 @@ restart:
 
 	save_guest_fpustate(vcpu);
 
-	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
+	vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
 
 	critical_exit();
 
@@ -1860,7 +1860,7 @@ restart:
 	if (error == 0 && retu == false)
 		goto restart;
 
-	vmm_stat_incr(vm, vcpuid, VMEXIT_USERSPACE, 1);
+	vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1);
 	VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);
 
 	/* copy the exit information */
@@ -2244,7 +2244,7 @@ vm_nmi_clear(struct vm *vm, int vcpuid)
 		panic("vm_nmi_clear: inconsistent nmi_pending state");
 
 	vcpu->nmi_pending = 0;
-	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
+	vmm_stat_incr(vcpu, VCPU_NMI_COUNT, 1);
 }
 
 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
@@ -2291,7 +2291,7 @@ vm_extint_clear(struct vm *vm, int vcpuid)
 		panic("vm_extint_clear: inconsistent extint_pending state");
 
 	vcpu->extint_pending = 0;
-	vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
+	vmm_stat_incr(vcpu, VCPU_EXTINT_COUNT, 1);
 }
 
 int
@@ -2530,10 +2530,10 @@ vm_suspended_cpus(struct vm *vm)
 }
 
 void *
-vcpu_stats(struct vm *vm, int vcpuid)
+vcpu_stats(struct vcpu *vcpu)
 {
 
-	return (vm->vcpu[vcpuid].stats);
+	return (vcpu->stats);
 }
 
 int
@@ -2825,7 +2825,7 @@ vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
 {
 
 	if (vcpu == 0) {
-		vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
+		vmm_stat_set(vm_vcpu(vm, vcpu), VMM_MEM_RESIDENT,
 	       	    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
 	}	
 }
@@ -2835,7 +2835,7 @@ vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
 {
 
 	if (vcpu == 0) {
-		vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
+		vmm_stat_set(vm_vcpu(vm, vcpu), VMM_MEM_WIRED,
 	      	    PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
 	}	
 }
diff --git a/sys/amd64/vmm/vmm_stat.c b/sys/amd64/vmm/vmm_stat.c
index 497db4452f3b..b317f5679bf3 100644
--- a/sys/amd64/vmm/vmm_stat.c
+++ b/sys/amd64/vmm/vmm_stat.c
@@ -113,7 +113,7 @@ vmm_stat_copy(struct vm *vm, int vcpu, int index, int count, int *num_stats,
 	}
 
 	/* Copy over the stats */
-	stats = vcpu_stats(vm, vcpu);
+	stats = vcpu_stats(vm_vcpu(vm, vcpu));
 	memcpy(buf, stats + index, tocopy * sizeof(stats[0]));
 	*num_stats = tocopy;
 	return (0);
diff --git a/sys/amd64/vmm/vmm_stat.h b/sys/amd64/vmm/vmm_stat.h
index 0e9c8db8429d..69e7b9367686 100644
--- a/sys/amd64/vmm/vmm_stat.h
+++ b/sys/amd64/vmm/vmm_stat.h
@@ -92,13 +92,13 @@ int	vmm_stat_copy(struct vm *vm, int vcpu, int index, int count,
 int	vmm_stat_desc_copy(int index, char *buf, int buflen);
 
 static void __inline
-vmm_stat_array_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
-		    int statidx, uint64_t x)
+vmm_stat_array_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
+    uint64_t x)
 {
 #ifdef VMM_KEEP_STATS
 	uint64_t *stats;
 
-	stats = vcpu_stats(vm, vcpu);
+	stats = vcpu_stats(vcpu);
 
 	if (vst->index >= 0 && statidx < vst->nelems)
 		stats[vst->index + statidx] += x;
@@ -106,13 +106,13 @@ vmm_stat_array_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
 }
 
 static void __inline
-vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
-		   int statidx, uint64_t val)
+vmm_stat_array_set(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
+    uint64_t val)
 {
 #ifdef VMM_KEEP_STATS
 	uint64_t *stats;
 
-	stats = vcpu_stats(vm, vcpu);
+	stats = vcpu_stats(vcpu);
 
 	if (vst->index >= 0 && statidx < vst->nelems)
 		stats[vst->index + statidx] = val;
@@ -120,20 +120,20 @@ vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
 }
 		   
 static void __inline
-vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
+vmm_stat_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t x)
 {
 
 #ifdef VMM_KEEP_STATS
-	vmm_stat_array_incr(vm, vcpu, vst, 0, x);
+	vmm_stat_array_incr(vcpu, vst, 0, x);
 #endif
 }
 
 static void __inline
-vmm_stat_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t val)
+vmm_stat_set(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t val)
 {
 
 #ifdef VMM_KEEP_STATS
-	vmm_stat_array_set(vm, vcpu, vst, 0, val);
+	vmm_stat_array_set(vcpu, vst, 0, val);
 #endif
 }