git: 3762293b30f3 - stable/13 - vmm: Use struct vcpu in the instruction emulation code.

From: John Baldwin <jhb_at_FreeBSD.org>
Date: Thu, 26 Jan 2023 22:11:53 UTC
The branch stable/13 has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=3762293b30f3c8467c1dfdc375b7602cf778f801

commit 3762293b30f3c8467c1dfdc375b7602cf778f801
Author:     John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2022-11-18 18:02:09 +0000
Commit:     John Baldwin <jhb@FreeBSD.org>
CommitDate: 2023-01-26 21:47:39 +0000

    vmm: Use struct vcpu in the instruction emulation code.
    
    This passes struct vcpu down in place of struct vm and an integer
    vcpu index through the in-kernel instruction emulation code.  To
    minimize userland disruption, helper macros are used for the vCPU
    arguments passed into and through the shared instruction emulation
    code.
    
    A few other APIs used by the instruction emulation code have also been
    updated to accept struct vcpu in the kernel, including
    vm_get/set_register and vm_inject_fault.
    
    Reviewed by:    corvink, markj
    Differential Revision:  https://reviews.freebsd.org/D37161
    
    (cherry picked from commit d3956e46736ffaee5060c9baf0a40f428bc34ec3)
---
 lib/libvmmapi/vmmapi.c                   |   3 +-
 lib/libvmmapi/vmmapi.h                   |   1 +
 sys/amd64/include/vmm.h                  |  54 ++++-
 sys/amd64/include/vmm_instruction_emul.h |  29 ++-
 sys/amd64/vmm/amd/svm.c                  |  16 +-
 sys/amd64/vmm/amd/svm_msr.c              |   4 +-
 sys/amd64/vmm/intel/vmx.c                |  26 +--
 sys/amd64/vmm/intel/vmx_msr.c            |   8 +-
 sys/amd64/vmm/io/vhpet.c                 |   8 +-
 sys/amd64/vmm/io/vhpet.h                 |   4 +-
 sys/amd64/vmm/io/vioapic.c               |  28 +--
 sys/amd64/vmm/io/vioapic.h               |   4 +-
 sys/amd64/vmm/io/vlapic.c                |  18 +-
 sys/amd64/vmm/io/vlapic.h                |   2 +-
 sys/amd64/vmm/vmm.c                      | 125 ++++-------
 sys/amd64/vmm/vmm_dev.c                  |  58 ++---
 sys/amd64/vmm/vmm_instruction_emul.c     | 360 +++++++++++++++----------------
 sys/amd64/vmm/vmm_ioport.c               |   2 +-
 sys/amd64/vmm/vmm_lapic.c                |  16 +-
 sys/amd64/vmm/vmm_lapic.h                |   4 +-
 sys/amd64/vmm/x86.c                      |   2 +-
 21 files changed, 393 insertions(+), 379 deletions(-)

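The shape of the change, distilled into a minimal kernel-side sketch
(example_set_rip is a hypothetical helper, not code from this commit):
callers that previously passed a (struct vm *, int vcpuid) pair, with
each layer revalidating the index, now pass a single struct vcpu
pointer.

    #include <sys/types.h>
    #include <machine/vmm.h>	/* vm_set_register(), vm_inject_gp() */

    /*
     * Hypothetical helper using the new convention.  Before this
     * commit it would have taken (struct vm *vm, int vcpuid) and
     * called vm_set_register(vm, vcpuid, ...) and
     * vm_inject_gp(vm, vcpuid).
     */
    static void
    example_set_rip(struct vcpu *vcpu, uint64_t rip)
    {

    	/* One pointer names both the VM and the vCPU; no index checks. */
    	if (vm_set_register(vcpu, VM_REG_GUEST_RIP, rip) != 0)
    		vm_inject_gp(vcpu);	/* illustrative error handling */
    }
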
diff --git a/lib/libvmmapi/vmmapi.c b/lib/libvmmapi/vmmapi.c
index a65b77300b3a..0c994778dd10 100644
--- a/lib/libvmmapi/vmmapi.c
+++ b/lib/libvmmapi/vmmapi.c
@@ -1671,9 +1671,8 @@ vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
 }
 
 int
-vm_restart_instruction(void *arg, int vcpu)
+vm_restart_instruction(struct vmctx *ctx, int vcpu)
 {
-	struct vmctx *ctx = arg;
 
 	return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &vcpu));
 }
diff --git a/lib/libvmmapi/vmmapi.h b/lib/libvmmapi/vmmapi.h
index 82c3dcdf3118..1be1f19507a9 100644
--- a/lib/libvmmapi/vmmapi.h
+++ b/lib/libvmmapi/vmmapi.h
@@ -238,6 +238,7 @@ int	vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus);
 int	vm_activate_cpu(struct vmctx *ctx, int vcpu);
 int	vm_suspend_cpu(struct vmctx *ctx, int vcpu);
 int	vm_resume_cpu(struct vmctx *ctx, int vcpu);
+int	vm_restart_instruction(struct vmctx *vmctx, int vcpu);
 
 /* CPU topology */
 int	vm_set_topology(struct vmctx *ctx, uint16_t sockets, uint16_t cores,
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 315a1b19f8f6..0224304f16a2 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -251,16 +251,18 @@ int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
 int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
     struct vm_object **objptr);
 vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
-void *vm_gpa_hold(struct vm *, int vcpuid, vm_paddr_t gpa, size_t len,
+void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
+    int prot, void **cookie);
+void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
     int prot, void **cookie);
 void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
     int prot, void **cookie);
 void vm_gpa_release(void *cookie);
 bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);
 
-int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
-int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
-int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
+int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
+int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
+int vm_get_seg_desc(struct vcpu *vcpu, int reg,
 		    struct seg_desc *ret_desc);
 int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
 		    struct seg_desc *desc);
@@ -275,7 +277,7 @@ void vm_extint_clear(struct vm *vm, int vcpuid);
 int vcpu_vcpuid(struct vcpu *vcpu);
 struct vm *vcpu_vm(struct vcpu *vcpu);
 struct vcpu *vm_vcpu(struct vm *vm, int cpu);
-struct vlapic *vm_lapic(struct vm *vm, int cpu);
+struct vlapic *vm_lapic(struct vcpu *vcpu);
 struct vioapic *vm_ioapic(struct vm *vm);
 struct vhpet *vm_hpet(struct vm *vm);
 int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
@@ -286,6 +288,7 @@ int vm_apicid2vcpuid(struct vm *vm, int apicid);
 int vm_activate_cpu(struct vm *vm, int vcpu);
 int vm_suspend_cpu(struct vm *vm, int vcpu);
 int vm_resume_cpu(struct vm *vm, int vcpu);
+int vm_restart_instruction(struct vcpu *vcpu);
 struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
 void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
 void vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip);
@@ -360,12 +363,12 @@ enum vcpu_state {
 
 int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
     bool from_idle);
-enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);
+enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);
 
 static int __inline
 vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
 {
-	return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
+	return (vcpu_get_state(vm_vcpu(vm, vcpu), hostcpu) == VCPU_RUNNING);
 }
 
 #ifdef _SYS_PROC_H_
@@ -401,7 +404,7 @@ struct vrtc *vm_rtc(struct vm *vm);
  * This function should only be called in the context of the thread that is
  * executing this vcpu.
  */
-int vm_inject_exception(struct vm *vm, int vcpuid, int vector, int err_valid,
+int vm_inject_exception(struct vcpu *vcpu, int vector, int err_valid,
     uint32_t errcode, int restart_instruction);
 
 /*
@@ -463,7 +466,7 @@ struct vm_copyinfo {
  * the return value is 0. The 'copyinfo[]' resources should be freed by calling
  * 'vm_copy_teardown()' after the copy is done.
  */
-int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+int vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
     uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
     int num_copyinfo, int *is_fault);
 void vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo);
@@ -756,6 +759,36 @@ struct vm_exit {
 };
 
 /* APIs to inject faults into the guest */
+#ifdef _KERNEL
+void vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
+    int errcode);
+
+static __inline void
+vm_inject_ud(struct vcpu *vcpu)
+{
+	vm_inject_fault(vcpu, IDT_UD, 0, 0);
+}
+
+static __inline void
+vm_inject_gp(struct vcpu *vcpu)
+{
+	vm_inject_fault(vcpu, IDT_GP, 1, 0);
+}
+
+static __inline void
+vm_inject_ac(struct vcpu *vcpu, int errcode)
+{
+	vm_inject_fault(vcpu, IDT_AC, 1, errcode);
+}
+
+static __inline void
+vm_inject_ss(struct vcpu *vcpu, int errcode)
+{
+	vm_inject_fault(vcpu, IDT_SS, 1, errcode);
+}
+
+void vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2);
+#else
 void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
     int errcode);
 
@@ -784,7 +817,6 @@ vm_inject_ss(void *vm, int vcpuid, int errcode)
 }
 
 void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);
-
-int vm_restart_instruction(void *vm, int vcpuid);
+#endif
 
 #endif	/* _VMM_H_ */
diff --git a/sys/amd64/include/vmm_instruction_emul.h b/sys/amd64/include/vmm_instruction_emul.h
index 4077e0d693e5..082405971fe4 100644
--- a/sys/amd64/include/vmm_instruction_emul.h
+++ b/sys/amd64/include/vmm_instruction_emul.h
@@ -33,13 +33,26 @@
 
 #include <sys/mman.h>
 
+/*
+ * Allow for different arguments to identify vCPUs in userspace vs the
+ * kernel.  Eventually we should add struct vcpu in userland and
+ * always use the kernel arguments removing these macros.
+ */
+#ifdef _KERNEL
+#define	VCPU_DECL	struct vcpu *vcpu
+#define	VCPU_ARGS	vcpu
+#else
+#define	VCPU_DECL	void *vm, int vcpuid
+#define	VCPU_ARGS	vm, vcpuid
+#endif
+
 /*
  * Callback functions to read and write memory regions.
  */
-typedef int (*mem_region_read_t)(void *vm, int cpuid, uint64_t gpa,
+typedef int (*mem_region_read_t)(VCPU_DECL, uint64_t gpa,
 				 uint64_t *rval, int rsize, void *arg);
 
-typedef int (*mem_region_write_t)(void *vm, int cpuid, uint64_t gpa,
+typedef int (*mem_region_write_t)(VCPU_DECL, uint64_t gpa,
 				  uint64_t wval, int wsize, void *arg);
 
 /*
@@ -53,11 +66,11 @@ typedef int (*mem_region_write_t)(void *vm, int cpuid, uint64_t gpa,
  * 'struct vmctx *' when called from user context.
  * s
  */
-int vmm_emulate_instruction(void *vm, int cpuid, uint64_t gpa, struct vie *vie,
+int vmm_emulate_instruction(VCPU_DECL, uint64_t gpa, struct vie *vie,
     struct vm_guest_paging *paging, mem_region_read_t mrr,
     mem_region_write_t mrw, void *mrarg);
 
-int vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
+int vie_update_register(VCPU_DECL, enum vm_reg_name reg,
     uint64_t val, int size);
 
 /*
@@ -81,7 +94,7 @@ int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
  *
  * 'vie' must be initialized before calling 'vmm_fetch_instruction()'
  */
-int vmm_fetch_instruction(struct vm *vm, int cpuid,
+int vmm_fetch_instruction(struct vcpu *vcpu,
 			  struct vm_guest_paging *guest_paging,
 			  uint64_t rip, int inst_length, struct vie *vie,
 			  int *is_fault);
@@ -94,14 +107,14 @@ int vmm_fetch_instruction(struct vm *vm, int cpuid,
  *   0		   1		An exception was injected into the guest
  * EFAULT	  N/A		An unrecoverable hypervisor error occurred
  */
-int vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+int vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
     uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
 
 /*
  * Like vm_gla2gpa, but no exceptions are injected into the guest and
  * PTEs are not changed.
  */
-int vm_gla2gpa_nofault(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
     uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
 #endif /* _KERNEL */
 
@@ -121,7 +134,7 @@ void vie_init(struct vie *vie, const char *inst_bytes, int inst_length);
  */
 #ifdef _KERNEL
 #define	VIE_INVALID_GLA		(1UL << 63)	/* a non-canonical address */
-int vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
+int vmm_decode_instruction(struct vcpu *vcpu, uint64_t gla,
 			   enum vm_cpu_mode cpu_mode, int csd, struct vie *vie);
 #else /* !_KERNEL */
 /*
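The VCPU_DECL/VCPU_ARGS macros above are what let the shared emulation
code in vmm_instruction_emul.c compile unchanged both into the kernel
and into libvmmapi.  A minimal sketch of a helper written against them
(example_fetch_rax is hypothetical, not part of the commit): in the
kernel the macros expand to a struct vcpu pointer, while in userland
they expand to the (void *vm, int vcpuid) pair that libvmmapi's
vm_get_register() still expects.

    static int
    example_fetch_rax(VCPU_DECL, uint64_t *rax)
    {

    	/*
    	 * Expands to vm_get_register(vcpu, ...) in the kernel and to
    	 * vm_get_register(vm, vcpuid, ...) in userland.
    	 */
    	return (vm_get_register(VCPU_ARGS, VM_REG_GUEST_RAX, rax));
    }
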
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index f55262faee4f..48c7b53604c1 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -973,12 +973,10 @@ svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector,
 static void
 svm_update_virqinfo(struct svm_vcpu *vcpu)
 {
-	struct vm *vm;
 	struct vlapic *vlapic;
 	struct vmcb_ctrl *ctrl;
 
-	vm = vcpu->sc->vm;
-	vlapic = vm_lapic(vm, vcpu->vcpuid);
+	vlapic = vm_lapic(vcpu->vcpu);
 	ctrl = svm_get_vmcb_ctrl(vcpu);
 
 	/* Update %cr8 in the emulated vlapic */
@@ -1210,7 +1208,7 @@ svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
 	KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
 	return (0);
 gpf:
-	vm_inject_gp(sc->vm, vcpuid);
+	vm_inject_gp(vcpu->vcpu);
 	return (0);
 }
 
@@ -1459,7 +1457,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 			/* Reflect the exception back into the guest */
 			SVM_CTR2(vcpu, "Reflecting exception "
 			    "%d/%#x into the guest", idtvec, (int)info1);
-			error = vm_inject_exception(svm_sc->vm, vcpuid, idtvec,
+			error = vm_inject_exception(vcpu->vcpu, idtvec,
 			    errcode_valid, info1, 0);
 			KASSERT(error == 0, ("%s: vm_inject_exception error %d",
 			    __func__, error));
@@ -1556,7 +1554,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 	case VMCB_EXIT_SKINIT:
 	case VMCB_EXIT_ICEBP:
 	case VMCB_EXIT_INVLPGA:
-		vm_inject_ud(svm_sc->vm, vcpuid);
+		vm_inject_ud(vcpu->vcpu);
 		handled = 1;
 		break;
 	case VMCB_EXIT_INVD:
@@ -2017,7 +2015,7 @@ svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 	state = svm_get_vmcb_state(vcpu);
 	ctrl = svm_get_vmcb_ctrl(vcpu);
 	vmexit = vm_exitinfo(vm, vcpuid);
-	vlapic = vm_lapic(vm, vcpuid);
+	vlapic = vm_lapic(vcpu->vcpu);
 
 	gctx = svm_get_guest_regctx(vcpu);
 	vmcb_pa = vcpu->vmcb_pa;
@@ -2346,7 +2344,7 @@ svm_setcap(void *vcpui, int type, int val)
 			error = EINVAL;
 		break;
 	case VM_CAP_IPI_EXIT:
-		vlapic = vm_lapic(vcpu->sc->vm, vcpu->vcpuid);
+		vlapic = vm_lapic(vcpu->vcpu);
 		vlapic->ipi_exit = val;
 		break;
 	default:
@@ -2379,7 +2377,7 @@ svm_getcap(void *vcpui, int type, int *retval)
 		*retval = 1;	/* unrestricted guest is always enabled */
 		break;
 	case VM_CAP_IPI_EXIT:
-		vlapic = vm_lapic(vcpu->sc->vm, vcpu->vcpuid);
+		vlapic = vm_lapic(vcpu->vcpu);
 		*retval = vlapic->ipi_exit;
 		break;
 	default:
diff --git a/sys/amd64/vmm/amd/svm_msr.c b/sys/amd64/vmm/amd/svm_msr.c
index 65cc23352620..52ff6a29f336 100644
--- a/sys/amd64/vmm/amd/svm_msr.c
+++ b/sys/amd64/vmm/amd/svm_msr.c
@@ -125,7 +125,7 @@ svm_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
 	case MSR_MTRR64kBase:
 	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
 		if (vm_rdmtrr(&vcpu->mtrr, num, result) != 0) {
-			vm_inject_gp(sc->vm, vcpu->vcpuid);
+			vm_inject_gp(vcpu->vcpu);
 		}
 		break;
 	case MSR_SYSCFG:
@@ -158,7 +158,7 @@ svm_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, uint64_t val,
 	case MSR_MTRR64kBase:
 	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
 		if (vm_wrmtrr(&vcpu->mtrr, num, val) != 0) {
-			vm_inject_gp(sc->vm, vcpu->vcpuid);
+			vm_inject_gp(vcpu->vcpu);
 		}
 		break;
 	case MSR_SYSCFG:
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index abedecbecd89..57cc73633b7f 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -1692,31 +1692,31 @@ vmx_emulate_xsetbv(struct vmx *vmx, struct vmx_vcpu *vcpu,
 
 	/* Only xcr0 is supported. */
 	if (vmxctx->guest_rcx != 0) {
-		vm_inject_gp(vmx->vm, vcpu->vcpuid);
+		vm_inject_gp(vcpu->vcpu);
 		return (HANDLED);
 	}
 
 	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
 	if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
-		vm_inject_ud(vmx->vm, vcpu->vcpuid);
+		vm_inject_ud(vcpu->vcpu);
 		return (HANDLED);
 	}
 
 	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
 	if ((xcrval & ~limits->xcr0_allowed) != 0) {
-		vm_inject_gp(vmx->vm, vcpu->vcpuid);
+		vm_inject_gp(vcpu->vcpu);
 		return (HANDLED);
 	}
 
 	if (!(xcrval & XFEATURE_ENABLED_X87)) {
-		vm_inject_gp(vmx->vm, vcpu->vcpuid);
+		vm_inject_gp(vcpu->vcpu);
 		return (HANDLED);
 	}
 
 	/* AVX (YMM_Hi128) requires SSE. */
 	if (xcrval & XFEATURE_ENABLED_AVX &&
 	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
-		vm_inject_gp(vmx->vm, vcpu->vcpuid);
+		vm_inject_gp(vcpu->vcpu);
 		return (HANDLED);
 	}
 
@@ -1727,7 +1727,7 @@ vmx_emulate_xsetbv(struct vmx *vmx, struct vmx_vcpu *vcpu,
 	if (xcrval & XFEATURE_AVX512 &&
 	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
 	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
-		vm_inject_gp(vmx->vm, vcpu->vcpuid);
+		vm_inject_gp(vcpu->vcpu);
 		return (HANDLED);
 	}
 
@@ -1737,7 +1737,7 @@ vmx_emulate_xsetbv(struct vmx *vmx, struct vmx_vcpu *vcpu,
 	 */
 	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
 	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
-		vm_inject_gp(vmx->vm, vcpu->vcpuid);
+		vm_inject_gp(vcpu->vcpu);
 		return (HANDLED);
 	}
 
@@ -1927,7 +1927,7 @@ vmx_emulate_cr8_access(struct vmx *vmx, struct vmx_vcpu *vcpu,
 		return (UNHANDLED);
 	}
 
-	vlapic = vm_lapic(vmx->vm, vcpu->vcpuid);
+	vlapic = vm_lapic(vcpu->vcpu);
 	regnum = (exitqual >> 8) & 0xf;
 	if (exitqual & 0x10) {
 		cr8 = vlapic_get_cr8(vlapic);
@@ -2721,7 +2721,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		    "the guest", intr_vec, errcode);
 		SDT_PROBE5(vmm, vmx, exit, exception,
 		    vmx, vcpuid, vmexit, intr_vec, errcode);
-		error = vm_inject_exception(vmx->vm, vcpuid, intr_vec,
+		error = vm_inject_exception(vcpu->vcpu, intr_vec,
 		    errcode_valid, errcode, 0);
 		KASSERT(error == 0, ("%s: vm_inject_exception error %d",
 		    __func__, error));
@@ -2777,7 +2777,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		 * pointing to the next instruction.
 		 */
 		vmexit->inst_length = 0;
-		vlapic = vm_lapic(vmx->vm, vcpuid);
+		vlapic = vm_lapic(vcpu->vcpu);
 		SDT_PROBE4(vmm, vmx, exit, apicwrite,
 		    vmx, vcpuid, vmexit, vlapic);
 		handled = vmx_handle_apic_write(vcpu, vlapic, qual);
@@ -2795,7 +2795,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
 		vmexit->exitcode = VM_EXITCODE_MWAIT;
 		break;
 	case EXIT_REASON_TPR:
-		vlapic = vm_lapic(vmx->vm, vcpuid);
+		vlapic = vm_lapic(vcpu->vcpu);
 		vlapic_sync_tpr(vlapic);
 		vmexit->inst_length = 0;
 		handled = HANDLED;
@@ -3030,7 +3030,7 @@ vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 	vcpuid = vcpu->vcpuid;
 	vmcs = vcpu->vmcs;
 	vmxctx = &vcpu->ctx;
-	vlapic = vm_lapic(vm, vcpuid);
+	vlapic = vm_lapic(vcpu->vcpu);
 	vmexit = vm_exitinfo(vm, vcpuid);
 	launched = 0;
 
@@ -3644,7 +3644,7 @@ vmx_setcap(void *vcpui, int type, int val)
 	case VM_CAP_IPI_EXIT:
 		retval = 0;
 
-		vlapic = vm_lapic(vcpu->vmx->vm, vcpu->vcpuid);
+		vlapic = vm_lapic(vcpu->vcpu);
 		vlapic->ipi_exit = val;
 		break;
 	default:
diff --git a/sys/amd64/vmm/intel/vmx_msr.c b/sys/amd64/vmm/intel/vmx_msr.c
index 8fba9be7d57e..a5ff3a9c492d 100644
--- a/sys/amd64/vmm/intel/vmx_msr.c
+++ b/sys/amd64/vmm/intel/vmx_msr.c
@@ -423,7 +423,7 @@ vmx_rdmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t *val,
 	case MSR_MTRR64kBase:
 	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
 		if (vm_rdmtrr(&vcpu->mtrr, num, val) != 0) {
-			vm_inject_gp(vmx->vm, vcpu->vcpuid);
+			vm_inject_gp(vcpu->vcpu);
 		}
 		break;
 	case MSR_IA32_MISC_ENABLE:
@@ -466,7 +466,7 @@ vmx_wrmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
 	case MSR_MTRR64kBase:
 	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
 		if (vm_wrmtrr(&vcpu->mtrr, num, val) != 0) {
-			vm_inject_gp(vmx->vm, vcpu->vcpuid);
+			vm_inject_gp(vcpu->vcpu);
 		}
 		break;
 	case MSR_IA32_MISC_ENABLE:
@@ -493,7 +493,7 @@ vmx_wrmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
 		if (pat_valid(val))
 			vcpu->guest_msrs[IDX_MSR_PAT] = val;
 		else
-			vm_inject_gp(vmx->vm, vcpu->vcpuid);
+			vm_inject_gp(vcpu->vcpu);
 		break;
 	case MSR_TSC:
 		error = vmx_set_tsc_offset(vmx, vcpu, val - rdtsc());
@@ -507,7 +507,7 @@ vmx_wrmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
 			 */
 			vcpu->guest_msrs[IDX_MSR_TSC_AUX] = val;
 		else
-			vm_inject_gp(vmx->vm, vcpu->vcpuid);
+			vm_inject_gp(vcpu->vcpu);
 		break;
 	default:
 		error = EINVAL;
diff --git a/sys/amd64/vmm/io/vhpet.c b/sys/amd64/vmm/io/vhpet.c
index 530f5d49f8f1..dd409cde188f 100644
--- a/sys/amd64/vmm/io/vhpet.c
+++ b/sys/amd64/vmm/io/vhpet.c
@@ -472,7 +472,7 @@ vhpet_timer_update_config(struct vhpet *vhpet, int n, uint64_t data,
 }
 
 int
-vhpet_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t val, int size,
+vhpet_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t val, int size,
     void *arg)
 {
 	struct vhpet *vhpet;
@@ -481,7 +481,7 @@ vhpet_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t val, int size,
 	sbintime_t now, *nowptr;
 	int i, offset;
 
-	vhpet = vm_hpet(vm);
+	vhpet = vm_hpet(vcpu_vm(vcpu));
 	offset = gpa - VHPET_BASE;
 
 	VHPET_LOCK(vhpet);
@@ -622,14 +622,14 @@ done:
 }
 
 int
-vhpet_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *rval, int size,
+vhpet_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size,
     void *arg)
 {
 	int i, offset;
 	struct vhpet *vhpet;
 	uint64_t data;
 
-	vhpet = vm_hpet(vm);
+	vhpet = vm_hpet(vcpu_vm(vcpu));
 	offset = gpa - VHPET_BASE;
 
 	VHPET_LOCK(vhpet);
diff --git a/sys/amd64/vmm/io/vhpet.h b/sys/amd64/vmm/io/vhpet.h
index f22d0c3b38ac..43a1fde1126e 100644
--- a/sys/amd64/vmm/io/vhpet.h
+++ b/sys/amd64/vmm/io/vhpet.h
@@ -40,9 +40,9 @@ struct vm_snapshot_meta;
 
 struct vhpet *vhpet_init(struct vm *vm);
 void 	vhpet_cleanup(struct vhpet *vhpet);
-int	vhpet_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t val,
+int	vhpet_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t val,
 	    int size, void *arg);
-int	vhpet_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *val,
+int	vhpet_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *val,
 	    int size, void *arg);
 int	vhpet_getcap(struct vm_hpet_cap *cap);
 #ifdef BHYVE_SNAPSHOT
diff --git a/sys/amd64/vmm/io/vioapic.c b/sys/amd64/vmm/io/vioapic.c
index 639c1b07eb08..66a394af0d00 100644
--- a/sys/amd64/vmm/io/vioapic.c
+++ b/sys/amd64/vmm/io/vioapic.c
@@ -245,7 +245,7 @@ vioapic_update_tmr(struct vm *vm, int vcpuid, void *arg)
 	int delmode, pin, vector;
 	bool level, phys;
 
-	vlapic = vm_lapic(vm, vcpuid);
+	vlapic = vm_lapic(vm_vcpu(vm, vcpuid));
 	vioapic = vm_ioapic(vm);
 
 	VIOAPIC_LOCK(vioapic);
@@ -277,7 +277,7 @@ vioapic_update_tmr(struct vm *vm, int vcpuid, void *arg)
 }
 
 static uint32_t
-vioapic_read(struct vioapic *vioapic, int vcpuid, uint32_t addr)
+vioapic_read(struct vioapic *vioapic, struct vcpu *vcpu, uint32_t addr)
 {
 	int regnum, pin, rshift;
 
@@ -312,13 +312,15 @@ vioapic_read(struct vioapic *vioapic, int vcpuid, uint32_t addr)
 }
 
 static void
-vioapic_write(struct vioapic *vioapic, int vcpuid, uint32_t addr, uint32_t data)
+vioapic_write(struct vioapic *vioapic, struct vcpu *vcpu, uint32_t addr,
+    uint32_t data)
 {
 	uint64_t data64, mask64;
 	uint64_t last, changed;
-	int regnum, pin, lshift;
+	int regnum, pin, lshift, vcpuid;
 	cpuset_t allvcpus;
 
+	vcpuid = vcpu_vcpuid(vcpu);
 	regnum = addr & 0xff;
 	switch (regnum) {
 	case IOAPIC_ID:
@@ -392,7 +394,7 @@ vioapic_write(struct vioapic *vioapic, int vcpuid, uint32_t addr, uint32_t data)
 }
 
 static int
-vioapic_mmio_rw(struct vioapic *vioapic, int vcpuid, uint64_t gpa,
+vioapic_mmio_rw(struct vioapic *vioapic, struct vcpu *vcpu, uint64_t gpa,
     uint64_t *data, int size, bool doread)
 {
 	uint64_t offset;
@@ -417,10 +419,10 @@ vioapic_mmio_rw(struct vioapic *vioapic, int vcpuid, uint64_t gpa,
 			vioapic->ioregsel = *data;
 	} else {
 		if (doread) {
-			*data = vioapic_read(vioapic, vcpuid,
+			*data = vioapic_read(vioapic, vcpu,
 			    vioapic->ioregsel);
 		} else {
-			vioapic_write(vioapic, vcpuid, vioapic->ioregsel,
+			vioapic_write(vioapic, vcpu, vioapic->ioregsel,
 			    *data);
 		}
 	}
@@ -430,26 +432,26 @@ vioapic_mmio_rw(struct vioapic *vioapic, int vcpuid, uint64_t gpa,
 }
 
 int
-vioapic_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *rval,
+vioapic_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval,
     int size, void *arg)
 {
 	int error;
 	struct vioapic *vioapic;
 
-	vioapic = vm_ioapic(vm);
-	error = vioapic_mmio_rw(vioapic, vcpuid, gpa, rval, size, true);
+	vioapic = vm_ioapic(vcpu_vm(vcpu));
+	error = vioapic_mmio_rw(vioapic, vcpu, gpa, rval, size, true);
 	return (error);
 }
 
 int
-vioapic_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t wval,
+vioapic_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval,
     int size, void *arg)
 {
 	int error;
 	struct vioapic *vioapic;
 
-	vioapic = vm_ioapic(vm);
-	error = vioapic_mmio_rw(vioapic, vcpuid, gpa, &wval, size, false);
+	vioapic = vm_ioapic(vcpu_vm(vcpu));
+	error = vioapic_mmio_rw(vioapic, vcpu, gpa, &wval, size, false);
 	return (error);
 }
 
diff --git a/sys/amd64/vmm/io/vioapic.h b/sys/amd64/vmm/io/vioapic.h
index 17d2c29b758f..e45975c548a8 100644
--- a/sys/amd64/vmm/io/vioapic.h
+++ b/sys/amd64/vmm/io/vioapic.h
@@ -45,9 +45,9 @@ int	vioapic_assert_irq(struct vm *vm, int irq);
 int	vioapic_deassert_irq(struct vm *vm, int irq);
 int	vioapic_pulse_irq(struct vm *vm, int irq);
 
-int	vioapic_mmio_write(void *vm, int vcpuid, uint64_t gpa,
+int	vioapic_mmio_write(struct vcpu *vcpu, uint64_t gpa,
 	    uint64_t wval, int size, void *arg);
-int	vioapic_mmio_read(void *vm, int vcpuid, uint64_t gpa,
+int	vioapic_mmio_read(struct vcpu *vcpu, uint64_t gpa,
 	    uint64_t *rval, int size, void *arg);
 
 int	vioapic_pincount(struct vm *vm);
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
index fdd971cc03c9..541e889d9fdd 100644
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -867,7 +867,7 @@ vlapic_calcdest(struct vm *vm, cpuset_t *dmask, uint32_t dest, bool phys,
 		CPU_ZERO(dmask);
 		amask = vm_active_cpus(vm);
 		CPU_FOREACH_ISSET(vcpuid, &amask) {
-			vlapic = vm_lapic(vm, vcpuid);
+			vlapic = vm_lapic(vm_vcpu(vm, vcpuid));
 			dfr = vlapic->apic_page->dfr;
 			ldr = vlapic->apic_page->ldr;
 
@@ -935,7 +935,7 @@ vlapic_set_cr8(struct vlapic *vlapic, uint64_t val)
 	uint8_t tpr;
 
 	if (val & ~0xf) {
-		vm_inject_gp(vlapic->vm, vlapic->vcpuid);
+		vm_inject_gp(vlapic->vcpu);
 		return;
 	}
 
@@ -1131,7 +1131,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
 			 * requires that the boot state is set to SIPI
 			 * here.
 			 */
-			vlapic2 = vm_lapic(vlapic->vm, i);
+			vlapic2 = vm_lapic(vm_vcpu(vlapic->vm, i));
 			vlapic2->boot_state = BS_SIPI;
 			break;
 		}
@@ -1155,7 +1155,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
 			/*
 			 * Ignore SIPIs in any state other than wait-for-SIPI
 			 */
-			vlapic2 = vm_lapic(vlapic->vm, i);
+			vlapic2 = vm_lapic(vm_vcpu(vlapic->vm, i));
 			if (vlapic2->boot_state != BS_SIPI)
 				break;
 			vlapic2->boot_state = BS_RUNNING;
@@ -1170,7 +1170,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
 		}
 
 		CPU_FOREACH_ISSET(i, &dmask) {
-			vlapic2 = vm_lapic(vlapic->vm, i);
+			vlapic2 = vm_lapic(vm_vcpu(vlapic->vm, i));
 
 			/*
 			 * Ignore SIPIs in any state other than wait-for-SIPI
@@ -1202,7 +1202,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
 static void
 vlapic_handle_init(struct vm *vm, int vcpuid, void *arg)
 {
-	struct vlapic *vlapic = vm_lapic(vm, vcpuid);
+	struct vlapic *vlapic = vm_lapic(vm_vcpu(vm, vcpuid));
 
 	vlapic_reset(vlapic);
 
@@ -1659,12 +1659,12 @@ vlapic_set_apicbase(struct vlapic *vlapic, uint64_t new)
 }
 
 void
-vlapic_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
+vlapic_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
 {
 	struct vlapic *vlapic;
 	struct LAPIC *lapic;
 
-	vlapic = vm_lapic(vm, vcpuid);
+	vlapic = vm_lapic(vcpu);
 
 	if (state == X2APIC_DISABLED)
 		vlapic->msr_apicbase &= ~APICBASE_X2APIC;
@@ -1866,7 +1866,7 @@ vlapic_snapshot(struct vm *vm, struct vm_snapshot_meta *meta)
 
 	maxcpus = vm_get_maxcpus(vm);
 	for (i = 0; i < maxcpus; i++) {
-		vlapic = vm_lapic(vm, i);
+		vlapic = vm_lapic(vm_vcpu(vm, i));
 
 		/* snapshot the page first; timer period depends on icr_timer */
 		lapic = vlapic->apic_page;
diff --git a/sys/amd64/vmm/io/vlapic.h b/sys/amd64/vmm/io/vlapic.h
index 87f3d0c2660f..f8ac42fc7514 100644
--- a/sys/amd64/vmm/io/vlapic.h
+++ b/sys/amd64/vmm/io/vlapic.h
@@ -79,7 +79,7 @@ void vlapic_sync_tpr(struct vlapic *vlapic);
 
 uint64_t vlapic_get_apicbase(struct vlapic *vlapic);
 int vlapic_set_apicbase(struct vlapic *vlapic, uint64_t val);
-void vlapic_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state s);
+void vlapic_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state s);
 bool vlapic_enabled(struct vlapic *vlapic);
 
 void vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 6b93708ba9cc..c2f86c3e4cee 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -687,7 +687,7 @@ vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
 
 #ifdef INVARIANTS
 	int hostcpu, state;
-	state = vcpu_get_state(vm, vcpuid, &hostcpu);
+	state = vcpu_get_state(vm_vcpu(vm, vcpuid), &hostcpu);
 	KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
 	    ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
 #endif
@@ -1064,7 +1064,7 @@ _vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
 }
 
 void *
-vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
+vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
     void **cookie)
 {
 #ifdef INVARIANTS
@@ -1072,11 +1072,11 @@ vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
 	 * The current vcpu should be frozen to ensure 'vm_memmap[]'
 	 * stability.
 	 */
-	int state = vcpu_get_state(vm, vcpuid, NULL);
+	int state = vcpu_get_state(vcpu, NULL);
 	KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
 	    __func__, state));
 #endif
-	return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie));
+	return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie));
 }
 
 void *
@@ -1091,7 +1091,7 @@ vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
 	 */
 	int state;
 	for (int i = 0; i < vm->maxcpus; i++) {
-		state = vcpu_get_state(vm, i, NULL);
+		state = vcpu_get_state(vm_vcpu(vm, i), NULL);
 		KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
 		    __func__, state));
 	}
@@ -1108,37 +1108,29 @@ vm_gpa_release(void *cookie)
 }
 
 int
-vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
+vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
 {
 
-	if (vcpu < 0 || vcpu >= vm->maxcpus)
-		return (EINVAL);
-
 	if (reg >= VM_REG_LAST)
 		return (EINVAL);
 
-	return (vmmops_getreg(vcpu_cookie(vm, vcpu), reg, retval));
+	return (vmmops_getreg(vcpu->cookie, reg, retval));
 }
 
 int
-vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
+vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
 {
-	struct vcpu *vcpu;
 	int error;
 
-	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
-		return (EINVAL);
-
 	if (reg >= VM_REG_LAST)
 		return (EINVAL);
 
-	vcpu = &vm->vcpu[vcpuid];
 	error = vmmops_setreg(vcpu->cookie, reg, val);
 	if (error || reg != VM_REG_GUEST_RIP)
 		return (error);
 
 	/* Set 'nextrip' to match the value of %rip */
-	VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
+	VMM_CTR1(vcpu, "Setting nextrip to %#lx", val);
 	vcpu->nextrip = val;
 	return (0);
 }
@@ -1176,17 +1168,13 @@ is_segment_register(int reg)
 }
 
 int
-vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
-		struct seg_desc *desc)
+vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
 {
 
-	if (vcpu < 0 || vcpu >= vm->maxcpus)
-		return (EINVAL);
-
 	if (!is_segment_register(reg) && !is_descriptor_table(reg))
 		return (EINVAL);
 
-	return (vmmops_getdesc(vcpu_cookie(vm, vcpu), reg, desc));
+	return (vmmops_getdesc(vcpu->cookie, reg, desc));
 }
 
 int
@@ -1566,8 +1554,8 @@ vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
 
 	/* Fetch, decode and emulate the faulting instruction */
 	if (vie->num_valid == 0) {
-		error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
-		    cs_base, VIE_INST_SIZE, vie, &fault);
+		error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base,
+		    VIE_INST_SIZE, vie, &fault);
 	} else {
 		/*
 		 * The instruction bytes have already been copied into 'vie'
@@ -1577,7 +1565,7 @@ vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
 	if (error || fault)
 		return (error);
 
-	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) {
+	if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) {
 		VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx",
 		    vme->rip + cs_base);
 		*retu = true;	    /* dump instruction bytes in userspace */
@@ -1607,8 +1595,8 @@ vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
 		return (0);
 	}
 
-	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
-	    mread, mwrite, retu);
+	error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite,
+	    retu);
 
 	return (error);
 }
@@ -1860,7 +1848,7 @@ restart:
 		case VM_EXITCODE_MONITOR:
 		case VM_EXITCODE_MWAIT:
 		case VM_EXITCODE_VMINSN:
-			vm_inject_ud(vm, vcpuid);
+			vm_inject_ud(vcpu);
 			break;
 		default:
 			retu = true;	/* handled in userland */
@@ -1889,20 +1877,13 @@ restart:
 }
 
 int
-vm_restart_instruction(void *arg, int vcpuid)
+vm_restart_instruction(struct vcpu *vcpu)
 {
-	struct vm *vm;
-	struct vcpu *vcpu;
 	enum vcpu_state state;
 	uint64_t rip;
 	int error __diagused;
 
-	vm = arg;
-	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
-		return (EINVAL);
-
-	vcpu = &vm->vcpu[vcpuid];
-	state = vcpu_get_state(vm, vcpuid, NULL);
+	state = vcpu_get_state(vcpu, NULL);
 	if (state == VCPU_RUNNING) {
 		/*
 		 * When a vcpu is "running" the next instruction is determined
@@ -1911,7 +1892,7 @@ vm_restart_instruction(void *arg, int vcpuid)
 		 * instruction to be restarted.
 		 */
 		vcpu->exitinfo.inst_length = 0;
-		VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
+		VMM_CTR1(vcpu, "restarting instruction at %#lx by "
 		    "setting inst_length to zero", vcpu->exitinfo.rip);
 	} else if (state == VCPU_FROZEN) {
 		/*
@@ -1920,9 +1901,9 @@ vm_restart_instruction(void *arg, int vcpuid)
 		 * instruction. Thus instruction restart is achieved by setting
 		 * 'nextrip' to the vcpu's %rip.
 		 */
-		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
+		error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
 		KASSERT(!error, ("%s: error %d getting rip", __func__, error));
-		VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
+		VMM_CTR2(vcpu, "restarting instruction by updating "
 		    "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
 		vcpu->nextrip = rip;
 	} else {
@@ -2107,7 +2088,7 @@ vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
 	}
 
 	if (valid) {
-		VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
+		VMM_CTR4(vcpu, "%s: info1(%#lx), info2(%#lx), "
*** 1848 LINES SKIPPED ***