git: 059b0b704663 - main - vmm: Move the guest vmspace into the generic vm_mem structure

From: Mark Johnston <markj@FreeBSD.org>
Date: Fri, 10 Oct 2025 13:24:42 UTC
The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=059b0b7046639121f3dca48f5de051e019f9d57c

commit 059b0b7046639121f3dca48f5de051e019f9d57c
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2025-10-10 13:20:12 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2025-10-10 13:22:40 +0000

    vmm: Move the guest vmspace into the generic vm_mem structure
    
    This further consolidates handling of guest memory into MI code in
    sys/dev/vmm.
    
    No functional change intended.
    
    Reviewed by:    corvink
    MFC after:      2 weeks
    Sponsored by:   The FreeBSD Foundation
    Sponsored by:   Klara, Inc.
    Differential Revision:  https://reviews.freebsd.org/D53012
---
 sys/amd64/include/vmm.h         |  1 -
 sys/amd64/vmm/vmm.c             | 45 +++++++++++++++++------------------------
 sys/amd64/vmm/vmm_dev_machdep.c |  1 +
 sys/arm64/include/vmm.h         |  1 -
 sys/arm64/vmm/vmm.c             | 33 ++++++++++--------------------
 sys/dev/vmm/vmm_mem.c           | 31 ++++++++++++++++++++--------
 sys/dev/vmm/vmm_mem.h           |  6 +++++-
 sys/riscv/include/vmm.h         |  1 -
 sys/riscv/vmm/vmm.c             | 31 +++++++++-------------------
 9 files changed, 68 insertions(+), 82 deletions(-)

diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 7c01259ed8cb..66d8991d36e8 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -383,7 +383,6 @@ vcpu_should_yield(struct vcpu *vcpu)
 
 void *vcpu_stats(struct vcpu *vcpu);
 void vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr);
-struct vmspace *vm_vmspace(struct vm *vm);
 struct vm_mem *vm_mem(struct vm *vm);
 struct vatpic *vm_atpic(struct vm *vm);
 struct vatpit *vm_atpit(struct vm *vm);
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index d1eb2383ffed..2ac076551165 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -163,7 +163,6 @@ struct vm {
 	void		*rendezvous_arg;	/* (x) [r] rendezvous func/arg */
 	vm_rendezvous_func_t rendezvous_func;
 	struct mtx	rendezvous_mtx;		/* (o) rendezvous lock */
-	struct vmspace	*vmspace;		/* (o) guest's address space */
 	struct vm_mem	mem;			/* (i) [m+v] guest memory */
 	char		name[VM_MAX_NAMELEN+1];	/* (o) virtual machine name */
 	struct vcpu	**vcpu;			/* (o) guest vcpus */
@@ -499,7 +498,7 @@ MODULE_VERSION(vmm, 1);
 static void
 vm_init(struct vm *vm, bool create)
 {
-	vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
+	vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm)));
 	vm->iommu = NULL;
 	vm->vioapic = vioapic_init(vm);
 	vm->vhpet = vhpet_init(vm);
@@ -584,7 +583,7 @@ int
 vm_create(const char *name, struct vm **retvm)
 {
 	struct vm *vm;
-	struct vmspace *vmspace;
+	int error;
 
 	/*
 	 * If vmm.ko could not be successfully initialized then don't attempt
@@ -597,14 +596,13 @@ vm_create(const char *name, struct vm **retvm)
 	    VM_MAX_NAMELEN + 1)
 		return (EINVAL);
 
-	vmspace = vmmops_vmspace_alloc(0, VM_MAXUSER_ADDRESS_LA48);
-	if (vmspace == NULL)
-		return (ENOMEM);
-
 	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
+	error = vm_mem_init(&vm->mem, 0, VM_MAXUSER_ADDRESS_LA48);
+	if (error != 0) {
+		free(vm, M_VM);
+		return (error);
+	}
 	strcpy(vm->name, name);
-	vm->vmspace = vmspace;
-	vm_mem_init(&vm->mem);
 	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
 	sx_init(&vm->vcpus_init_lock, "vm vcpus");
 	vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK |
@@ -685,9 +683,6 @@ vm_cleanup(struct vm *vm, bool destroy)
 	if (destroy) {
 		vm_mem_destroy(vm);
 
-		vmmops_vmspace_free(vm->vmspace);
-		vm->vmspace = NULL;
-
 		free(vm->vcpu, M_VM);
 		sx_destroy(&vm->vcpus_init_lock);
 		mtx_destroy(&vm->rendezvous_mtx);
@@ -731,7 +726,7 @@ vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
 {
 	vm_object_t obj;
 
-	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
+	if ((obj = vmm_mmio_alloc(vm_vmspace(vm), gpa, len, hpa)) == NULL)
 		return (ENOMEM);
 	else
 		return (0);
@@ -741,19 +736,21 @@ int
 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
 {
 
-	vmm_mmio_free(vm->vmspace, gpa, len);
+	vmm_mmio_free(vm_vmspace(vm), gpa, len);
 	return (0);
 }
 
 static int
 vm_iommu_map(struct vm *vm)
 {
+	pmap_t pmap;
 	vm_paddr_t gpa, hpa;
 	struct vm_mem_map *mm;
 	int error, i;
 
 	sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED);
 
+	pmap = vmspace_pmap(vm_vmspace(vm));
 	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
 		if (!vm_memseg_sysmem(vm, i))
 			continue;
@@ -767,7 +764,7 @@ vm_iommu_map(struct vm *vm)
 		mm->flags |= VM_MEMMAP_F_IOMMU;
 
 		for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
-			hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa);
+			hpa = pmap_extract(pmap, gpa);
 
 			/*
 			 * All mappings in the vmm vmspace must be
@@ -816,7 +813,7 @@ vm_iommu_unmap(struct vm *vm)
 
 		for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
 			KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(pmap_extract(
-			    vmspace_pmap(vm->vmspace), gpa))),
+			    vmspace_pmap(vm_vmspace(vm)), gpa))),
 			    ("vm_iommu_unmap: vm %p gpa %jx not wired",
 			    vm, (uintmax_t)gpa));
 			iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE);
@@ -1249,7 +1246,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
 	    ("vm_handle_paging: invalid fault_type %d", ftype));
 
 	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
-		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
+		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm_vmspace(vm)),
 		    vme->u.paging.gpa, ftype);
 		if (rv == 0) {
 			VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx",
@@ -1259,7 +1256,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
 		}
 	}
 
-	map = &vm->vmspace->vm_map;
+	map = &vm_vmspace(vm)->vm_map;
 	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);
 
 	VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, "
@@ -1560,7 +1557,7 @@ vm_run(struct vcpu *vcpu)
 	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
 		return (EINVAL);
 
-	pmap = vmspace_pmap(vm->vmspace);
+	pmap = vmspace_pmap(vm_vmspace(vm));
 	vme = &vcpu->exitinfo;
 	evinfo.rptr = &vm->rendezvous_req_cpus;
 	evinfo.sptr = &vm->suspend;
@@ -2302,12 +2299,6 @@ vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr)
 	vcpu_unlock(vcpu);
 }
 
-struct vmspace *
-vm_vmspace(struct vm *vm)
-{
-	return (vm->vmspace);
-}
-
 struct vm_mem *
 vm_mem(struct vm *vm)
 {
@@ -2519,7 +2510,7 @@ vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
 
 	if (vcpu->vcpuid == 0) {
 		vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE *
-		    vmspace_resident_count(vcpu->vm->vmspace));
+		    vmspace_resident_count(vm_vmspace(vcpu->vm)));
 	}
 }
 
@@ -2529,7 +2520,7 @@ vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
 
 	if (vcpu->vcpuid == 0) {
 		vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE *
-		    pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace)));
+		    pmap_wired_count(vmspace_pmap(vm_vmspace(vcpu->vm))));
 	}
 }
 
diff --git a/sys/amd64/vmm/vmm_dev_machdep.c b/sys/amd64/vmm/vmm_dev_machdep.c
index d8d2b460404c..dfebc9dcadbf 100644
--- a/sys/amd64/vmm/vmm_dev_machdep.c
+++ b/sys/amd64/vmm/vmm_dev_machdep.c
@@ -48,6 +48,7 @@
 #include <x86/apicreg.h>
 
 #include <dev/vmm/vmm_dev.h>
+#include <dev/vmm/vmm_mem.h>
 #include <dev/vmm/vmm_stat.h>
 
 #include "vmm_lapic.h"
diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h
index 6f5726f0bb1e..84b286a60b38 100644
--- a/sys/arm64/include/vmm.h
+++ b/sys/arm64/include/vmm.h
@@ -263,7 +263,6 @@ vcpu_should_yield(struct vcpu *vcpu)
 
 void *vcpu_stats(struct vcpu *vcpu);
 void vcpu_notify_event(struct vcpu *vcpu);
-struct vmspace *vm_vmspace(struct vm *vm);
 struct vm_mem *vm_mem(struct vm *vm);
 
 enum vm_reg_name vm_segment_name(int seg_encoding);
diff --git a/sys/arm64/vmm/vmm.c b/sys/arm64/vmm/vmm.c
index 5a0b627b2fe2..02c5ae2c0223 100644
--- a/sys/arm64/vmm/vmm.c
+++ b/sys/arm64/vmm/vmm.c
@@ -126,7 +126,6 @@ struct vm {
 	bool		dying;			/* (o) is dying */
 	volatile cpuset_t suspended_cpus; 	/* (i) suspended vcpus */
 	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
-	struct vmspace	*vmspace;		/* (o) guest's address space */
 	struct vm_mem	mem;			/* (i) guest memory */
 	char		name[VM_MAX_NAMELEN];	/* (o) virtual machine name */
 	struct vcpu	**vcpu;			/* (i) guest vcpus */
@@ -408,7 +407,7 @@ vm_init(struct vm *vm, bool create)
 {
 	int i;
 
-	vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
+	vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm)));
 	MPASS(vm->cookie != NULL);
 
 	CPU_ZERO(&vm->active_cpus);
@@ -486,7 +485,7 @@ int
 vm_create(const char *name, struct vm **retvm)
 {
 	struct vm *vm;
-	struct vmspace *vmspace;
+	int error;
 
 	/*
 	 * If vmm.ko could not be successfully initialized then don't attempt
@@ -498,14 +497,13 @@ vm_create(const char *name, struct vm **retvm)
 	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
 		return (EINVAL);
 
-	vmspace = vmmops_vmspace_alloc(0, 1ul << 39);
-	if (vmspace == NULL)
-		return (ENOMEM);
-
 	vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO);
+	error = vm_mem_init(&vm->mem, 0, 1ul << 39);
+	if (error != 0) {
+		free(vm, M_VMM);
+		return (error);
+	}
 	strcpy(vm->name, name);
-	vm->vmspace = vmspace;
-	vm_mem_init(&vm->mem);
 	sx_init(&vm->vcpus_init_lock, "vm vcpus");
 
 	vm->sockets = 1;
@@ -559,7 +557,7 @@ vm_cleanup(struct vm *vm, bool destroy)
 
 	if (destroy) {
 		vm_xlock_memsegs(vm);
-		pmap = vmspace_pmap(vm->vmspace);
+		pmap = vmspace_pmap(vm_vmspace(vm));
 		sched_pin();
 		PCPU_SET(curvmpmap, NULL);
 		sched_unpin();
@@ -583,9 +581,6 @@ vm_cleanup(struct vm *vm, bool destroy)
 	if (destroy) {
 		vm_mem_destroy(vm);
 
-		vmmops_vmspace_free(vm->vmspace);
-		vm->vmspace = NULL;
-
 		free(vm->vcpu, M_VMM);
 		sx_destroy(&vm->vcpus_init_lock);
 	}
@@ -1089,12 +1084,6 @@ vcpu_notify_event(struct vcpu *vcpu)
 	vcpu_unlock(vcpu);
 }
 
-struct vmspace *
-vm_vmspace(struct vm *vm)
-{
-	return (vm->vmspace);
-}
-
 struct vm_mem *
 vm_mem(struct vm *vm)
 {
@@ -1415,7 +1404,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
 
 	vme = &vcpu->exitinfo;
 
-	pmap = vmspace_pmap(vcpu->vm->vmspace);
+	pmap = vmspace_pmap(vm_vmspace(vcpu->vm));
 	addr = vme->u.paging.gpa;
 	esr = vme->u.paging.esr;
 
@@ -1432,7 +1421,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
 		panic("%s: Invalid exception (esr = %lx)", __func__, esr);
 	}
 
-	map = &vm->vmspace->vm_map;
+	map = &vm_vmspace(vm)->vm_map;
 	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);
 	if (rv != KERN_SUCCESS)
 		return (EFAULT);
@@ -1506,7 +1495,7 @@ vm_run(struct vcpu *vcpu)
 	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
 		return (EINVAL);
 
-	pmap = vmspace_pmap(vm->vmspace);
+	pmap = vmspace_pmap(vm_vmspace(vm));
 	vme = &vcpu->exitinfo;
 	evinfo.rptr = NULL;
 	evinfo.sptr = &vm->suspend;
diff --git a/sys/dev/vmm/vmm_mem.c b/sys/dev/vmm/vmm_mem.c
index be59e37de33d..9df31c9ba133 100644
--- a/sys/dev/vmm/vmm_mem.c
+++ b/sys/dev/vmm/vmm_mem.c
@@ -26,10 +26,14 @@
 
 static void vm_free_memmap(struct vm *vm, int ident);
 
-void
-vm_mem_init(struct vm_mem *mem)
+int
+vm_mem_init(struct vm_mem *mem, vm_offset_t lo, vm_offset_t hi)
 {
+	mem->mem_vmspace = vmmops_vmspace_alloc(lo, hi);
+	if (mem->mem_vmspace == NULL)
+		return (ENOMEM);
 	sx_init(&mem->mem_segs_lock, "vm_mem_segs");
+	return (0);
 }
 
 static bool
@@ -93,10 +97,21 @@ vm_mem_destroy(struct vm *vm)
 	for (int i = 0; i < VM_MAX_MEMSEGS; i++)
 		vm_free_memseg(vm, i);
 
+	vmmops_vmspace_free(mem->mem_vmspace);
+
 	sx_xunlock(&mem->mem_segs_lock);
 	sx_destroy(&mem->mem_segs_lock);
 }
 
+struct vmspace *
+vm_vmspace(struct vm *vm)
+{
+	struct vm_mem *mem;
+
+	mem = vm_mem(vm);
+	return (mem->mem_vmspace);
+}
+
 void
 vm_slock_memsegs(struct vm *vm)
 {
@@ -246,7 +261,7 @@ vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
 	struct vm_mem *mem;
 	struct vm_mem_seg *seg;
 	struct vm_mem_map *m, *map;
-	struct vmspace *vmspace;
+	struct vm_map *vmmap;
 	vm_ooffset_t last;
 	int i, error;
 
@@ -282,19 +297,19 @@ vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
 	if (map == NULL)
 		return (ENOSPC);
 
-	vmspace = vm_vmspace(vm);
-	error = vm_map_find(&vmspace->vm_map, seg->object, first, &gpa,
-	    len, 0, VMFS_NO_SPACE, prot, prot, 0);
+	vmmap = &mem->mem_vmspace->vm_map;
+	error = vm_map_find(vmmap, seg->object, first, &gpa, len, 0,
+	    VMFS_NO_SPACE, prot, prot, 0);
 	if (error != KERN_SUCCESS)
 		return (EFAULT);
 
 	vm_object_reference(seg->object);
 
 	if (flags & VM_MEMMAP_F_WIRED) {
-		error = vm_map_wire(&vmspace->vm_map, gpa, gpa + len,
+		error = vm_map_wire(vmmap, gpa, gpa + len,
 		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 		if (error != KERN_SUCCESS) {
-			vm_map_remove(&vmspace->vm_map, gpa, gpa + len);
+			vm_map_remove(vmmap, gpa, gpa + len);
 			return (error == KERN_RESOURCE_SHORTAGE ? ENOMEM :
 			    EFAULT);
 		}
diff --git a/sys/dev/vmm/vmm_mem.h b/sys/dev/vmm/vmm_mem.h
index 856470cf2590..f3d22058c7b8 100644
--- a/sys/dev/vmm/vmm_mem.h
+++ b/sys/dev/vmm/vmm_mem.h
@@ -36,6 +36,7 @@ enum {
 
 struct vm;
 struct vm_object;
+struct vmspace;
 
 struct vm_mem_seg {
 	size_t	len;
@@ -56,12 +57,15 @@ struct vm_mem {
 	struct vm_mem_map	mem_maps[VM_MAX_MEMMAPS];
 	struct vm_mem_seg	mem_segs[VM_MAX_MEMSEGS];
 	struct sx		mem_segs_lock;
+	struct vmspace		*mem_vmspace;
 };
 
-void	vm_mem_init(struct vm_mem *mem);
+int	vm_mem_init(struct vm_mem *mem, vm_offset_t lo, vm_offset_t hi);
 void	vm_mem_cleanup(struct vm *vm);
 void	vm_mem_destroy(struct vm *vm);
 
+struct vmspace *vm_vmspace(struct vm *vm);
+
 /*
  * APIs that modify the guest memory map require all vcpus to be frozen.
  */
diff --git a/sys/riscv/include/vmm.h b/sys/riscv/include/vmm.h
index 43d3dbea968e..bc00474ed0fd 100644
--- a/sys/riscv/include/vmm.h
+++ b/sys/riscv/include/vmm.h
@@ -235,7 +235,6 @@ vcpu_should_yield(struct vcpu *vcpu)
 
 void *vcpu_stats(struct vcpu *vcpu);
 void vcpu_notify_event(struct vcpu *vcpu);
-struct vmspace *vm_vmspace(struct vm *vm);
 struct vm_mem *vm_mem(struct vm *vm);
 
 enum vm_reg_name vm_segment_name(int seg_encoding);
diff --git a/sys/riscv/vmm/vmm.c b/sys/riscv/vmm/vmm.c
index b1e5c0e52ac8..c6cca7cbdfc6 100644
--- a/sys/riscv/vmm/vmm.c
+++ b/sys/riscv/vmm/vmm.c
@@ -121,7 +121,6 @@ struct vm {
 	bool		dying;			/* (o) is dying */
 	volatile cpuset_t suspended_cpus; 	/* (i) suspended vcpus */
 	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
-	struct vmspace	*vmspace;		/* (o) guest's address space */
 	struct vm_mem	mem;			/* (i) [m+v] guest memory */
 	char		name[VM_MAX_NAMELEN];	/* (o) virtual machine name */
 	struct vcpu	**vcpu;			/* (i) guest vcpus */
@@ -286,7 +285,7 @@ vm_init(struct vm *vm, bool create)
 {
 	int i;
 
-	vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
+	vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm)));
 	MPASS(vm->cookie != NULL);
 
 	CPU_ZERO(&vm->active_cpus);
@@ -363,7 +362,7 @@ int
 vm_create(const char *name, struct vm **retvm)
 {
 	struct vm *vm;
-	struct vmspace *vmspace;
+	int error;
 
 	/*
 	 * If vmm.ko could not be successfully initialized then don't attempt
@@ -375,14 +374,13 @@ vm_create(const char *name, struct vm **retvm)
 	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
 		return (EINVAL);
 
-	vmspace = vmmops_vmspace_alloc(0, 1ul << 39);
-	if (vmspace == NULL)
-		return (ENOMEM);
-
 	vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO);
+	error = vm_mem_init(&vm->mem, 0, 1ul << 39);
+	if (error != 0) {
+		free(vm, M_VMM);
+		return (error);
+	}
 	strcpy(vm->name, name);
-	vm->vmspace = vmspace;
-	vm_mem_init(&vm->mem);
 	sx_init(&vm->vcpus_init_lock, "vm vcpus");
 
 	vm->sockets = 1;
@@ -451,9 +449,6 @@ vm_cleanup(struct vm *vm, bool destroy)
 	if (destroy) {
 		vm_mem_destroy(vm);
 
-		vmmops_vmspace_free(vm->vmspace);
-		vm->vmspace = NULL;
-
 		free(vm->vcpu, M_VMM);
 		sx_destroy(&vm->vcpus_init_lock);
 	}
@@ -759,12 +754,6 @@ vcpu_notify_event(struct vcpu *vcpu)
 	vcpu_unlock(vcpu);
 }
 
-struct vmspace *
-vm_vmspace(struct vm *vm)
-{
-	return (vm->vmspace);
-}
-
 struct vm_mem *
 vm_mem(struct vm *vm)
 {
@@ -1083,7 +1072,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
 	vm = vcpu->vm;
 	vme = &vcpu->exitinfo;
 
-	pmap = vmspace_pmap(vm->vmspace);
+	pmap = vmspace_pmap(vm_vmspace(vm));
 	addr = (vme->htval << 2) & ~(PAGE_SIZE - 1);
 
 	dprintf("%s: %lx\n", __func__, addr);
@@ -1106,7 +1095,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
 	if (pmap_fault(pmap, addr, ftype))
 		return (0);
 
-	map = &vm->vmspace->vm_map;
+	map = &vm_vmspace(vm)->vm_map;
 	rv = vm_fault(map, addr, ftype, VM_FAULT_NORMAL, NULL);
 	if (rv != KERN_SUCCESS) {
 		printf("%s: vm_fault failed, addr %lx, ftype %d, err %d\n",
@@ -1188,7 +1177,7 @@ vm_run(struct vcpu *vcpu)
 	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
 		return (EINVAL);
 
-	pmap = vmspace_pmap(vm->vmspace);
+	pmap = vmspace_pmap(vm_vmspace(vm));
 	vme = &vcpu->exitinfo;
 	evinfo.rptr = NULL;
 	evinfo.sptr = &vm->suspend;
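
[Editor's note, not part of the commit] For readers following the interface change: after this commit the machine-dependent vm_create() routines no longer allocate the guest vmspace themselves. vm_mem_init() now takes the address-space bounds, allocates the vmspace via vmmops_vmspace_alloc(), and stores it in the generic struct vm_mem, and the vm_vmspace() accessor moves into sys/dev/vmm/vmm_mem.c. The fragment below is a minimal sketch of the resulting call pattern, condensed from the amd64 hunks above; example_vm_create() is a hypothetical wrapper used only for illustration, it omits the rest of vm_create(), and it is not compilable on its own.

    /*
     * Illustrative sketch only, condensed from the amd64 hunks above;
     * not part of the commit.
     */
    static struct vm *
    example_vm_create(const char *name)	/* hypothetical helper */
    {
    	struct vm *vm;
    	int error;

    	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);

    	/*
    	 * vm_mem_init() now allocates the guest vmspace (via
    	 * vmmops_vmspace_alloc()) and keeps it in vm->mem.mem_vmspace,
    	 * so MD code no longer carries its own struct vmspace pointer.
    	 */
    	error = vm_mem_init(&vm->mem, 0, VM_MAXUSER_ADDRESS_LA48);
    	if (error != 0) {
    		free(vm, M_VM);
    		return (NULL);
    	}
    	strcpy(vm->name, name);

    	/*
    	 * MD code that needs the guest pmap (or vm_map) goes through
    	 * the vm_vmspace() accessor exported by dev/vmm/vmm_mem.h.
    	 */
    	vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm)));
    	return (vm);
    }

Teardown is likewise centralized: vm_mem_destroy() now calls vmmops_vmspace_free() on the stored vmspace, which is why the explicit vmmops_vmspace_free()/NULL assignments disappear from the per-architecture vm_cleanup() functions in the diff above.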