svn commit: r252908 - in projects/bhyve_npt_pmap/sys/amd64: include vmm

Neel Natu neel at FreeBSD.org
Sun Jul 7 01:52:07 UTC 2013


Author: neel
Date: Sun Jul  7 01:52:05 2013
New Revision: 252908
URL: http://svnweb.freebsd.org/changeset/base/252908

Log:
  Allocate an object of type OBJT_DEFAULT for each memory segment created for the
  guest (i.e., guest memory is no longer wired).
  
  Use the 'd_mmap_single' entry point to return the object/offset underlying
  the mmap'ed region.
  
  Since we don't yet handle nested page faults in vmm.ko, the guest will
  exit immediately on entry with an unhandled memory fault.
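  
  For reference, the net effect of the first two items is that a guest
  memory segment becomes a (gpa, len, object) tuple backed by a pageable
  OBJT_DEFAULT object whose pages are allocated on first touch. A
  condensed sketch of what the diffs below implement ('mem_seg_alloc' is
  an illustrative helper, not a function in the tree):
  
    #include <sys/param.h>
    #include <sys/systm.h>
    
    #include <vm/vm.h>
    #include <vm/vm_object.h>
    
    /* One guest memory segment: [gpa, gpa + len) backed by 'object'. */
    struct mem_seg {
            vm_paddr_t      gpa;
            size_t          len;
            vm_object_t     object;
    };
    
    /*
     * Back a segment with an anonymous, pageable VM object instead of
     * wiring physical pages up front; the VM fault path allocates
     * pages on first touch.
     */
    static int
    mem_seg_alloc(struct mem_seg *seg, vm_paddr_t gpa, size_t len)
    {
            vm_object_t obj;
    
            if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
                    return (EINVAL);
    
            obj = vm_object_allocate(OBJT_DEFAULT, len >> PAGE_SHIFT);
            if (obj == NULL)
                    return (ENOMEM);
    
            seg->gpa = gpa;
            seg->len = len;
            seg->object = obj;
            return (0);
    }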

Modified:
  projects/bhyve_npt_pmap/sys/amd64/include/vmm.h
  projects/bhyve_npt_pmap/sys/amd64/vmm/vmm.c
  projects/bhyve_npt_pmap/sys/amd64/vmm/vmm_dev.c
  projects/bhyve_npt_pmap/sys/amd64/vmm/vmm_mem.c
  projects/bhyve_npt_pmap/sys/amd64/vmm/vmm_mem.h

Modified: projects/bhyve_npt_pmap/sys/amd64/include/vmm.h
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/include/vmm.h	Sun Jul  7 01:32:52 2013	(r252907)
+++ projects/bhyve_npt_pmap/sys/amd64/include/vmm.h	Sun Jul  7 01:52:05 2013	(r252908)
@@ -40,6 +40,7 @@ struct vm_exit;
 struct vm_run;
 struct vlapic;
 struct vmspace;
+struct vm_object;
 
 enum x2apic_state;
 
@@ -101,6 +102,8 @@ int vm_unmap_mmio(struct vm *vm, vm_padd
 vm_paddr_t vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t size);
 int vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
 	      struct vm_memory_segment *seg);
+int vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
+		  vm_offset_t *offset, struct vm_object **object);
 int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
 int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
 int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,

Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/vmm.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/vmm.c	Sun Jul  7 01:32:52 2013	(r252907)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/vmm.c	Sun Jul  7 01:52:05 2013	(r252908)
@@ -39,11 +39,14 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/rwlock.h>
 #include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/systm.h>
 
 #include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
 #include <vm/pmap.h>
 
 #include <machine/vm.h>
@@ -88,6 +91,11 @@ struct vcpu {
 #define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
 #define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
 
+struct mem_seg {
+	vm_paddr_t	gpa;
+	size_t		len;
+	vm_object_t	object;
+};
 #define	VM_MAX_MEMORY_SEGMENTS	2
 
 struct vm {
@@ -96,7 +104,7 @@ struct vm {
 	struct vmspace	*vmspace;	/* guest's address space */
 	struct vcpu	vcpu[VM_MAXCPU];
 	int		num_mem_segs;
-	struct vm_memory_segment mem_segs[VM_MAX_MEMORY_SEGMENTS];
+	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
 	char		name[VM_MAX_NAMELEN];
 
 	/*
@@ -304,42 +312,17 @@ vm_create(const char *name, struct vm **
 	return (0);
 }
 
+/*
+ * XXX need to deal with iommu mappings
+ */
 static void
-vm_free_mem_seg(struct vm *vm, struct vm_memory_segment *seg)
+vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
 {
-	size_t len;
-	vm_paddr_t hpa;
-	void *host_domain;
-
-	host_domain = iommu_host_domain();
-
-	len = 0;
-	while (len < seg->len) {
-		hpa = vm_gpa2hpa(vm, seg->gpa + len, PAGE_SIZE);
-		if (hpa == (vm_paddr_t)-1) {
-			panic("vm_free_mem_segs: cannot free hpa "
-			      "associated with gpa 0x%016lx", seg->gpa + len);
-		}
-
-		/*
-		 * Remove the 'gpa' to 'hpa' mapping in VMs domain.
-		 * And resurrect the 1:1 mapping for 'hpa' in 'host_domain'.
-		 */
-		iommu_remove_mapping(vm->iommu, seg->gpa + len, PAGE_SIZE);
-		iommu_create_mapping(host_domain, hpa, hpa, PAGE_SIZE);
-
-		vmm_mem_free(hpa, PAGE_SIZE);
-
-		len += PAGE_SIZE;
-	}
 
-	/*
-	 * Invalidate cached translations associated with 'vm->iommu' since
-	 * we have now moved some pages from it.
-	 */
-	iommu_invalidate_tlb(vm->iommu);
+	if (seg->object != NULL)
+		vmm_mem_free(seg->object);
 
-	bzero(seg, sizeof(struct vm_memory_segment));
+	bzero(seg, sizeof(*seg));
 }
 
 void
@@ -412,15 +395,16 @@ vm_gpa_available(struct vm *vm, vm_paddr
 	return (TRUE);
 }
 
+/*
+ * XXX need to deal with iommu
+ */
 int
 vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
 {
-	int error, available, allocated;
-	struct vm_memory_segment *seg;
-	vm_paddr_t g, hpa;
-	void *host_domain;
-
-	const boolean_t spok = TRUE;	/* superpage mappings are ok */
+	int available, allocated;
+	struct mem_seg *seg;
+	vm_object_t object;
+	vm_paddr_t g;
 
 	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
 		return (EINVAL);
@@ -453,45 +437,14 @@ vm_malloc(struct vm *vm, vm_paddr_t gpa,
 	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
 		return (E2BIG);
 
-	host_domain = iommu_host_domain();
-
 	seg = &vm->mem_segs[vm->num_mem_segs];
 
-	error = 0;
-	seg->gpa = gpa;
-	seg->len = 0;
-	while (seg->len < len) {
-		hpa = vmm_mem_alloc(PAGE_SIZE);
-		if (hpa == 0) {
-			error = ENOMEM;
-			break;
-		}
-
-		error = VMMMAP_SET(vm->cookie, gpa + seg->len, hpa, PAGE_SIZE,
-				   VM_MEMATTR_WRITE_BACK, VM_PROT_ALL, spok);
-		if (error)
-			break;
-
-		/*
-		 * Remove the 1:1 mapping for 'hpa' from the 'host_domain'.
-		 * Add mapping for 'gpa + seg->len' to 'hpa' in the VMs domain.
-		 */
-		iommu_remove_mapping(host_domain, hpa, PAGE_SIZE);
-		iommu_create_mapping(vm->iommu, gpa + seg->len, hpa, PAGE_SIZE);
-
-		seg->len += PAGE_SIZE;
-	}
-
-	if (error) {
-		vm_free_mem_seg(vm, seg);
-		return (error);
-	}
+	if ((object = vmm_mem_alloc(len)) == NULL)
+		return (ENOMEM);
 
-	/*
-	 * Invalidate cached translations associated with 'host_domain' since
-	 * we have now moved some pages from it.
-	 */
-	iommu_invalidate_tlb(host_domain);
+	seg->gpa = gpa;
+	seg->len = len;
+	seg->object = object;
 
 	vm->num_mem_segs++;
 
@@ -518,7 +471,8 @@ vm_gpabase2memseg(struct vm *vm, vm_padd
 
 	for (i = 0; i < vm->num_mem_segs; i++) {
 		if (gpabase == vm->mem_segs[i].gpa) {
-			*seg = vm->mem_segs[i];
+			seg->gpa = vm->mem_segs[i].gpa;
+			seg->len = vm->mem_segs[i].len;
 			return (0);
 		}
 	}
@@ -526,6 +480,33 @@ vm_gpabase2memseg(struct vm *vm, vm_padd
 }
 
 int
+vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
+	      vm_offset_t *offset, struct vm_object **object)
+{
+	int i;
+	size_t seg_len;
+	vm_paddr_t seg_gpa;
+	vm_object_t seg_obj;
+
+	for (i = 0; i < vm->num_mem_segs; i++) {
+		if ((seg_obj = vm->mem_segs[i].object) == NULL)
+			continue;
+
+		seg_gpa = vm->mem_segs[i].gpa;
+		seg_len = vm->mem_segs[i].len;
+
+		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
+			*offset = gpa - seg_gpa;
+			*object = seg_obj;
+			vm_object_reference(seg_obj);
+			return (0);
+		}
+	}
+
+	return (EINVAL);
+}
+
+int
 vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
 {
 

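The new vm_get_memobj() returns the backing object together with the
offset of 'gpa' within it, taking an extra reference on the object via
vm_object_reference() before returning; the caller owns that reference.
In the d_mmap_single case below, the mmap machinery consumes it when the
mapping is established. A hypothetical caller illustrating the contract
('touch_guest_segment' is made up for the example):

    #include <sys/param.h>
    #include <sys/systm.h>

    #include <vm/vm.h>
    #include <vm/vm_object.h>

    #include <machine/vmm.h>

    static int
    touch_guest_segment(struct vm *vm, vm_paddr_t gpa, size_t len)
    {
            int error;
            vm_offset_t offset;
            struct vm_object *obj;

            error = vm_get_memobj(vm, gpa, len, &offset, &obj);
            if (error)
                    return (error);

            /* ... use 'obj' starting at 'offset' ... */

            vm_object_deallocate(obj);  /* drop the reference we were given */
            return (0);
    }
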
Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/vmm_dev.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/vmm_dev.c	Sun Jul  7 01:32:52 2013	(r252907)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/vmm_dev.c	Sun Jul  7 01:52:05 2013	(r252908)
@@ -365,21 +365,19 @@ done:
 }
 
 static int
-vmmdev_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
-    int nprot, vm_memattr_t *memattr)
+vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
+		   vm_size_t size, struct vm_object **object, int nprot)
 {
 	int error;
 	struct vmmdev_softc *sc;
 
-	error = -1;
 	mtx_lock(&vmmdev_mtx);
 
 	sc = vmmdev_lookup2(cdev);
-	if (sc != NULL && (nprot & PROT_EXEC) == 0) {
-		*paddr = vm_gpa2hpa(sc->vm, (vm_paddr_t)offset, PAGE_SIZE);
-		if (*paddr != (vm_paddr_t)-1)
-			error = 0;
-	}
+	if (sc != NULL && (nprot & PROT_EXEC) == 0)
+		error = vm_get_memobj(sc->vm, *offset, size, offset, object);
+	else
+		error = EINVAL;
 
 	mtx_unlock(&vmmdev_mtx);
 
@@ -446,7 +444,7 @@ static struct cdevsw vmmdevsw = {
 	.d_name		= "vmmdev",
 	.d_version	= D_VERSION,
 	.d_ioctl	= vmmdev_ioctl,
-	.d_mmap		= vmmdev_mmap,
+	.d_mmap_single	= vmmdev_mmap_single,
 	.d_read		= vmmdev_rw,
 	.d_write	= vmmdev_rw,
 };
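
From userland the switch from d_mmap to d_mmap_single is invisible: the
guest physical address is still passed as the mmap file offset, but the
kernel now maps the backing VM object directly instead of resolving each
4KB page to a wired host physical address. A minimal usage sketch (the
VM name 'testvm' and the segment size are hypothetical; note that the
handler rejects PROT_EXEC mappings):

    #include <sys/types.h>
    #include <sys/mman.h>

    #include <err.h>
    #include <fcntl.h>
    #include <stdlib.h>

    int
    main(void)
    {
            int fd;
            char *base;
            size_t len = 1024 * 1024;   /* must match an existing segment */

            fd = open("/dev/vmm/testvm", O_RDWR);
            if (fd < 0)
                    err(1, "open");

            /* File offset 0 maps guest physical address 0. */
            base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, 0);
            if (base == MAP_FAILED)
                    err(1, "mmap");

            base[0] = 0x42;  /* faults a page into the OBJT_DEFAULT object */
            exit(0);
    }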

Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/vmm_mem.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/vmm_mem.c	Sun Jul  7 01:32:52 2013	(r252907)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/vmm_mem.c	Sun Jul  7 01:52:05 2013	(r252908)
@@ -30,40 +30,15 @@
 __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/linker.h>
 #include <sys/systm.h>
-#include <sys/malloc.h>
-#include <sys/kernel.h>
-#include <sys/sysctl.h>
 
 #include <vm/vm.h>
-#include <vm/pmap.h>
-#include <vm/vm_page.h>
-#include <vm/vm_pageout.h>
+#include <vm/vm_object.h>
 
 #include <machine/md_var.h>
-#include <machine/metadata.h>
-#include <machine/pc/bios.h>
-#include <machine/vmparam.h>
-#include <machine/pmap.h>
 
-#include "vmm_util.h"
 #include "vmm_mem.h"
 
-SYSCTL_DECL(_hw_vmm);
-
-static u_long pages_allocated;
-SYSCTL_ULONG(_hw_vmm, OID_AUTO, pages_allocated, CTLFLAG_RD,
-	     &pages_allocated, 0, "4KB pages allocated");
-
-static void
-update_pages_allocated(int howmany)
-{
-	pages_allocated += howmany;	/* XXX locking? */
-}
-
 int
 vmm_mem_init(void)
 {
@@ -71,60 +46,23 @@ vmm_mem_init(void)
 	return (0);
 }
 
-vm_paddr_t
+vm_object_t
 vmm_mem_alloc(size_t size)
 {
-	int flags;
-	vm_page_t m;
-	vm_paddr_t pa;
+	vm_object_t obj;
 
-	if (size != PAGE_SIZE)
+	if (size & PAGE_MASK)
 		panic("vmm_mem_alloc: invalid allocation size %lu", size);
 
-	flags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
-		VM_ALLOC_ZERO;
-
-	while (1) {
-		/*
-		 * XXX need policy to determine when to back off the allocation
-		 */
-		m = vm_page_alloc(NULL, 0, flags);
-		if (m == NULL)
-			VM_WAIT;
-		else
-			break;
-	}
-
-	pa = VM_PAGE_TO_PHYS(m);
-	
-	if ((m->flags & PG_ZERO) == 0)
-		pagezero((void *)PHYS_TO_DMAP(pa));
-	m->valid = VM_PAGE_BITS_ALL;
-
-	update_pages_allocated(1);
-
-	return (pa);
+	obj = vm_object_allocate(OBJT_DEFAULT, size >> PAGE_SHIFT);
+	return (obj);
 }
 
 void
-vmm_mem_free(vm_paddr_t base, size_t length)
+vmm_mem_free(vm_object_t obj)
 {
-	vm_page_t m;
-
-	if (base & PAGE_MASK) {
-		panic("vmm_mem_free: base 0x%0lx must be aligned on a "
-		      "0x%0x boundary\n", base, PAGE_SIZE);
-	}
-
-	if (length != PAGE_SIZE)
-		panic("vmm_mem_free: invalid length %lu", length);
-
-	m = PHYS_TO_VM_PAGE(base);
-	m->wire_count--;
-	vm_page_free(m);
-	atomic_subtract_int(&cnt.v_wire_count, 1);
 
-	update_pages_allocated(-1);
+	vm_object_deallocate(obj);
 }
 
 vm_paddr_t
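
With page wiring gone, allocation and free reduce to VM object reference
management: vm_object_allocate() returns an object holding one reference,
and vm_object_deallocate() frees the object, and any pages it has
accumulated, once the last reference is dropped. A lifecycle sketch
assuming the signatures above ('mem_obj_lifecycle' is illustrative only):

    #include <sys/param.h>

    #include <vm/vm.h>
    #include <vm/vm_object.h>

    #include "vmm_mem.h"

    static void
    mem_obj_lifecycle(void)
    {
            vm_object_t obj;

            obj = vmm_mem_alloc(4 * PAGE_SIZE); /* created with ref count 1 */
            vm_object_reference(obj);   /* e.g. taken by vm_get_memobj() */
            vmm_mem_free(obj);          /* drops the allocation reference */
            vm_object_deallocate(obj);  /* last reference; pages freed */
    }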

Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/vmm_mem.h
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/vmm_mem.h	Sun Jul  7 01:32:52 2013	(r252907)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/vmm_mem.h	Sun Jul  7 01:52:05 2013	(r252908)
@@ -29,9 +29,11 @@
 #ifndef	_VMM_MEM_H_
 #define	_VMM_MEM_H_
 
+struct vm_object;
+
 int		vmm_mem_init(void);
-vm_paddr_t	vmm_mem_alloc(size_t size);
-void		vmm_mem_free(vm_paddr_t start, size_t size);
+struct vm_object *vmm_mem_alloc(size_t size);
+void		vmm_mem_free(struct vm_object *obj);
 vm_paddr_t	vmm_mem_maxaddr(void);
 
 #endif

