svn commit: r249967 - projects/bhyve_svm/sys/amd64/vmm/amd

Neel Natu neel at FreeBSD.org
Sat Apr 27 04:49:53 UTC 2013


Author: neel
Date: Sat Apr 27 04:49:51 2013
New Revision: 249967
URL: http://svnweb.freebsd.org/changeset/base/249967

Log:
  - SVM nested paging support
  - Define data structures to contain the SVM vcpu context
  - Define data structures to contain guest and host software context
  - Change license in vmcb.h and vmcb.c to remove references to NetApp that
    inadvertently sneaked in when the license text was copied from amdv.c.
  
  Submitted by:	Anish Gupta (akgupt3 at gmail.com)

Added:
  projects/bhyve_svm/sys/amd64/vmm/amd/npt.c   (contents, props changed)
  projects/bhyve_svm/sys/amd64/vmm/amd/npt.h   (contents, props changed)
  projects/bhyve_svm/sys/amd64/vmm/amd/svm.h   (contents, props changed)
  projects/bhyve_svm/sys/amd64/vmm/amd/svm_genassym.c   (contents, props changed)
  projects/bhyve_svm/sys/amd64/vmm/amd/svm_softc.h   (contents, props changed)
Modified:
  projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.c
  projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.h

Added: projects/bhyve_svm/sys/amd64/vmm/amd/npt.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/bhyve_svm/sys/amd64/vmm/amd/npt.c	Sat Apr 27 04:49:51 2013	(r249967)
@@ -0,0 +1,323 @@
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/pmap.h>
+#include <machine/md_var.h>
+#include <machine/vmparam.h>
+#include <machine/vmm.h>
+
+#include "svm.h"
+#include "vmcb.h"
+#include "svm_softc.h"
+#include "npt.h"
+
+/*
+ * "Nested Paging" is an optional SVM feature that provides two levels of
+ * address translation, thus eliminating the need for the VMM to maintain
+ * shadow page tables.
+ *
+ * Documented in APMv2, section 15.25, Nested Paging.
+ */
+
+#define PAGE_4KB		(4 * 1024)
+#define PAGE_2MB		(2 * 1024 * 1024UL)
+#define PAGE_1GB		(1024 * 1024 * 1024UL)
+
+#define GPA_UNMAPPED		((vm_paddr_t)~0)
+
+/* Convert a page table entry to a physical address. */
+#define PTE2PA(x)		((uint64_t)(x) & ~PAGE_MASK)
+
+MALLOC_DECLARE(M_SVM);
+
+static uint64_t svm_npt_create(pml4_entry_t *pml4, vm_paddr_t gpa, 
+				vm_paddr_t hpa, vm_memattr_t attr, 
+				int prot, uint64_t size);
+
+static const int PT_INDEX_MASK = 0x1FF;
+static const int PT_SHIFT = 9;
+
+/*
+ * Helper function to create nested page table entries for a page
+ * of size 1GB, 2MB or 4KB.
+ *
+ * Starting from the PML4, walk down and create a PDPTE, PDE or PTE
+ * depending on whether 'pg_size' is 1GB, 2MB or 4KB respectively.
+ *
+ * Return the size of the mapping created on success and 0 on failure.
+ *
+ * XXX: NPT PAT settings. 
+ */
+static uint64_t
+svm_npt_create(pml4_entry_t * pml4, vm_paddr_t gpa, vm_paddr_t hpa,
+    		vm_memattr_t attr, int prot, uint64_t pg_size)
+{
+	uint64_t *pt, *page, pa;
+	pt_entry_t mode;
+	int shift, index;
+
+	KASSERT(pg_size, ("Size of page must be 1GB, 2MB or 4KB"));
+	if (hpa & (pg_size - 1)) {
+		ERR("HPA(0x%lx) is not aligned, size:0x%lx", hpa, pg_size);
+		return (0);
+	}
+
+	if (gpa & (pg_size - 1)) {
+		ERR("GPA(0x%lx) is not aligned, size (0x%lx)", gpa, pg_size);
+		return (0);
+	}
+
+	/* Find out mode bits for PTE */
+	mode = PG_U;
+	if (prot & VM_PROT_WRITE)
+		mode |= PG_RW;
+	if ((prot & VM_PROT_EXECUTE) == 0) 	
+		mode |= pg_nx;
+	if (prot != VM_PROT_NONE) 
+		mode |= PG_V;
+		
+	pt = (uint64_t *)pml4;
+	shift = PML4SHIFT;
+
+	while ((shift > PAGE_SHIFT) && (pg_size < (1UL << shift))) {
+		/* Get PDP, PD or PT index from guest physical address. */
+		index = (gpa >> shift) & PT_INDEX_MASK;
+
+		/* If the entry is missing, allocate the next-level table. */
+		if (pt[index] == 0) {
+			page = malloc(PAGE_SIZE, M_SVM, M_WAITOK | M_ZERO);
+			pt[index] = vtophys(page) | mode;
+		}
+
+		pa = PTE2PA(pt[index]);
+		pt = (uint64_t *)PHYS_TO_DMAP(pa);
+		shift -= PT_SHIFT;
+	}
+
+	/* Create leaf entry mapping. */
+	index = (gpa >> shift) & PT_INDEX_MASK;
+	if (pt[index] != 0) {
+		ERR("Mapping already exists.");
+		return (0);
+	}
+
+	pt[index] = hpa | mode;
+	
+	/* If this is not a last-level (4KB) entry, set the PS bit. */
+	if (pg_size > PAGE_SIZE) {
+		pt[index] |= PG_PS;
+	}
+
+	return (1UL << shift);
+}
+
+/*
+ * Map guest physical address to host physical address.
+ */
+int
+svm_npt_vmmap_set(void *arg, vm_paddr_t gpa, vm_paddr_t hpa,
+	size_t size, vm_memattr_t attr, int prot, boolean_t spok)
+{
+	pml4_entry_t *pml4;
+	struct svm_softc *svm_sc;
+	uint64_t len, mapped, pg_size;
+
+	svm_sc = arg;
+	pml4 = svm_sc->np_pml4;
+
+	pg_size = PAGE_4KB;
+	if (spok) {
+		pg_size = PAGE_2MB;
+		if (amd_feature & AMDID_PAGE1GB)
+			pg_size = PAGE_1GB;
+	}
+
+	/* Compute the largest page mapping that can be used */
+	while (pg_size > PAGE_4KB) {
+		if (size >= pg_size &&
+		    (gpa & (pg_size - 1)) == 0 &&
+		    (hpa & (pg_size - 1)) == 0) {
+			break;
+		}
+		pg_size >>= PT_SHIFT;
+	}
+
+	len = 0;
+	while (len < size) {
+		mapped = svm_npt_create(pml4, gpa + len, hpa + len, attr, prot,
+					pg_size);
+		if (mapped == 0) {
+			panic("Couldn't map GPA:0x%lx, size:0x%lx", gpa + len,
+				pg_size);
+		}
+
+		len += mapped;
+	}
+
+	return (0);
+}
+
+/*
+ * Get HPA for a given GPA.
+ */
+vm_paddr_t
+svm_npt_vmmap_get(void *arg, vm_paddr_t gpa)
+{
+	struct svm_softc *svm_sc;
+	pml4_entry_t *pml4;
+	uint64_t *pt, pa, hpa, pgmask;
+	int shift, index;
+
+	svm_sc = arg;
+	pml4 = svm_sc->np_pml4;
+
+	pt = (uint64_t *)pml4;
+	shift = PML4SHIFT;
+	
+	while (shift > PAGE_SHIFT) {
+		/* Get PDP, PD or PT index from GPA. */
+		index = (gpa >> shift) & PT_INDEX_MASK;
+		if (pt[index] == 0) {
+			ERR("No entry for GPA:0x%lx.", gpa);
+			return (GPA_UNMAPPED);
+		}
+
+		if (pt[index] & PG_PS) {
+			break;
+		}
+
+		pa = PTE2PA(pt[index]);
+		pt = (uint64_t *)PHYS_TO_DMAP(pa);
+		shift -= PT_SHIFT;
+	}
+
+	index = (gpa >> shift) & PT_INDEX_MASK;
+	if (pt[index] == 0) {
+		ERR("No mapping for GPA:0x%lx.", gpa);
+		return (GPA_UNMAPPED);
+	}
+
+	/* Add GPA offset to HPA */
+	pgmask = (1UL << shift) - 1;
+	hpa = (PTE2PA(pt[index]) & ~pgmask) | (gpa & pgmask);
+
+	return (hpa);
+}
+
+/*
+ * AMD nested page table init.
+ */
+int
+svm_npt_init(void)
+{
+	
+	return (0);
+}
+
+/*
+ * Free Page Table page.
+ */
+static void
+free_pt(pd_entry_t pde)
+{
+	pt_entry_t *pt;
+
+	pt = (pt_entry_t *)PHYS_TO_DMAP(PTE2PA(pde));
+	free(pt, M_SVM);
+}
+
+/*
+ * Free Page Directory page.
+ */
+static void
+free_pd(pdp_entry_t pdpe)
+{
+	pd_entry_t *pd;
+	int i;
+
+	pd = (pd_entry_t *)PHYS_TO_DMAP(PTE2PA(pdpe));
+	for (i = 0; i < NPDEPG; i++) {
+		/* Skip not-present or superpage entries */
+		if ((pd[i] == 0) || (pd[i] & PG_PS))
+			continue;
+
+		free_pt(pd[i]);
+	}
+
+	free(pd, M_SVM);
+}
+
+/*
+ * Free Page Directory Pointer page.
+ */
+static void
+free_pdp(pml4_entry_t pml4e)
+{
+	pdp_entry_t *pdp;
+	int i;
+
+	pdp = (pdp_entry_t *)PHYS_TO_DMAP(PTE2PA(pml4e));
+	for (i = 0; i < NPDPEPG; i++) {
+		/* Skip not-present or superpage entries */
+		if ((pdp[i] == 0) || (pdp[i] & PG_PS))
+			continue;
+
+		free_pd(pdp[i]);
+	}
+
+	free(pdp, M_SVM);
+}
+
+/*
+ * Free the guest's nested page table.
+ */
+int
+svm_npt_cleanup(struct svm_softc *svm_sc)
+{
+	pml4_entry_t *pml4;
+	int i;
+
+	pml4 = svm_sc->np_pml4;
+
+	for (i = 0; i < NPML4EPG; i++) {
+		if (pml4[i] != 0) {
+			free_pdp(pml4[i]);
+			pml4[i] = 0;
+		}
+	}
+
+	return (0);
+}
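
A minimal userspace sketch of the table walk above (not part of this commit):
it shows how svm_npt_create() derives a 9-bit index at each level and how much
guest-physical address space an entry at that level covers.  The PML4SHIFT and
PAGE_SHIFT values of 39 and 12 are the usual amd64 constants and are hard-coded
here purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PML4SHIFT	39	/* amd64 PML4SHIFT */
#define SKETCH_PAGE_SHIFT	12	/* amd64 PAGE_SHIFT */
#define SKETCH_PT_SHIFT		9	/* index bits per table level */
#define SKETCH_PT_INDEX_MASK	0x1FF	/* 512 entries per table */

int
main(void)
{
	uint64_t gpa = 0x40201000UL;	/* example guest physical address */
	int shift;

	/* Same walk as svm_npt_create(): PML4 -> PDP -> PD -> PT. */
	for (shift = SKETCH_PML4SHIFT; shift >= SKETCH_PAGE_SHIFT;
	    shift -= SKETCH_PT_SHIFT) {
		printf("shift %2d: index %3d, entry covers 0x%lx bytes\n",
		    shift, (int)((gpa >> shift) & SKETCH_PT_INDEX_MASK),
		    1UL << shift);
	}
	return (0);
}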

Added: projects/bhyve_svm/sys/amd64/vmm/amd/npt.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/bhyve_svm/sys/amd64/vmm/amd/npt.h	Sat Apr 27 04:49:51 2013	(r249967)
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SVM_NPT_H_
+#define _SVM_NPT_H_
+
+struct svm_softc;
+
+int 	   svm_npt_init(void);
+int 	   svm_npt_cleanup(struct svm_softc *sc);
+vm_paddr_t svm_npt_vmmap_get(void *arg, vm_paddr_t gpa);
+int	   svm_npt_vmmap_set(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, 
+			     size_t len, vm_memattr_t attr, int prot,
+			     boolean_t sp);
+#endif /* _SVM_NPT_H_ */

Added: projects/bhyve_svm/sys/amd64/vmm/amd/svm.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/bhyve_svm/sys/amd64/vmm/amd/svm.h	Sat Apr 27 04:49:51 2013	(r249967)
@@ -0,0 +1,116 @@
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SVM_H_
+#define _SVM_H_
+
+#define BIT(n)			(1ULL << (n))
+#define ERR(fmt, args...)	\
+	printf("SVM ERROR:%s " fmt "\n", __func__, ##args)
+
+/*
+ * Software saved machine state for guest and host. 
+ */
+
+/* Additional guest register state */
+struct svm_gctx {
+	register_t	sctx_rdx;
+	register_t	sctx_rdi;
+	register_t	sctx_rsi;
+	/* Points to host context area. */
+	register_t	sctx_hostctx_base;
+};
+
+/* Additional host register state */
+struct svm_hctx {
+	uint16_t	sctx_fs;
+	uint16_t	sctx_gs;
+
+	register_t	sctx_rsp;
+};
+
+/* Common register context area for guest and host. */
+struct svm_regctx {
+	register_t	sctx_rbp;
+
+	register_t 	sctx_rbx;
+	register_t	sctx_rcx;
+
+	register_t	sctx_r8;
+	register_t	sctx_r9;
+	register_t	sctx_r10;
+	register_t	sctx_r11;
+	register_t	sctx_r12;
+	register_t	sctx_r13;
+	register_t	sctx_r14;
+	register_t	sctx_r15;
+
+	union {
+		struct svm_hctx h;	/* host-specific register state */
+		struct svm_gctx g;	/* guest-specific register state */
+	} e;
+};
+
+void svm_launch(uint64_t pa, struct svm_regctx *, struct svm_regctx *);
+
+static __inline void
+disable_gintr(void)
+{
+
+	__asm __volatile("clgi" : : :);
+}
+
+static __inline void
+enable_gintr(void)
+{
+
+	__asm __volatile("stgi" : : :);
+}
+
+static __inline void
+save_cr2(uint64_t *cr2)
+{
+
+	__asm __volatile(
+		"mov %%cr2, %%rax; movq %%rax, %0"	
+		:"=m"(*cr2)	
+		:
+		:"rax", "memory");
+}
+
+static __inline void
+load_cr2(uint64_t *cr2)
+{
+	__asm __volatile(
+		"movq %0, %%rax; movq %%rax, %%cr2"
+		:
+		:"m"(*cr2)	
+		:"rax");
+}
+
+#endif /* _SVM_H_ */
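
The clgi/stgi and %cr2 helpers above are meant to bracket a guest entry.  The
fragment below is a purely hypothetical sketch of such a sequence, assuming
svm.h is included; svm.c is not part of this commit, the argument order of
svm_launch() and the names gctx/hctx/guest_cr2 are assumptions, and the real
entry path may differ.

/* Hypothetical vcpu entry sequence; illustrative only. */
static void
svm_vcpu_enter_sketch(uint64_t vmcb_pa, struct svm_regctx *gctx,
    struct svm_regctx *hctx, uint64_t *guest_cr2)
{

	disable_gintr();		 /* CLGI: hold interrupts across VMRUN */
	load_cr2(guest_cr2);		 /* install the guest's saved %cr2 */
	svm_launch(vmcb_pa, gctx, hctx); /* argument order assumed */
	save_cr2(guest_cr2);		 /* capture %cr2 after #VMEXIT */
	enable_gintr();			 /* STGI */
}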

Added: projects/bhyve_svm/sys/amd64/vmm/amd/svm_genassym.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/bhyve_svm/sys/amd64/vmm/amd/svm_genassym.c	Sat Apr 27 04:49:51 2013	(r249967)
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/assym.h>
+
+#include "svm.h"
+
+ASSYM(SCTX_RBX, offsetof(struct svm_regctx, sctx_rbx));
+ASSYM(SCTX_RCX, offsetof(struct svm_regctx, sctx_rcx));
+ASSYM(SCTX_RBP, offsetof(struct svm_regctx, sctx_rbp));
+
+ASSYM(SCTX_R8,  offsetof(struct svm_regctx, sctx_r8));
+ASSYM(SCTX_R9,  offsetof(struct svm_regctx, sctx_r9));
+ASSYM(SCTX_R10, offsetof(struct svm_regctx, sctx_r10));
+ASSYM(SCTX_R11, offsetof(struct svm_regctx, sctx_r11));
+ASSYM(SCTX_R12, offsetof(struct svm_regctx, sctx_r12));
+ASSYM(SCTX_R13, offsetof(struct svm_regctx, sctx_r13));
+ASSYM(SCTX_R14, offsetof(struct svm_regctx, sctx_r14));
+ASSYM(SCTX_R15, offsetof(struct svm_regctx, sctx_r15));
+
+/* Guest only registers. */
+ASSYM(SCTX_GUEST_RDX, offsetof(struct svm_regctx, e.g.sctx_rdx));
+ASSYM(SCTX_GUEST_RDI, offsetof(struct svm_regctx, e.g.sctx_rdi));
+ASSYM(SCTX_GUEST_RSI, offsetof(struct svm_regctx, e.g.sctx_rsi));
+ASSYM(SCTX_GUEST_HCTX_BASE, offsetof(struct svm_regctx, e.g.sctx_hostctx_base));
+
+/* Host only registers.  */
+ASSYM(SCTX_HOST_GS,  offsetof(struct svm_regctx, e.h.sctx_gs));
+ASSYM(SCTX_HOST_FS,  offsetof(struct svm_regctx, e.h.sctx_fs));
+ASSYM(SCTX_HOST_RSP, offsetof(struct svm_regctx, e.h.sctx_rsp));

Added: projects/bhyve_svm/sys/amd64/vmm/amd/svm_softc.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/bhyve_svm/sys/amd64/vmm/amd/svm_softc.h	Sat Apr 27 04:49:51 2013	(r249967)
@@ -0,0 +1,117 @@
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3 at gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SVM_SOFTC_H_
+#define _SVM_SOFTC_H_
+
+#define SVM_IO_BITMAP_SIZE	(3 * PAGE_SIZE)
+#define SVM_MSR_BITMAP_SIZE	(2 * PAGE_SIZE)
+
+/*
+ * svm_vcpu contains SVM VMCB state and vcpu register state.
+ */
+struct svm_vcpu {
+	struct vmcb	 vmcb;	  /* hardware saved vcpu context */
+	struct svm_regctx swctx;  /* software saved vcpu context */
+	uint64_t	 vmcb_pa; /* VMCB physical address */
+	uint64_t	 loop;	  /* loop count for vcpu */
+	int		 lastcpu; /* host cpu that the vcpu last ran on */
+} __aligned(PAGE_SIZE);
+
+/*
+ * SVM softc, one per virtual machine.
+ */
+struct svm_softc {
+	/*
+	 * I/O permission bitmap; VMCB.ctrl.iopm_base_pa should point to this.
+	 * If a bit is set, access to the corresponding I/O port is intercepted.
+	 */
+	uint8_t iopm_bitmap[SVM_IO_BITMAP_SIZE];
+
+	/*
+	 * MSR permission bitmap, VMCB.ctrl.msrpm_base_pa should point to this.
+	 * Two bits are used for each MSR with the LSB used for read access
+	 * and the MSB used for write access. A value of '1' indicates that
+	 * the operation is intercepted.
+	 */
+	uint8_t	msr_bitmap[SVM_MSR_BITMAP_SIZE];
+
+	/* Nested Paging */
+	pml4_entry_t	np_pml4[NPML4EPG];
+
+	/* Virtual machine pointer. */
+	struct vm	*vm;
+
+	/* Guest VCPU h/w and s/w context. */
+	struct svm_vcpu vcpu[VM_MAXCPU];
+
+	uint32_t	svm_feature;	/* SVM features from CPUID. */
+
+	int		asid;		/* Guest Address Space Identifier */
+	int		vcpu_cnt;	/* number of VCPUs for this guest. */
+} __aligned(PAGE_SIZE);
+
+CTASSERT((offsetof(struct svm_softc, np_pml4) & PAGE_MASK) == 0);
+
+static __inline struct svm_vcpu *
+svm_get_vcpu(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu]));
+}
+
+static __inline struct vmcb *
+svm_get_vmcb(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu].vmcb));
+}
+
+static __inline struct vmcb_state *
+svm_get_vmcb_state(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu].vmcb.state));
+}
+
+static __inline struct vmcb_ctrl *
+svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu].vmcb.ctrl));
+}
+
+static __inline struct svm_regctx *
+svm_get_guest_regctx(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu].swctx));
+}
+
+void svm_dump_vmcb(struct svm_softc *svm_sc, int vcpu);
+#endif /* _SVM_SOFTC_H_ */
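
The two-bits-per-MSR encoding described in the msr_bitmap comment can be
sketched as follows.  This is an illustration only, not code from the commit:
the real MSR permission map additionally places each architectural MSR range
at a fixed offset inside the two pages, and that range lookup is omitted here.

#include <stdint.h>

/*
 * Set the intercept bits for the MSR at position 'slot' within its range:
 * the even bit intercepts reads, the odd bit intercepts writes.
 */
static void
msrpm_set_intercept(uint8_t *bitmap, uint32_t slot, int rd, int wr)
{
	uint32_t bit = slot * 2;

	if (rd)
		bitmap[bit / 8] |= 1u << (bit % 8);
	if (wr)
		bitmap[(bit + 1) / 8] |= 1u << ((bit + 1) % 8);
}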

Modified: projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.c
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.c	Sat Apr 27 04:09:09 2013	(r249966)
+++ projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.c	Sat Apr 27 04:49:51 2013	(r249967)
@@ -6,24 +6,22 @@
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
- * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
@@ -38,6 +36,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/vmm.h>
 
 #include "vmcb.h"
+#include "svm.h"
 
 /*
  * The VMCB aka Virtual Machine Control Block is a 4KB aligned page

Modified: projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.h
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.h	Sat Apr 27 04:09:09 2013	(r249966)
+++ projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.h	Sat Apr 27 04:49:51 2013	(r249967)
@@ -6,22 +6,22 @@
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
- * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
@@ -29,14 +29,6 @@
 #ifndef _VMCB_H_
 #define	_VMCB_H_
 
-#ifndef	BIT
-#define	BIT(bitpos)		(1UL << (bitpos))
-#endif
-
-#ifndef	ERR
-#define	ERR(...)
-#endif
-
 /*
  * Secure Virtual Machine: AMD64 Programmer's Manual Vol2, Chapter 15
  * Layout of VMCB: AMD64 Programmer's Manual Vol2, Appendix B

