svn commit: r242275 - in projects/bhyve/sys: amd64/vmm amd64/vmm/intel modules/vmm x86/include
Neel Natu
neel@FreeBSD.org
Mon Oct 29 01:51:25 UTC 2012
Author: neel
Date: Mon Oct 29 01:51:24 2012
New Revision: 242275
URL: http://svn.freebsd.org/changeset/base/242275
Log:
Corral all the host state associated with the virtual machine into its own file.
This state is independent of the type of hardware assist used so there is
really no need for it to be in Intel-specific code.
Obtained from: NetApp
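
In practical terms the commit replaces direct reads of host registers inside the Intel-specific code with a small, hardware-independent accessor API. A minimal sketch of the intended call pattern follows; the helper setup_host_state() is hypothetical and only illustrates how a hardware-assist backend is expected to consume the API:

#include <sys/param.h>

#include "vmm_host.h"

/* Hypothetical backend helper, for illustration only. */
static void
setup_host_state(void)
{
        uint64_t pat, efer, cr0, cr4;

        /* Values cached once at module load by vmm_host_state_init(). */
        pat  = vmm_get_host_pat();      /* formerly rdmsr(MSR_PAT)  */
        efer = vmm_get_host_efer();     /* formerly rdmsr(MSR_EFER) */
        cr0  = vmm_get_host_cr0();      /* rcr0() with CR0.TS forced on */
        cr4  = vmm_get_host_cr4();      /* rcr4(); VMX ORs in CR4_VMXE itself */

        /* ... program these into the backend's host-state area ... */
        (void)pat; (void)efer; (void)cr0; (void)cr4;
}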
Added:
projects/bhyve/sys/amd64/vmm/vmm_host.c (contents, props changed)
projects/bhyve/sys/amd64/vmm/vmm_host.h (contents, props changed)
Modified:
projects/bhyve/sys/amd64/vmm/intel/vmcs.c
projects/bhyve/sys/amd64/vmm/intel/vmx.c
projects/bhyve/sys/amd64/vmm/vmm.c
projects/bhyve/sys/modules/vmm/Makefile
projects/bhyve/sys/x86/include/specialreg.h
Modified: projects/bhyve/sys/amd64/vmm/intel/vmcs.c
==============================================================================
--- projects/bhyve/sys/amd64/vmm/intel/vmcs.c Mon Oct 29 01:35:17 2012 (r242274)
+++ projects/bhyve/sys/amd64/vmm/intel/vmcs.c Mon Oct 29 01:51:24 2012 (r242275)
@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
#include <machine/pmap.h>
#include <machine/vmm.h>
+#include "vmm_host.h"
#include "vmcs.h"
#include "vmx_cpufunc.h"
#include "ept.h"
@@ -314,12 +315,12 @@ vmcs_set_defaults(struct vmcs *vmcs,
{
int error, codesel, datasel, tsssel;
u_long cr0, cr4, efer;
- uint64_t eptp, pat;
+ uint64_t eptp, pat, fsbase, idtrbase;
uint32_t exc_bitmap;
- codesel = GSEL(GCODE_SEL, SEL_KPL);
- datasel = GSEL(GDATA_SEL, SEL_KPL);
- tsssel = GSEL(GPROC0_SEL, SEL_KPL);
+ codesel = vmm_get_host_codesel();
+ datasel = vmm_get_host_datasel();
+ tsssel = vmm_get_host_tsssel();
/*
* Make sure we have a "current" VMCS to work with.
@@ -357,29 +358,22 @@ vmcs_set_defaults(struct vmcs *vmcs,
/* Host state */
/* Initialize host IA32_PAT MSR */
- pat = rdmsr(MSR_PAT);
+ pat = vmm_get_host_pat();
if ((error = vmwrite(VMCS_HOST_IA32_PAT, pat)) != 0)
goto done;
/* Load the IA32_EFER MSR */
- efer = rdmsr(MSR_EFER);
+ efer = vmm_get_host_efer();
if ((error = vmwrite(VMCS_HOST_IA32_EFER, efer)) != 0)
goto done;
/* Load the control registers */
- /*
- * We always want CR0.TS to be set when the processor does a VM exit.
- *
- * With emulation turned on unconditionally after a VM exit, we are
- * able to trap inadvertent use of the FPU until the guest FPU state
- * has been safely squirreled away.
- */
- cr0 = rcr0() | CR0_TS;
+ cr0 = vmm_get_host_cr0();
if ((error = vmwrite(VMCS_HOST_CR0, cr0)) != 0)
goto done;
- cr4 = rcr4();
+ cr4 = vmm_get_host_cr4() | CR4_VMXE;
if ((error = vmwrite(VMCS_HOST_CR4, cr4)) != 0)
goto done;
@@ -411,10 +405,12 @@ vmcs_set_defaults(struct vmcs *vmcs,
* Note that we exclude %gs, tss and gdtr here because their base
* address is pcpu specific.
*/
- if ((error = vmwrite(VMCS_HOST_FS_BASE, 0)) != 0)
+ fsbase = vmm_get_host_fsbase();
+ if ((error = vmwrite(VMCS_HOST_FS_BASE, fsbase)) != 0)
goto done;
- if ((error = vmwrite(VMCS_HOST_IDTR_BASE, r_idt.rd_base)) != 0)
+ idtrbase = vmm_get_host_idtrbase();
+ if ((error = vmwrite(VMCS_HOST_IDTR_BASE, idtrbase)) != 0)
goto done;
/* instruction pointer */
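
Pulled together, the host-state block of vmcs_set_defaults() now reads approximately as below (abridged from the hunks above, with the original goto-based error handling). Note that CR4_VMXE is ORed in at this layer because the bit is Intel-specific and therefore not part of the cached, hardware-independent CR4 value:

        /* Host state */
        pat = vmm_get_host_pat();
        if ((error = vmwrite(VMCS_HOST_IA32_PAT, pat)) != 0)
                goto done;

        efer = vmm_get_host_efer();
        if ((error = vmwrite(VMCS_HOST_IA32_EFER, efer)) != 0)
                goto done;

        cr0 = vmm_get_host_cr0();               /* rcr0() | CR0_TS, cached */
        if ((error = vmwrite(VMCS_HOST_CR0, cr0)) != 0)
                goto done;

        cr4 = vmm_get_host_cr4() | CR4_VMXE;    /* VMXE is Intel-specific */
        if ((error = vmwrite(VMCS_HOST_CR4, cr4)) != 0)
                goto done;

        fsbase = vmm_get_host_fsbase();
        if ((error = vmwrite(VMCS_HOST_FS_BASE, fsbase)) != 0)
                goto done;

        idtrbase = vmm_get_host_idtrbase();
        if ((error = vmwrite(VMCS_HOST_IDTR_BASE, idtrbase)) != 0)
                goto done;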
Modified: projects/bhyve/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- projects/bhyve/sys/amd64/vmm/intel/vmx.c Mon Oct 29 01:35:17 2012 (r242274)
+++ projects/bhyve/sys/amd64/vmm/intel/vmx.c Mon Oct 29 01:51:24 2012 (r242275)
@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <x86/apicreg.h>
#include <machine/vmm.h>
+#include "vmm_host.h"
#include "vmm_lapic.h"
#include "vmm_msr.h"
#include "vmm_ktr.h"
@@ -64,8 +65,6 @@ __FBSDID("$FreeBSD$");
#include "vmx_controls.h"
#include "vmm_instruction_emul.h"
-#define CR4_VMXE (1UL << 13)
-
#define PINBASED_CTLS_ONE_SETTING \
(PINBASED_EXTINT_EXITING | \
PINBASED_NMI_EXITING | \
@@ -118,8 +117,6 @@ __FBSDID("$FreeBSD$");
MALLOC_DEFINE(M_VMX, "vmx", "vmx");
-extern struct pcpu __pcpu[];
-
int vmxon_enabled[MAXCPU];
static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
@@ -836,15 +833,15 @@ vmx_set_pcpu_defaults(struct vmx *vmx, i
vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
- error = vmwrite(VMCS_HOST_TR_BASE, (u_long)PCPU_GET(tssp));
+ error = vmwrite(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
if (error != 0)
goto done;
- error = vmwrite(VMCS_HOST_GDTR_BASE, (u_long)&gdt[NGDT * curcpu]);
+ error = vmwrite(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
if (error != 0)
goto done;
- error = vmwrite(VMCS_HOST_GS_BASE, (u_long)&__pcpu[curcpu]);
+ error = vmwrite(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
if (error != 0)
goto done;
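
The three fields above are deliberately not cached in vmm_host.c: their values depend on which host CPU the vcpu is currently running on, so vmm_host.h provides them as inline functions that are re-evaluated whenever the vcpu migrates. A condensed sketch of the rewritten sequence (the commit itself keeps the goto-based error handling shown above):

        /* Per-CPU host fields, re-loaded on every vcpu migration. */
        error = vmwrite(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
        if (error == 0)
                error = vmwrite(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
        if (error == 0)
                error = vmwrite(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());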
Modified: projects/bhyve/sys/amd64/vmm/vmm.c
==============================================================================
--- projects/bhyve/sys/amd64/vmm/vmm.c Mon Oct 29 01:35:17 2012 (r242274)
+++ projects/bhyve/sys/amd64/vmm/vmm.c Mon Oct 29 01:51:24 2012 (r242275)
@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <x86/apicreg.h>
#include <machine/vmm.h>
+#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include <machine/vmm_dev.h>
@@ -196,6 +197,7 @@ vmm_init(void)
{
int error;
+ vmm_host_state_init();
vmm_ipi_init();
error = vmm_mem_init();
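
The snapshot must be taken before any host-state fields are programmed, so vmm_host_state_init() becomes the first step of module initialization. An abridged view of vmm_init() after this hunk:

static int
vmm_init(void)
{
        int error;

        vmm_host_state_init();          /* snapshot EFER, PAT, CR0|TS, CR4 once */
        vmm_ipi_init();

        error = vmm_mem_init();
        /* ... remainder unchanged ... */
        return (error);
}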
Added: projects/bhyve/sys/amd64/vmm/vmm_host.c
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ projects/bhyve/sys/amd64/vmm/vmm_host.c Mon Oct 29 01:51:24 2012 (r242275)
@@ -0,0 +1,124 @@
+/*-
+ * Copyright (c) 2012 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pcpu.h>
+
+#include <machine/cpufunc.h>
+#include <machine/segments.h>
+#include <machine/specialreg.h>
+
+#include "vmm_host.h"
+
+static uint64_t vmm_host_efer, vmm_host_pat, vmm_host_cr0, vmm_host_cr4;
+
+void
+vmm_host_state_init(void)
+{
+
+ vmm_host_efer = rdmsr(MSR_EFER);
+ vmm_host_pat = rdmsr(MSR_PAT);
+
+ /*
+ * We always want CR0.TS to be set when the processor does a VM exit.
+ *
+ * With emulation turned on unconditionally after a VM exit, we are
+ * able to trap inadvertent use of the FPU until the guest FPU state
+ * has been safely squirreled away.
+ */
+ vmm_host_cr0 = rcr0() | CR0_TS;
+
+ vmm_host_cr4 = rcr4();
+}
+
+uint64_t
+vmm_get_host_pat(void)
+{
+
+ return (vmm_host_pat);
+}
+
+uint64_t
+vmm_get_host_efer(void)
+{
+
+ return (vmm_host_efer);
+}
+
+uint64_t
+vmm_get_host_cr0(void)
+{
+
+ return (vmm_host_cr0);
+}
+
+uint64_t
+vmm_get_host_cr4(void)
+{
+
+ return (vmm_host_cr4);
+}
+
+uint64_t
+vmm_get_host_datasel(void)
+{
+
+ return (GSEL(GDATA_SEL, SEL_KPL));
+
+}
+
+uint64_t
+vmm_get_host_codesel(void)
+{
+
+ return (GSEL(GCODE_SEL, SEL_KPL));
+}
+
+uint64_t
+vmm_get_host_tsssel(void)
+{
+
+ return (GSEL(GPROC0_SEL, SEL_KPL));
+}
+
+uint64_t
+vmm_get_host_fsbase(void)
+{
+
+ return (0);
+}
+
+uint64_t
+vmm_get_host_idtrbase(void)
+{
+
+ return (r_idt.rd_base);
+}
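
A hypothetical sanity check (not part of the commit) showing the intended usage of the new file: initialize once, then read the cached snapshot from anywhere in the vmm code:

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/specialreg.h>

#include "vmm_host.h"

static void
vmm_host_selftest(void)
{

        vmm_host_state_init();

        /* CR0.TS is forced on so FPU use after a VM exit can be trapped. */
        KASSERT((vmm_get_host_cr0() & CR0_TS) != 0,
            ("host CR0 snapshot must have CR0.TS set"));

        /* The host does not use the %fs base, so it is reported as 0. */
        KASSERT(vmm_get_host_fsbase() == 0,
            ("host %%fs base expected to be 0"));
}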
Added: projects/bhyve/sys/amd64/vmm/vmm_host.h
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ projects/bhyve/sys/amd64/vmm/vmm_host.h Mon Oct 29 01:51:24 2012 (r242275)
@@ -0,0 +1,75 @@
+/*-
+ * Copyright (c) 2012 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VMM_HOST_H_
+#define _VMM_HOST_H_
+
+#ifndef _KERNEL
+#error "no user-serviceable parts inside"
+#endif
+
+void vmm_host_state_init(void);
+
+uint64_t vmm_get_host_pat(void);
+uint64_t vmm_get_host_efer(void);
+uint64_t vmm_get_host_cr0(void);
+uint64_t vmm_get_host_cr4(void);
+uint64_t vmm_get_host_datasel(void);
+uint64_t vmm_get_host_codesel(void);
+uint64_t vmm_get_host_tsssel(void);
+uint64_t vmm_get_host_fsbase(void);
+uint64_t vmm_get_host_idtrbase(void);
+
+/*
+ * Inline access to host state that is used on every VM entry
+ */
+static __inline uint64_t
+vmm_get_host_trbase(void)
+{
+
+ return ((uint64_t)PCPU_GET(tssp));
+}
+
+static __inline uint64_t
+vmm_get_host_gdtrbase(void)
+{
+
+ return ((uint64_t)&gdt[NGDT * curcpu]);
+}
+
+struct pcpu;
+extern struct pcpu __pcpu[];
+
+static __inline uint64_t
+vmm_get_host_gsbase(void)
+{
+
+ return ((uint64_t)&__pcpu[curcpu]);
+}
+
+#endif
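
The header splits host state into two classes: values that are identical on every CPU (EFER, PAT, CR0, CR4, the selectors, the IDTR and %fs bases) are snapshotted once in vmm_host.c, while the TR, GDTR and %gs bases are pcpu-specific and are therefore exposed as inlines that read curcpu at the time of the call. An illustrative fragment, not lifted from the commit; the critical section is only there to make the "value depends on the current CPU" point explicit:

        uint64_t gsbase, gdtrbase;

        critical_enter();                       /* stay on this CPU */
        gsbase = vmm_get_host_gsbase();         /* &__pcpu[curcpu] right now */
        gdtrbase = vmm_get_host_gdtrbase();     /* &gdt[NGDT * curcpu] */
        critical_exit();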
Modified: projects/bhyve/sys/modules/vmm/Makefile
==============================================================================
--- projects/bhyve/sys/modules/vmm/Makefile Mon Oct 29 01:35:17 2012 (r242274)
+++ projects/bhyve/sys/modules/vmm/Makefile Mon Oct 29 01:51:24 2012 (r242275)
@@ -13,6 +13,7 @@ CFLAGS+= -I${.CURDIR}/../../amd64/vmm/in
.PATH: ${.CURDIR}/../../amd64/vmm
SRCS+= vmm.c \
vmm_dev.c \
+ vmm_host.c \
vmm_instruction_emul.c \
vmm_ipi.c \
vmm_lapic.c \
Modified: projects/bhyve/sys/x86/include/specialreg.h
==============================================================================
--- projects/bhyve/sys/x86/include/specialreg.h Mon Oct 29 01:35:17 2012 (r242274)
+++ projects/bhyve/sys/x86/include/specialreg.h Mon Oct 29 01:51:24 2012 (r242275)
@@ -66,6 +66,7 @@
#define CR4_PCE 0x00000100 /* Performance monitoring counter enable */
#define CR4_FXSR 0x00000200 /* Fast FPU save/restore used by OS */
#define CR4_XMM 0x00000400 /* enable SIMD/MMX2 to use except 16 */
+#define CR4_VMXE 0x00002000 /* enable VMX operation (Intel-specific) */
#define CR4_XSAVE 0x00040000 /* XSETBV/XGETBV */
/*
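
CR4_VMXE (bit 13) was previously a private define in vmx.c; moving it into specialreg.h lets both the VMX code and the shared VMCS code use it. The bit has to be set in CR4 before VMXON can be executed, roughly as in this illustrative fragment (not taken from the commit):

#include <sys/param.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>

/* Illustrative only: enable VMX operation on the current CPU. */
static void
enable_cr4_vmxe(void)
{
        u_long cr4;

        cr4 = rcr4();
        if ((cr4 & CR4_VMXE) == 0)
                load_cr4(cr4 | CR4_VMXE);
}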