svn commit: r252545 - projects/bhyve_npt_pmap/sys/amd64/vmm/intel

Neel Natu neel at FreeBSD.org
Wed Jul 3 06:43:08 UTC 2013


Author: neel
Date: Wed Jul  3 06:43:06 2013
New Revision: 252545
URL: http://svnweb.freebsd.org/changeset/base/252545

Log:
  There is no need to assign a VPID to a virtual machine since we are using
  nested page tables. All mappings created in VMX non-root mode are tagged
  with the EP4TA, which is guaranteed to be unique for each virtual machine.
  
  This makes the use of VPIDs redundant, so get rid of them.
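  
  For illustration, the call sites after this change reduce to the following
  (a paraphrase of the diff below, not additional code; 'allcpus' selects a
  system-wide smp_rendezvous versus a local single-context INVEPT):
  
  	/* VM creation: flush stale EP4TA-tagged mappings on all host cpus. */
  	vmx->eptphys = vtophys(vmx->pml4ept);
  	ept_invalidate_mappings(vmx->eptphys, 1);
  
  	/* vcpu migrated to a new host cpu: flush only the local TLB. */
  	ept_invalidate_mappings(vmx->eptphys, 0);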

Modified:
  projects/bhyve_npt_pmap/sys/amd64/vmm/intel/ept.c
  projects/bhyve_npt_pmap/sys/amd64/vmm/intel/ept.h
  projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmcs.c
  projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmcs.h
  projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.c
  projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.h

Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/intel/ept.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/intel/ept.c	Wed Jul  3 06:23:46 2013	(r252544)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/intel/ept.c	Wed Jul  3 06:43:06 2013	(r252545)
@@ -382,11 +382,14 @@ invept_single_context(void *arg)
 }
 
 void
-ept_invalidate_mappings(u_long pml4ept)
+ept_invalidate_mappings(u_long pml4ept, int allcpus)
 {
 	struct invept_desc invept_desc = { 0 };
 
 	invept_desc.eptp = EPTP(pml4ept);
 
-	smp_rendezvous(NULL, invept_single_context, NULL, &invept_desc);
+	if (allcpus)
+		smp_rendezvous(NULL, invept_single_context, NULL, &invept_desc);
+	else
+		invept_single_context(&invept_desc);
 }

Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/intel/ept.h
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/intel/ept.h	Wed Jul  3 06:23:46 2013	(r252544)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/intel/ept.h	Wed Jul  3 06:43:06 2013	(r252545)
@@ -38,6 +38,6 @@ int	ept_init(void);
 int	ept_vmmmap_set(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, size_t length,
 	    vm_memattr_t attr, int prot, boolean_t allow_superpage_mappings);
 vm_paddr_t ept_vmmmap_get(void *arg, vm_paddr_t gpa);
-void	ept_invalidate_mappings(u_long ept_pml4);
+void	ept_invalidate_mappings(u_long ept_pml4, int allcpus);
 void	ept_vmcleanup(struct vmx *vmx);
 #endif

Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmcs.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmcs.c	Wed Jul  3 06:23:46 2013	(r252544)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmcs.c	Wed Jul  3 06:43:06 2013	(r252545)
@@ -321,7 +321,7 @@ vmcs_set_defaults(struct vmcs *vmcs,
 		  u_long host_rip, u_long host_rsp, u_long ept_pml4,
 		  uint32_t pinbased_ctls, uint32_t procbased_ctls,
 		  uint32_t procbased_ctls2, uint32_t exit_ctls,
-		  uint32_t entry_ctls, u_long msr_bitmap, uint16_t vpid)
+		  uint32_t entry_ctls, u_long msr_bitmap)
 {
 	int error, codesel, datasel, tsssel;
 	u_long cr0, cr4, efer;
@@ -436,10 +436,6 @@ vmcs_set_defaults(struct vmcs *vmcs,
 	if ((error = vmwrite(VMCS_EPTP, eptp)) != 0)
 		goto done;
 
-	/* vpid */
-	if ((error = vmwrite(VMCS_VPID, vpid)) != 0)
-		goto done;
-
 	/* msr bitmap */
 	if ((error = vmwrite(VMCS_MSR_BITMAP, msr_bitmap)) != 0)
 		goto done;
@@ -494,7 +490,6 @@ DB_SHOW_COMMAND(vmcs, db_show_vmcs)
 		return;
 	}
 	db_printf("VMCS: %jx\n", cur_vmcs);
-	db_printf("VPID: %lu\n", vmcs_read(VMCS_VPID));
 	db_printf("Activity: ");
 	val = vmcs_read(VMCS_GUEST_ACTIVITY);
 	switch (val) {

Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmcs.h
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmcs.h	Wed Jul  3 06:23:46 2013	(r252544)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmcs.h	Wed Jul  3 06:43:06 2013	(r252545)
@@ -50,8 +50,7 @@ int	vmcs_set_defaults(struct vmcs *vmcs,
 			  u_long ept_pml4,
 			  uint32_t pinbased_ctls, uint32_t procbased_ctls,
 			  uint32_t procbased_ctls2, uint32_t exit_ctls,
-			  uint32_t entry_ctls, u_long msr_bitmap,
-			  uint16_t vpid);
+			  uint32_t entry_ctls, u_long msr_bitmap);
 int	vmcs_getreg(struct vmcs *vmcs, int running, int ident, uint64_t *rv);
 int	vmcs_setreg(struct vmcs *vmcs, int running, int ident, uint64_t val);
 int	vmcs_getdesc(struct vmcs *vmcs, int ident,

Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.c	Wed Jul  3 06:23:46 2013	(r252544)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.c	Wed Jul  3 06:43:06 2013	(r252545)
@@ -138,8 +138,6 @@ SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_
 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
 	     &cr4_zeros_mask, 0, NULL);
 
-static volatile u_int nextvpid;
-
 static int vmx_no_patmsr;
 
 static int vmx_initialized;
@@ -400,7 +398,6 @@ msr_save_area_init(struct msr_entry *g_a
 static void
 vmx_disable(void *arg __unused)
 {
-	struct invvpid_desc invvpid_desc = { 0 };
 	struct invept_desc invept_desc = { 0 };
 
 	if (vmxon_enabled[curcpu]) {
@@ -411,7 +408,6 @@ vmx_disable(void *arg __unused)
 		 * caching structures. This prevents potential retention of
 		 * cached information in the TLB between distinct VMX episodes.
 		 */
-		invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
 		invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
 		vmxoff();
 	}
@@ -489,12 +485,6 @@ vmx_init(void)
 		return (error);
 	}
 
-	/* Check support for VPID */
-	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
-			       PROCBASED2_ENABLE_VPID, 0, &tmp);
-	if (error == 0)
-		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
-
 	/* Check support for pin-based VM-execution controls */
 	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
 			       MSR_VMX_TRUE_PINBASED_CTLS,
@@ -615,37 +605,6 @@ vmx_init(void)
 	return (0);
 }
 
-/*
- * If this processor does not support VPIDs then simply return 0.
- *
- * Otherwise generate the next value of VPID to use. Any value is alright
- * as long as it is non-zero.
- *
- * We always execute in VMX non-root context with EPT enabled. Thus all
- * combined mappings are tagged with the (EP4TA, VPID, PCID) tuple. This
- * in turn means that multiple VMs can share the same VPID as long as
- * they have distinct EPT page tables.
- *
- * XXX
- * We should optimize this so that it returns VPIDs that are not in
- * use. Then we will not unnecessarily invalidate mappings in
- * vmx_set_pcpu_defaults() just because two or more vcpus happen to
- * use the same 'vpid'.
- */
-static uint16_t
-vmx_vpid(void)
-{
-	uint16_t vpid = 0;
-
-	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) != 0) {
-		do {
-			vpid = atomic_fetchadd_int(&nextvpid, 1);
-		} while (vpid == 0);
-	}
-
-	return (vpid);
-}
-
 static int
 vmx_setup_cr_shadow(int which, struct vmcs *vmcs)
 {
@@ -683,7 +642,6 @@ vmx_setup_cr_shadow(int which, struct vm
 static void *
 vmx_vminit(struct vm *vm)
 {
-	uint16_t vpid;
 	int i, error, guest_msr_count;
 	struct vmx *vmx;
 
@@ -700,10 +658,9 @@ vmx_vminit(struct vm *vm)
 	 * VMX transitions are not required to invalidate any guest physical
 	 * mappings. So, it may be possible for stale guest physical mappings
 	 * to be present in the processor TLBs.
-	 *
-	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
 	 */
-	ept_invalidate_mappings(vtophys(vmx->pml4ept));
+	vmx->eptphys = vtophys(vmx->pml4ept);
+	ept_invalidate_mappings(vmx->eptphys, 1);
 
 	msr_bitmap_initialize(vmx->msr_bitmap);
 
@@ -754,8 +711,6 @@ vmx_vminit(struct vm *vm)
 			      error, i);
 		}
 
-		vpid = vmx_vpid();
-
 		error = vmcs_set_defaults(&vmx->vmcs[i],
 					  (u_long)vmx_longjmp,
 					  (u_long)&vmx->ctx[i],
@@ -764,8 +719,7 @@ vmx_vminit(struct vm *vm)
 					  procbased_ctls,
 					  procbased_ctls2,
 					  exit_ctls, entry_ctls,
-					  vtophys(vmx->msr_bitmap),
-					  vpid);
+					  vtophys(vmx->msr_bitmap));
 
 		if (error != 0)
 			panic("vmx_vminit: vmcs_set_defaults error %d", error);
@@ -774,7 +728,6 @@ vmx_vminit(struct vm *vm)
 		vmx->cap[i].proc_ctls = procbased_ctls;
 
 		vmx->state[i].lastcpu = -1;
-		vmx->state[i].vpid = vpid;
 
 		msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);
 
@@ -843,7 +796,6 @@ vmx_set_pcpu_defaults(struct vmx *vmx, i
 {
 	int error, lastcpu;
 	struct vmxstate *vmxstate;
-	struct invvpid_desc invvpid_desc = { 0 };
 
 	vmxstate = &vmx->state[vcpu];
 	lastcpu = vmxstate->lastcpu;
@@ -869,24 +821,16 @@ vmx_set_pcpu_defaults(struct vmx *vmx, i
 		goto done;
 
 	/*
-	 * If we are using VPIDs then invalidate all mappings tagged with 'vpid'
+	 * Invalidate all mappings tagged with this EP4TA.
 	 *
 	 * We do this because this vcpu was executing on a different host
-	 * cpu when it last ran. We do not track whether it invalidated
-	 * mappings associated with its 'vpid' during that run. So we must
-	 * assume that the mappings associated with 'vpid' on 'curcpu' are
-	 * stale and invalidate them.
+	 * cpu when it last ran. Thus the mappings on this cpu might be
+	 * potentially out-of-date and therefore must be invalidated.
 	 *
 	 * Note that we incur this penalty only when the scheduler chooses to
 	 * move the thread associated with this vcpu between host cpus.
-	 *
-	 * Note also that this will invalidate mappings tagged with 'vpid'
-	 * for "all" EP4TAs.
 	 */
-	if (vmxstate->vpid != 0) {
-		invvpid_desc.vpid = vmxstate->vpid;
-		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
-	}
+	ept_invalidate_mappings(vmx->eptphys, 0);
 done:
 	return (error);
 }

Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.h
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.h	Wed Jul  3 06:23:46 2013	(r252544)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.h	Wed Jul  3 06:43:06 2013	(r252545)
@@ -77,7 +77,6 @@ struct vmxcap {
 
 struct vmxstate {
 	int	lastcpu;	/* host cpu that this 'vcpu' last ran on */
-	uint16_t vpid;
 };
 
 /* virtual machine softc */
@@ -89,6 +88,7 @@ struct vmx {
 	struct vmxctx	ctx[VM_MAXCPU];
 	struct vmxcap	cap[VM_MAXCPU];
 	struct vmxstate	state[VM_MAXCPU];
+	vm_paddr_t	eptphys;		/* physical addr of pml4ept */
 	struct vm	*vm;
 };
 CTASSERT((offsetof(struct vmx, pml4ept) & PAGE_MASK) == 0);
