svn commit: r241921 - projects/bhyve/sys/amd64/vmm/intel

Neel Natu neel at FreeBSD.org
Tue Oct 23 02:20:42 UTC 2012


Author: neel
Date: Tue Oct 23 02:20:42 2012
New Revision: 241921
URL: http://svn.freebsd.org/changeset/base/241921

Log:
  Test for AST pending with interrupts disabled right before entering the guest.
  
  If an IPI was delivered to this cpu before interrupts were disabled,
  then return right away via vmx_setjmp() with a return value of VMX_RETURN_AST.
  
  Obtained from:	NetApp
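
A minimal userland sketch of the ordering this enforces (stand-in names
and flag values, not kernel code; the committed check is the
VMX_CHECK_AST assembly macro added to vmx_support.S below):

	#include <stdio.h>

	#define TDF_ASTPENDING	0x01	/* illustrative values */
	#define TDF_NEEDRESCHED	0x02
	#define VMX_RETURN_AST	4

	static int td_flags;		/* stand-in for curthread->td_flags */
	static void disable_intr(void) { }	/* stand-in for 'cli' */

	/*
	 * The flags are tested only after interrupts are disabled, so an
	 * IPI cannot slip in between the test and the guest entry.
	 */
	static int
	vmx_enter_guest(void)
	{
		disable_intr();
		if (td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED))
			return (VMX_RETURN_AST);  /* via vmx_setjmp() in reality */
		/* ... vmlaunch/vmresume would follow here ... */
		return (0);
	}

	int
	main(void)
	{
		td_flags = TDF_ASTPENDING;	/* pretend an IPI just landed */
		printf("rc = %d\n", vmx_enter_guest());
		return (0);
	}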

Modified:
  projects/bhyve/sys/amd64/vmm/intel/vmx.c
  projects/bhyve/sys/amd64/vmm/intel/vmx.h
  projects/bhyve/sys/amd64/vmm/intel/vmx_genassym.c
  projects/bhyve/sys/amd64/vmm/intel/vmx_support.S

Modified: projects/bhyve/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- projects/bhyve/sys/amd64/vmm/intel/vmx.c	Tue Oct 23 02:20:11 2012	(r241920)
+++ projects/bhyve/sys/amd64/vmm/intel/vmx.c	Tue Oct 23 02:20:42 2012	(r241921)
@@ -290,6 +290,8 @@ vmx_setjmp_rc2str(int rc)
 		return "vmresume";
 	case VMX_RETURN_VMLAUNCH:
 		return "vmlaunch";
+	case VMX_RETURN_AST:
+		return "ast";
 	default:
 		return "unknown";
 	}
@@ -798,15 +800,20 @@ vmx_run_trace(struct vmx *vmx, int vcpu)
 
 static __inline void
 vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
-	       int handled, int astpending)
+	       int handled)
 {
 #ifdef KTR
 	VMM_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
 		 handled ? "handled" : "unhandled",
 		 exit_reason_to_str(exit_reason), rip);
+#endif
+}
 
-	if (astpending)
-		VMM_CTR0(vmx->vm, vcpu, "astpending");
+static __inline void
+vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
+{
+#ifdef KTR
+	VMM_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
 #endif
 }
 
@@ -981,19 +988,19 @@ vmx_inject_interrupts(struct vmx *vmx, i
 	const int HWINTR_BLOCKED = VMCS_INTERRUPTIBILITY_STI_BLOCKING |
 				   VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING;
 
-#if 1
 	/*
-	 * XXX
-	 * If an event is being injected from userland then just return.
-	 * For e.g. we may inject a breakpoint exception to cause the
-	 * guest to enter the debugger so we can inspect its state.
+	 * If there is already an interrupt pending then just return.
+	 *
+	 * This could happen if an interrupt was injected on a prior
+	 * VM entry but the actual entry into guest mode was aborted
+	 * because of a pending AST.
 	 */
 	error = vmread(VMCS_ENTRY_INTR_INFO, &info);
 	if (error)
 		panic("vmx_inject_interrupts: vmread(intrinfo) %d", error);
 	if (info & VMCS_INTERRUPTION_INFO_VALID)
 		return;
-#endif
+
 	/*
 	 * NMI injection has priority so deal with those first
 	 */
@@ -1301,7 +1308,7 @@ vmx_exit_process(struct vmx *vmx, int vc
 		/*
 		 * It is possible that control is returned to userland
 		 * even though we were able to handle the VM exit in the
-		 * kernel (for e.g. 'astpending' is set in the run loop).
+		 * kernel.
 		 *
 		 * In such a case we want to make sure that the userland
 		 * restarts guest execution at the instruction *after*
@@ -1352,6 +1359,7 @@ vmx_run(void *arg, int vcpu, register_t 
 	vmxctx = &vmx->ctx[vcpu];
 	vmxctx->launched = 0;
 
+	astpending = 0;
 	vmexit = vm_exitinfo(vmx->vm, vcpu);
 
 	/*
@@ -1395,6 +1403,9 @@ vmx_run(void *arg, int vcpu, register_t 
 			break;
 		case VMX_RETURN_LONGJMP:
 			break;			/* vm exit */
+		case VMX_RETURN_AST:
+			astpending = 1;
+			break;
 		case VMX_RETURN_VMRESUME:
 			vie = vmcs_instruction_error();
 			if (vmxctx->launch_error == VM_FAIL_INVALID ||
@@ -1417,14 +1428,6 @@ vmx_run(void *arg, int vcpu, register_t 
 			panic("vmx_setjmp returned %d", rc);
 		}
 		
-		/*
-		 * XXX locking?
-		 * See comments in exception.S about checking for ASTs
-		 * atomically while interrupts are disabled. But it is
-		 * not clear that they apply in our case.
-		 */
-		astpending = curthread->td_flags & TDF_ASTPENDING;
-
 		/* enable interrupts */
 		enable_intr();
 
@@ -1434,11 +1437,18 @@ vmx_run(void *arg, int vcpu, register_t 
 		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
 		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
 
+		if (astpending) {
+			handled = 1;
+			vmexit->inst_length = 0;
+			vmexit->exitcode = VM_EXITCODE_BOGUS;
+			vmx_astpending_trace(vmx, vcpu, rip);
+			break;
+		}
+
 		handled = vmx_exit_process(vmx, vcpu, vmexit);
+		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
 
-		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled,
-			       astpending);
-	} while (handled && !astpending);
+	} while (handled);
 
 	/*
 	 * If a VM exit has been handled then the exitcode must be BOGUS
@@ -1646,7 +1656,7 @@ vmx_inject(void *arg, int vcpu, int type
 	   int code_valid)
 {
 	int error;
-	uint32_t info;
+	uint64_t info;
 	struct vmx *vmx = arg;
 	struct vmcs *vmcs = &vmx->vmcs[vcpu];
 
@@ -1660,6 +1670,17 @@ vmx_inject(void *arg, int vcpu, int type
 		0x6,		/* VM_SW_EXCEPTION */
 	};
 
+	/*
+	 * If there is already an exception pending to be delivered to the
+	 * vcpu then just return.
+	 */
+	error = vmcs_getreg(vmcs, VMCS_ENTRY_INTR_INFO, &info);
+	if (error)
+		return (error);
+
+	if (info & VMCS_INTERRUPTION_INFO_VALID)
+		return (EAGAIN);
+
 	info = vector | (type_map[type] << 8) | (code_valid ? 1 << 11 : 0);
 	info |= VMCS_INTERRUPTION_INFO_VALID;
 	error = vmcs_setreg(vmcs, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);
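
With the VMCS_INTERRUPTION_INFO_VALID guard above, vmx_inject() can now
fail with EAGAIN when the VMCS already holds a queued event (for example
one left over from an entry aborted by a pending AST).  A hypothetical
caller pattern, with a stubbed vmx_inject() standing in for the real one:

	#include <errno.h>
	#include <stdio.h>

	/* Stub: pretend the VMCS already holds a valid event. */
	static int
	vmx_inject(void *vmx, int vcpu, int type, int vector, int code,
	    int code_valid)
	{
		(void)vmx; (void)vcpu; (void)type; (void)vector;
		(void)code; (void)code_valid;
		return (EAGAIN);
	}

	int
	main(void)
	{
		int deferred = 0;
		int error = vmx_inject(NULL, 0, 0, 3, 0, 0);

		if (error == EAGAIN)
			deferred = 1;	/* try again on the next VM entry */
		printf("deferred = %d\n", deferred);
		return (0);
	}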

Modified: projects/bhyve/sys/amd64/vmm/intel/vmx.h
==============================================================================
--- projects/bhyve/sys/amd64/vmm/intel/vmx.h	Tue Oct 23 02:20:11 2012	(r241920)
+++ projects/bhyve/sys/amd64/vmm/intel/vmx.h	Tue Oct 23 02:20:42 2012	(r241921)
@@ -101,12 +101,14 @@ CTASSERT((offsetof(struct vmx, guest_msr
 #define	VMX_RETURN_LONGJMP	1
 #define	VMX_RETURN_VMRESUME	2
 #define	VMX_RETURN_VMLAUNCH	3
+#define	VMX_RETURN_AST		4
 /*
  * vmx_setjmp() returns:
  * - 0 when it returns directly
  * - 1 when it returns from vmx_longjmp
  * - 2 when it returns from vmx_resume (which would only be in the error case)
  * - 3 when it returns from vmx_launch (which would only be in the error case)
+ * - 4 when it returns from vmx_resume or vmx_launch because of a pending AST
  */
 int	vmx_setjmp(struct vmxctx *ctx);
 void	vmx_longjmp(void);			/* returns via vmx_setjmp */
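
The contract above maps onto a plain setjmp()/longjmp() shape.  A
condensed, runnable illustration of the dispatch in vmx_run() (using the
libc routines as stand-ins for vmx_setjmp()/vmx_return()):

	#include <setjmp.h>
	#include <stdio.h>

	#define VMX_RETURN_DIRECT	0
	#define VMX_RETURN_LONGJMP	1
	#define VMX_RETURN_AST		4

	static jmp_buf ctx;

	/* Stand-in for guest entry: pretend an AST was pending. */
	static void
	enter_guest(void)
	{
		longjmp(ctx, VMX_RETURN_AST);	/* real code: vmx_return() */
	}

	int
	main(void)
	{
		int astpending = 0;

		switch (setjmp(ctx)) {		/* vmx_setjmp() stand-in */
		case VMX_RETURN_DIRECT:		/* first return: enter guest */
			enter_guest();
			break;			/* not reached */
		case VMX_RETURN_LONGJMP:	/* normal VM exit */
			break;
		case VMX_RETURN_AST:		/* entry aborted by an AST */
			astpending = 1;
			break;
		}
		printf("astpending = %d\n", astpending);
		return (0);
	}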

Modified: projects/bhyve/sys/amd64/vmm/intel/vmx_genassym.c
==============================================================================
--- projects/bhyve/sys/amd64/vmm/intel/vmx_genassym.c	Tue Oct 23 02:20:11 2012	(r241920)
+++ projects/bhyve/sys/amd64/vmm/intel/vmx_genassym.c	Tue Oct 23 02:20:42 2012	(r241921)
@@ -32,6 +32,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/malloc.h>
+#include <sys/proc.h>
 #include <sys/assym.h>
 
 #include <vm/vm.h>
@@ -80,3 +81,9 @@ ASSYM(VMX_RETURN_DIRECT,	VMX_RETURN_DIRE
 ASSYM(VMX_RETURN_LONGJMP,	VMX_RETURN_LONGJMP);
 ASSYM(VMX_RETURN_VMRESUME,	VMX_RETURN_VMRESUME);
 ASSYM(VMX_RETURN_VMLAUNCH,	VMX_RETURN_VMLAUNCH);
+ASSYM(VMX_RETURN_AST,		VMX_RETURN_AST);
+
+ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
+ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
+ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
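
The new ASSYM() entries make curthread's location and the AST flag bits
visible to the assembler.  The idea, reduced to its core, is compile-time
offsetof() evaluation; a hypothetical minimal illustration (the real
assym machinery extracts the values from a compiled object file rather
than printing them):

	#include <stddef.h>
	#include <stdio.h>

	/* Reduced stand-ins for the kernel structures referenced above. */
	struct thread { long td_lock; int td_flags; };
	struct pcpu { struct thread *pc_curthread; };

	#define ASSYM(name, value)	\
		printf("#define\t%s\t%#lx\n", #name, (unsigned long)(value))

	int
	main(void)
	{
		/* Mirrors the new ASSYM() lines in vmx_genassym.c. */
		ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
		ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
		return (0);
	}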

Modified: projects/bhyve/sys/amd64/vmm/intel/vmx_support.S
==============================================================================
--- projects/bhyve/sys/amd64/vmm/intel/vmx_support.S	Tue Oct 23 02:20:11 2012	(r241920)
+++ projects/bhyve/sys/amd64/vmm/intel/vmx_support.S	Tue Oct 23 02:20:42 2012	(r241921)
@@ -31,6 +31,32 @@
 #include "vmx_assym.s"
 
 /*
+ * Disable interrupts before updating %rsp in VMX_CHECK_AST or
+ * VMX_GUEST_RESTORE.
+ *
+ * The location that %rsp points to is a 'vmxctx' and not a
+ * real stack so we don't want an interrupt handler to trash it
+ */
+#define	VMX_DISABLE_INTERRUPTS		cli
+
+/*
+ * If the thread hosting the vcpu has an AST pending, then take care of it
+ * by returning from vmx_setjmp() with a return value of VMX_RETURN_AST.
+ *
+ * Assumes that %rdi holds a pointer to the 'vmxctx' and that interrupts
+ * are disabled.
+ */
+#define	VMX_CHECK_AST							\
+	movq	PCPU(CURTHREAD),%rax;					\
+	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax);	\
+	je	9f;							\
+	movq	$VMX_RETURN_AST,%rsi;					\
+	movq	%rdi,%rsp;						\
+	addq	$VMXCTX_TMPSTKTOP,%rsp;					\
+	callq	vmx_return;						\
+9:
+
+/*
  * Assumes that %rdi holds a pointer to the 'vmxctx'.
  *
  * On "return" all registers are updated to reflect guest state. The two
@@ -41,12 +67,6 @@
  * host context in case of an error with 'vmlaunch' or 'vmresume'.
  */
 #define	VMX_GUEST_RESTORE						\
-	/*								\
-	 * Disable interrupts before updating %rsp. The location that	\
-	 * %rsp points to is a 'vmxctx' and not a real stack so we	\
-	 * don't want an interrupt handler to trash it.			\
-	 */								\
-	cli;								\
 	movq	%rdi,%rsp;						\
 	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
 	movq	%rsi,%cr2;						\
@@ -169,6 +189,10 @@ END(vmx_longjmp)
  * through vmx_setjmp() with a return value of 2.
  */
 ENTRY(vmx_resume)
+	VMX_DISABLE_INTERRUPTS
+
+	VMX_CHECK_AST
+
 	/*
 	 * Restore guest state that is not automatically loaded from the vmcs.
 	 */
@@ -197,6 +221,10 @@ END(vmx_resume)
  * through vmx_setjmp() with a return value of 3.
  */
 ENTRY(vmx_launch)
+	VMX_DISABLE_INTERRUPTS
+
+	VMX_CHECK_AST
+
 	/*
 	 * Restore guest state that is not automatically loaded from the vmcs.
 	 */
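
For reference, VMX_CHECK_AST is morally this C, which has to live in
assembly because it runs with %rsp pointing into the vmxctx scratch area
rather than a normal kernel stack (stand-in types and values; vmx_return
here models the assembly routine of the same name):

	#include <stdio.h>

	#define TDF_ASTPENDING	0x01	/* illustrative values */
	#define TDF_NEEDRESCHED	0x02
	#define VMX_RETURN_AST	4

	struct thread { int td_flags; };
	static struct thread td0 = { TDF_NEEDRESCHED };
	#define curthread (&td0)	/* models PCPU(CURTHREAD) */

	/* Models the asm vmx_return(): unwind back to vmx_setjmp(). */
	static void
	vmx_return(void *vmxctx, int rc)
	{
		(void)vmxctx;
		printf("vmx_setjmp() returns %d\n", rc);
	}

	static void
	vmx_check_ast(void *vmxctx)
	{
		/* Interrupts are already off (VMX_DISABLE_INTERRUPTS). */
		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
			/*
			 * The macro points %rsp at the vmxctx temporary
			 * stack before the call; 'je 9f' skips all of
			 * this when neither flag is set.
			 */
			vmx_return(vmxctx, VMX_RETURN_AST);
		}
		/* Otherwise fall through to VMX_GUEST_RESTORE. */
	}

	int
	main(void)
	{
		vmx_check_ast(NULL);
		return (0);
	}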

