svn commit: r271694 - projects/bhyve_svm/sys/amd64/vmm/amd

Neel Natu neel at FreeBSD.org
Wed Sep 17 00:30:26 UTC 2014


Author: neel
Date: Wed Sep 17 00:30:25 2014
New Revision: 271694
URL: http://svnweb.freebsd.org/changeset/base/271694

Log:
  Rework vNMI injection.
  
  Keep track of NMI blocking by enabling the IRET intercept on a successful
  vNMI injection. The NMI blocking condition is cleared when the handler
  executes an IRET and traps back into the hypervisor.
  
  Don't inject NMI if the processor is in an interrupt shadow to preserve the
  atomic nature of "STI;HLT". Take advantage of this and artificially set the
  interrupt shadow to prevent NMI injection when restarting the "iret".
  
  Reviewed by:	Anish Gupta (akgupt3 at gmail.com), grehan

Modified:
  projects/bhyve_svm/sys/amd64/vmm/amd/svm.c

Modified: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/amd/svm.c	Tue Sep 16 22:54:15 2014	(r271693)
+++ projects/bhyve_svm/sys/amd64/vmm/amd/svm.c	Wed Sep 17 00:30:25 2014	(r271694)
@@ -1055,25 +1055,73 @@ disable_intr_window_exiting(struct svm_s
 }
 
 static int
-nmi_blocked(struct svm_softc *sc, int vcpu)
+svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, int running,
+    uint64_t val)
 {
-	/* XXX need to track NMI blocking */
+	struct vmcb_ctrl *ctrl;
+	int oldval, newval;
+
+	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+	oldval = ctrl->intr_shadow;
+	newval = val ? 1 : 0;
+	if (newval != oldval) {
+		ctrl->intr_shadow = newval;
+		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
+	}
 	return (0);
 }
 
+/*
+ * Once an NMI is injected it blocks delivery of further NMIs until the handler
+ * executes an IRET. The IRET intercept is enabled when an NMI is injected
+ * to track when the vcpu is done handling the NMI.
+ */
+static int
+nmi_blocked(struct svm_softc *sc, int vcpu)
+{
+	int blocked;
+
+	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+	    VMCB_INTCPT_IRET);
+	return (blocked);
+}
+
 static void
 enable_nmi_blocking(struct svm_softc *sc, int vcpu)
 {
-	/* XXX enable iret intercept */
+
+	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
+	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
 }
 
-#ifdef notyet
 static void
-clear_nmi_blocking(struct svm_softc *sc, int vcpu)
+clear_nmi_blocking(struct svm_softc *sc, int vcpu, int running)
 {
-	/* XXX disable iret intercept */
+	int error;
+
+	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
+	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
+	/*
+	 * When the IRET intercept is cleared the vcpu will attempt to execute
+	 * the "iret" when it runs next. However, it is possible to inject
+	 * another NMI into the vcpu before the "iret" has actually executed.
+	 *
+	 * For example, if the "iret" encounters a #NPF when accessing the stack
+	 * it will trap back into the hypervisor. If an NMI is pending for
+	 * the vcpu it will be injected into the guest.
+	 *
+	 * XXX this needs to be fixed
+	 */
+	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
+
+	/*
+	 * Set 'intr_shadow' to prevent an NMI from being injected on the
+	 * immediate VMRUN.
+	 */
+	error = svm_modify_intr_shadow(sc, vcpu, running, 1);
+	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
 }
-#endif
 
 #ifdef KTR
 static const char *
@@ -1106,6 +1154,8 @@ exit_reason_to_str(uint64_t reason)
 		return ("vintr");
 	case VMCB_EXIT_MSR:
 		return ("msr");
+	case VMCB_EXIT_IRET:
+		return ("iret");
 	default:
 		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
 		return (reasonbuf);
@@ -1201,6 +1251,14 @@ svm_vmexit(struct svm_softc *svm_sc, int
 	svm_save_intinfo(svm_sc, vcpu);
 
 	switch (code) {
+	case VMCB_EXIT_IRET:
+		/*
+		 * Restart execution at "iret" but with the intercept cleared.
+		 */
+		vmexit->inst_length = 0;
+		clear_nmi_blocking(svm_sc, vcpu, 1);
+		handled = 1;
+		break;
 	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
 		handled = 1;
@@ -1398,6 +1456,14 @@ svm_inj_interrupts(struct svm_softc *sc,
 			 */
 			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
 			    "to NMI-blocking");
+		} else if (ctrl->intr_shadow) {
+			/*
+			 * Can't inject an NMI if the vcpu is in an intr_shadow.
+			 */
+			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
+			    "interrupt shadow");
+			need_intr_window = 1;
+			goto done;
 		} else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
 			/*
 			 * If there is already an exception/interrupt pending


More information about the svn-src-projects mailing list