svn commit: r261452 - in projects/bhyve_svm/sys/amd64/vmm: . intel io

Peter Grehan grehan at FreeBSD.org
Tue Feb 4 02:41:56 UTC 2014


Author: grehan
Date: Tue Feb  4 02:41:54 2014
New Revision: 261452
URL: http://svnweb.freebsd.org/changeset/base/261452

Log:
  MFC @ r259205 in preparation for some SVM updates.

Added:
  projects/bhyve_svm/sys/amd64/vmm/io/vhpet.c
     - copied unchanged from r259205, head/sys/amd64/vmm/io/vhpet.c
  projects/bhyve_svm/sys/amd64/vmm/io/vhpet.h
     - copied unchanged from r259205, head/sys/amd64/vmm/io/vhpet.h
  projects/bhyve_svm/sys/amd64/vmm/io/vioapic.c
     - copied unchanged from r259205, head/sys/amd64/vmm/io/vioapic.c
  projects/bhyve_svm/sys/amd64/vmm/io/vioapic.h
     - copied unchanged from r259205, head/sys/amd64/vmm/io/vioapic.h
Deleted:
  projects/bhyve_svm/sys/amd64/vmm/io/vdev.c
  projects/bhyve_svm/sys/amd64/vmm/io/vdev.h
Modified:
  projects/bhyve_svm/sys/amd64/vmm/intel/vmcs.c
  projects/bhyve_svm/sys/amd64/vmm/intel/vmcs.h
  projects/bhyve_svm/sys/amd64/vmm/intel/vmx.c
  projects/bhyve_svm/sys/amd64/vmm/intel/vmx.h
  projects/bhyve_svm/sys/amd64/vmm/intel/vmx_controls.h
  projects/bhyve_svm/sys/amd64/vmm/intel/vmx_genassym.c
  projects/bhyve_svm/sys/amd64/vmm/intel/vtd.c
  projects/bhyve_svm/sys/amd64/vmm/io/ppt.c
  projects/bhyve_svm/sys/amd64/vmm/io/vlapic.c
  projects/bhyve_svm/sys/amd64/vmm/io/vlapic.h
  projects/bhyve_svm/sys/amd64/vmm/vmm.c
  projects/bhyve_svm/sys/amd64/vmm/vmm_dev.c
  projects/bhyve_svm/sys/amd64/vmm/vmm_instruction_emul.c
  projects/bhyve_svm/sys/amd64/vmm/vmm_ktr.h
  projects/bhyve_svm/sys/amd64/vmm/vmm_lapic.c
  projects/bhyve_svm/sys/amd64/vmm/vmm_lapic.h
  projects/bhyve_svm/sys/amd64/vmm/vmm_msr.c
  projects/bhyve_svm/sys/amd64/vmm/vmm_msr.h
  projects/bhyve_svm/sys/amd64/vmm/x86.c
Directory Properties:
  projects/bhyve_svm/sys/amd64/vmm/   (props changed)

Modified: projects/bhyve_svm/sys/amd64/vmm/intel/vmcs.c
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/intel/vmcs.c	Tue Feb  4 00:52:08 2014	(r261451)
+++ projects/bhyve_svm/sys/amd64/vmm/intel/vmcs.c	Tue Feb  4 02:41:54 2014	(r261452)
@@ -39,8 +39,6 @@ __FBSDID("$FreeBSD$");
 #include <vm/pmap.h>
 
 #include <machine/segments.h>
-#include <machine/pmap.h>
-
 #include <machine/vmm.h>
 #include "vmm_host.h"
 #include "vmcs.h"

Modified: projects/bhyve_svm/sys/amd64/vmm/intel/vmcs.h
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/intel/vmcs.h	Tue Feb  4 00:52:08 2014	(r261451)
+++ projects/bhyve_svm/sys/amd64/vmm/intel/vmcs.h	Tue Feb  4 02:41:54 2014	(r261452)
@@ -318,7 +318,7 @@ uint64_t vmcs_read(uint32_t encoding);
 /*
  * VMCS IDT-Vectoring information fields
  */
-#define	VMCS_IDT_VEC_VALID		(1 << 31)
+#define	VMCS_IDT_VEC_VALID		(1U << 31)
 #define	VMCS_IDT_VEC_ERRCODE_VALID	(1 << 11)
 
 /*

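Aside on the vmcs.h hunk above: the (1 << 31) to (1U << 31) changes in this header (and in vmx_controls.h and vtd.c further down) avoid shifting a 1 into the sign bit of a 32-bit int, which is undefined behaviour in C and, in practice, yields a negative value that sign-extends when combined with 64-bit fields. A minimal standalone sketch, not part of the committed diff, showing the difference:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Shifting into the sign bit of a 32-bit int; in practice this yields INT_MIN. */
	uint64_t bad  = (1 << 31);	/* sign-extends to 0xffffffff80000000 */
	uint64_t good = (1U << 31);	/* stays 0x0000000080000000 */

	printf("%#jx %#jx\n", (uintmax_t)bad, (uintmax_t)good);
	return (0);
}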
Modified: projects/bhyve_svm/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/intel/vmx.c	Tue Feb  4 00:52:08 2014	(r261451)
+++ projects/bhyve_svm/sys/amd64/vmm/intel/vmx.c	Tue Feb  4 02:41:54 2014	(r261452)
@@ -44,7 +44,6 @@ __FBSDID("$FreeBSD$");
 #include <machine/psl.h>
 #include <machine/cpufunc.h>
 #include <machine/md_var.h>
-#include <machine/pmap.h>
 #include <machine/segments.h>
 #include <machine/specialreg.h>
 #include <machine/vmparam.h>
@@ -164,6 +163,7 @@ static int cap_halt_exit;
 static int cap_pause_exit;
 static int cap_unrestricted_guest;
 static int cap_monitor_trap;
+static int cap_invpcid;
  
 static struct unrhdr *vpid_unr;
 static u_int vpid_alloc_failed;
@@ -307,8 +307,8 @@ vmx_setjmp_rc2str(int rc)
 	}
 }
 
-#define	SETJMP_TRACE(vmx, vcpu, vmxctx, regname)			  \
-	VMM_CTR1((vmx)->vm, (vcpu), "setjmp trace " #regname " 0x%016lx", \
+#define	SETJMP_TRACE(vmx, vcpu, vmxctx, regname)			    \
+	VCPU_CTR1((vmx)->vm, (vcpu), "setjmp trace " #regname " 0x%016lx",  \
 		 (vmxctx)->regname)
 
 static void
@@ -320,14 +320,14 @@ vmx_setjmp_trace(struct vmx *vmx, int vc
 		panic("vmx_setjmp_trace: invalid vmxctx %p; should be %p",
 			vmxctx, &vmx->ctx[vcpu]);
 
-	VMM_CTR1((vmx)->vm, (vcpu), "vmxctx = %p", vmxctx);
-	VMM_CTR2((vmx)->vm, (vcpu), "setjmp return code %s(%d)",
+	VCPU_CTR1((vmx)->vm, (vcpu), "vmxctx = %p", vmxctx);
+	VCPU_CTR2((vmx)->vm, (vcpu), "setjmp return code %s(%d)",
 		 vmx_setjmp_rc2str(rc), rc);
 
 	host_rsp = host_rip = ~0;
 	vmread(VMCS_HOST_RIP, &host_rip);
 	vmread(VMCS_HOST_RSP, &host_rsp);
-	VMM_CTR2((vmx)->vm, (vcpu), "vmcs host_rip 0x%016lx, host_rsp 0x%016lx",
+	VCPU_CTR2((vmx)->vm, (vcpu), "vmcs host_rip 0x%016lx, host_rsp %#lx",
 		 host_rip, host_rsp);
 
 	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r15);
@@ -660,6 +660,11 @@ vmx_init(void)
 					PROCBASED2_UNRESTRICTED_GUEST, 0,
 				        &tmp) == 0);
 
+	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
+	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
+	    &tmp) == 0);
+
+
 	/* Initialize EPT */
 	error = ept_init();
 	if (error) {
@@ -828,6 +833,7 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
 
 		vmx->cap[i].set = 0;
 		vmx->cap[i].proc_ctls = procbased_ctls;
+		vmx->cap[i].proc_ctls2 = procbased_ctls2;
 
 		vmx->state[i].lastcpu = -1;
 		vmx->state[i].vpid = vpid[i];
@@ -880,7 +886,7 @@ static __inline void
 vmx_run_trace(struct vmx *vmx, int vcpu)
 {
 #ifdef KTR
-	VMM_CTR1(vmx->vm, vcpu, "Resume execution at 0x%0lx", vmcs_guest_rip());
+	VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
 #endif
 }
 
@@ -889,7 +895,7 @@ vmx_exit_trace(struct vmx *vmx, int vcpu
 	       int handled)
 {
 #ifdef KTR
-	VMM_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
+	VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
 		 handled ? "handled" : "unhandled",
 		 exit_reason_to_str(exit_reason), rip);
 #endif
@@ -899,7 +905,7 @@ static __inline void
 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
 {
 #ifdef KTR
-	VMM_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
+	VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
 #endif
 }
 
@@ -1048,7 +1054,7 @@ vmx_inject_nmi(struct vmx *vmx, int vcpu
 	if (error)
 		panic("vmx_inject_nmi: vmwrite(intrinfo) %d", error);
 
-	VMM_CTR0(vmx->vm, vcpu, "Injecting vNMI");
+	VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
 
 	/* Clear the request */
 	vm_nmi_clear(vmx->vm, vcpu);
@@ -1061,7 +1067,7 @@ nmiblocked:
 	 */
 	vmx_set_nmi_window_exiting(vmx, vcpu);
 
-	VMM_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
+	VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
 	return (1);
 }
 
@@ -1127,7 +1133,7 @@ vmx_inject_interrupts(struct vmx *vmx, i
 	/* Update the Local APIC ISR */
 	lapic_intr_accepted(vmx->vm, vcpu, vector);
 
-	VMM_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
+	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
 
 	return;
 
@@ -1138,7 +1144,7 @@ cantinject:
 	 */
 	vmx_set_int_window_exiting(vmx, vcpu);
 
-	VMM_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
+	VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
 }
 
 static int
@@ -1295,21 +1301,6 @@ ept_fault_type(uint64_t ept_qual)
 	return (fault_type);
 }
 
-static int
-ept_protection(uint64_t ept_qual)
-{
-	int prot = 0;
-
-	if (ept_qual & EPT_VIOLATION_GPA_READABLE)
-		prot |= VM_PROT_READ;
-	if (ept_qual & EPT_VIOLATION_GPA_WRITEABLE)
-		prot |= VM_PROT_WRITE;
-	if (ept_qual & EPT_VIOLATION_GPA_EXECUTABLE)
-		prot |= VM_PROT_EXECUTE;
-
-	return (prot);
-}
-
 static boolean_t
 ept_emulation_fault(uint64_t ept_qual)
 {
@@ -1345,7 +1336,8 @@ vmx_exit_process(struct vmx *vmx, int vc
 	struct vmcs *vmcs;
 	struct vmxctx *vmxctx;
 	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, reason;
-	uint64_t qual, gpa;
+	uint64_t qual, gpa, rflags;
+	bool retu;
 
 	handled = 0;
 	vmcs = &vmx->vmcs[vcpu];
@@ -1391,31 +1383,46 @@ vmx_exit_process(struct vmx *vmx, int vc
 		break;
 	case EXIT_REASON_RDMSR:
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
+		retu = false;
 		ecx = vmxctx->guest_rcx;
-		error = emulate_rdmsr(vmx->vm, vcpu, ecx);
+		error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu);
 		if (error) {
 			vmexit->exitcode = VM_EXITCODE_RDMSR;
 			vmexit->u.msr.code = ecx;
-		} else
+		} else if (!retu) {
 			handled = 1;
+		} else {
+			/* Return to userspace with a valid exitcode */
+			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
+			    ("emulate_wrmsr retu with bogus exitcode"));
+		}
 		break;
 	case EXIT_REASON_WRMSR:
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
+		retu = false;
 		eax = vmxctx->guest_rax;
 		ecx = vmxctx->guest_rcx;
 		edx = vmxctx->guest_rdx;
 		error = emulate_wrmsr(vmx->vm, vcpu, ecx,
-					(uint64_t)edx << 32 | eax);
+		    (uint64_t)edx << 32 | eax, &retu);
 		if (error) {
 			vmexit->exitcode = VM_EXITCODE_WRMSR;
 			vmexit->u.msr.code = ecx;
 			vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
-		} else
+		} else if (!retu) {
 			handled = 1;
+		} else {
+			/* Return to userspace with a valid exitcode */
+			KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
+			    ("emulate_wrmsr retu with bogus exitcode"));
+		}
 		break;
 	case EXIT_REASON_HLT:
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
+		if ((error = vmread(VMCS_GUEST_RFLAGS, &rflags)) != 0)
+			panic("vmx_exit_process: vmread(rflags) %d", error);
 		vmexit->exitcode = VM_EXITCODE_HLT;
+		vmexit->u.hlt.rflags = rflags;
 		break;
 	case EXIT_REASON_MTF:
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
@@ -1428,7 +1435,7 @@ vmx_exit_process(struct vmx *vmx, int vc
 	case EXIT_REASON_INTR_WINDOW:
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
 		vmx_clear_int_window_exiting(vmx, vcpu);
-		VMM_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
+		VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
 		return (1);
 	case EXIT_REASON_EXT_INTR:
 		/*
@@ -1451,7 +1458,7 @@ vmx_exit_process(struct vmx *vmx, int vc
 		/* Exit to allow the pending virtual NMI to be injected */
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
 		vmx_clear_nmi_window_exiting(vmx, vcpu);
-		VMM_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
+		VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
 		return (1);
 	case EXIT_REASON_INOUT:
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
@@ -1479,7 +1486,6 @@ vmx_exit_process(struct vmx *vmx, int vc
 			vmexit->exitcode = VM_EXITCODE_PAGING;
 			vmexit->u.paging.gpa = gpa;
 			vmexit->u.paging.fault_type = ept_fault_type(qual);
-			vmexit->u.paging.protection = ept_protection(qual);
 		} else if (ept_emulation_fault(qual)) {
 			vmexit->exitcode = VM_EXITCODE_INST_EMUL;
 			vmexit->u.inst_emul.gpa = gpa;
@@ -1570,7 +1576,6 @@ vmx_run(void *arg, int vcpu, register_t 
 		panic("vmx_run: error %d setting up pcpu defaults", error);
 
 	do {
-		lapic_timer_tick(vmx->vm, vcpu);
 		vmx_inject_interrupts(vmx, vcpu);
 		vmx_run_trace(vmx, vcpu);
 		rc = vmx_setjmp(vmxctx);
@@ -1652,7 +1657,7 @@ vmx_run(void *arg, int vcpu, register_t 
 	if (!handled)
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_USERSPACE, 1);
 
-	VMM_CTR1(vmx->vm, vcpu, "goto userland: exitcode %d",vmexit->exitcode);
+	VCPU_CTR1(vmx->vm, vcpu, "goto userland: exitcode %d",vmexit->exitcode);
 
 	/*
 	 * XXX
@@ -1932,6 +1937,10 @@ vmx_getcap(void *arg, int vcpu, int type
 		if (cap_unrestricted_guest)
 			ret = 0;
 		break;
+	case VM_CAP_ENABLE_INVPCID:
+		if (cap_invpcid)
+			ret = 0;
+		break;
 	default:
 		break;
 	}
@@ -1988,11 +1997,21 @@ vmx_setcap(void *arg, int vcpu, int type
 	case VM_CAP_UNRESTRICTED_GUEST:
 		if (cap_unrestricted_guest) {
 			retval = 0;
-			baseval = procbased_ctls2;
+			pptr = &vmx->cap[vcpu].proc_ctls2;
+			baseval = *pptr;
 			flag = PROCBASED2_UNRESTRICTED_GUEST;
 			reg = VMCS_SEC_PROC_BASED_CTLS;
 		}
 		break;
+	case VM_CAP_ENABLE_INVPCID:
+		if (cap_invpcid) {
+			retval = 0;
+			pptr = &vmx->cap[vcpu].proc_ctls2;
+			baseval = *pptr;
+			flag = PROCBASED2_ENABLE_INVPCID;
+			reg = VMCS_SEC_PROC_BASED_CTLS;
+		}
+		break;
 	default:
 		break;
 	}

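Aside on the vmx.c hunks above: the secondary processor-based VM-execution controls are now cached per vcpu (proc_ctls2), and a new VM_CAP_ENABLE_INVPCID capability is reported when the host supports PROCBASED2_ENABLE_INVPCID, so vmx_setcap() can toggle the bit for one vcpu without clobbering the others. A hedged userspace sketch of turning the capability on, assuming the vm_set_capability() interface from libvmmapi in head at this point (the helper name is hypothetical):

#include <sys/types.h>

#include <machine/vmm.h>
#include <vmmapi.h>

static int
enable_invpcid(struct vmctx *ctx, int vcpu)
{
	/*
	 * The VMX backend's vmx_setcap() only succeeds if cap_invpcid was
	 * detected during vmx_init(); otherwise the request is rejected.
	 */
	return (vm_set_capability(ctx, vcpu, VM_CAP_ENABLE_INVPCID, 1));
}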
Modified: projects/bhyve_svm/sys/amd64/vmm/intel/vmx.h
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/intel/vmx.h	Tue Feb  4 00:52:08 2014	(r261451)
+++ projects/bhyve_svm/sys/amd64/vmm/intel/vmx.h	Tue Feb  4 02:41:54 2014	(r261452)
@@ -84,6 +84,7 @@ struct vmxctx {
 struct vmxcap {
 	int	set;
 	uint32_t proc_ctls;
+	uint32_t proc_ctls2;
 };
 
 struct vmxstate {

Modified: projects/bhyve_svm/sys/amd64/vmm/intel/vmx_controls.h
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/intel/vmx_controls.h	Tue Feb  4 00:52:08 2014	(r261451)
+++ projects/bhyve_svm/sys/amd64/vmm/intel/vmx_controls.h	Tue Feb  4 02:41:54 2014	(r261452)
@@ -56,7 +56,7 @@
 #define	PROCBASED_MSR_BITMAPS		(1 << 28)
 #define	PROCBASED_MONITOR_EXITING	(1 << 29)
 #define	PROCBASED_PAUSE_EXITING		(1 << 30)
-#define	PROCBASED_SECONDARY_CONTROLS	(1 << 31)
+#define	PROCBASED_SECONDARY_CONTROLS	(1U << 31)
 
 /* Secondary Processor-Based VM-Execution Controls */
 #define	PROCBASED2_VIRTUALIZE_APIC	(1 << 0)
@@ -68,6 +68,7 @@
 #define	PROCBASED2_WBINVD_EXITING	(1 << 6)
 #define	PROCBASED2_UNRESTRICTED_GUEST	(1 << 7)
 #define	PROCBASED2_PAUSE_LOOP_EXITING	(1 << 10)
+#define	PROCBASED2_ENABLE_INVPCID	(1 << 12)
 
 /* VM Exit Controls */
 #define	VM_EXIT_SAVE_DEBUG_CONTROLS	(1 << 2)

Modified: projects/bhyve_svm/sys/amd64/vmm/intel/vmx_genassym.c
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/intel/vmx_genassym.c	Tue Feb  4 00:52:08 2014	(r261451)
+++ projects/bhyve_svm/sys/amd64/vmm/intel/vmx_genassym.c	Tue Feb  4 02:41:54 2014	(r261452)
@@ -38,8 +38,6 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm.h>
 #include <vm/pmap.h>
 
-#include <machine/pmap.h>
-
 #include <machine/vmm.h>
 #include "vmx.h"
 #include "vmx_cpufunc.h"

Modified: projects/bhyve_svm/sys/amd64/vmm/intel/vtd.c
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/intel/vtd.c	Tue Feb  4 00:52:08 2014	(r261451)
+++ projects/bhyve_svm/sys/amd64/vmm/intel/vtd.c	Tue Feb  4 02:41:54 2014	(r261452)
@@ -39,7 +39,6 @@ __FBSDID("$FreeBSD$");
 
 #include <dev/pci/pcireg.h>
 
-#include <machine/pmap.h>
 #include <machine/vmparam.h>
 #include <contrib/dev/acpica/include/acpi.h>
 
@@ -74,11 +73,11 @@ struct vtdmap {
 
 #define	VTD_GCR_WBF		(1 << 27)
 #define	VTD_GCR_SRTP		(1 << 30)
-#define	VTD_GCR_TE		(1 << 31)
+#define	VTD_GCR_TE		(1U << 31)
 
 #define	VTD_GSR_WBFS		(1 << 27)
 #define	VTD_GSR_RTPS		(1 << 30)
-#define	VTD_GSR_TES		(1 << 31)
+#define	VTD_GSR_TES		(1U << 31)
 
 #define	VTD_CCR_ICC		(1UL << 63)	/* invalidate context cache */
 #define	VTD_CCR_CIRG_GLOBAL	(1UL << 61)	/* global invalidation */

Modified: projects/bhyve_svm/sys/amd64/vmm/io/ppt.c
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/io/ppt.c	Tue Feb  4 00:52:08 2014	(r261451)
+++ projects/bhyve_svm/sys/amd64/vmm/io/ppt.c	Tue Feb  4 02:41:54 2014	(r261452)
@@ -421,7 +421,7 @@ pptintr(void *arg)
 	vec = pptarg->vec;
 
 	if (ppt->vm != NULL)
-		(void) lapic_set_intr(ppt->vm, pptarg->vcpu, vec);
+		lapic_intr_edge(ppt->vm, pptarg->vcpu, vec);
 	else {
 		/*
 		 * XXX

Copied: projects/bhyve_svm/sys/amd64/vmm/io/vhpet.c (from r259205, head/sys/amd64/vmm/io/vhpet.c)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/bhyve_svm/sys/amd64/vmm/io/vhpet.c	Tue Feb  4 02:41:54 2014	(r261452, copy of r259205, head/sys/amd64/vmm/io/vhpet.c)
@@ -0,0 +1,783 @@
+/*-
+ * Copyright (c) 2013 Tycho Nightingale <tycho.nightingale at pluribusnetworks.com>
+ * Copyright (c) 2013 Neel Natu <neel at freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+#include <sys/cpuset.h>
+
+#include <dev/acpica/acpi_hpet.h>
+
+#include <machine/vmm.h>
+#include <machine/vmm_dev.h>
+
+#include "vmm_lapic.h"
+#include "vioapic.h"
+#include "vhpet.h"
+
+#include "vmm_ktr.h"
+
+static MALLOC_DEFINE(M_VHPET, "vhpet", "bhyve virtual hpet");
+
+#define	HPET_FREQ	10000000		/* 10.0 Mhz */
+#define	FS_PER_S	1000000000000000ul
+
+/* Timer N Configuration and Capabilities Register */
+#define	HPET_TCAP_RO_MASK	(HPET_TCAP_INT_ROUTE 	|		\
+				 HPET_TCAP_FSB_INT_DEL	|		\
+				 HPET_TCAP_SIZE		|		\
+				 HPET_TCAP_PER_INT)
+/*
+ * HPET requires at least 3 timers and up to 32 timers per block.
+ */
+#define	VHPET_NUM_TIMERS	8
+CTASSERT(VHPET_NUM_TIMERS >= 3 && VHPET_NUM_TIMERS <= 32);
+
+struct vhpet_callout_arg {
+	struct vhpet *vhpet;
+	int timer_num;
+};
+
+struct vhpet {
+	struct vm	*vm;
+	struct mtx	mtx;
+	sbintime_t	freq_sbt;
+
+	uint64_t	config;		/* Configuration */
+	uint64_t	isr;		/* Interrupt Status */
+	uint32_t	counter;	/* HPET Counter */
+	sbintime_t	counter_sbt;
+
+	struct {
+		uint64_t	cap_config;	/* Configuration */
+		uint64_t	msireg;		/* FSB interrupt routing */
+		uint32_t	compval;	/* Comparator */
+		uint32_t	comprate;
+		struct callout	callout;
+		struct vhpet_callout_arg arg;
+	} timer[VHPET_NUM_TIMERS];
+};
+
+#define	VHPET_LOCK(vhp)		mtx_lock(&((vhp)->mtx))
+#define	VHPET_UNLOCK(vhp)	mtx_unlock(&((vhp)->mtx))
+
+static uint64_t
+vhpet_capabilities(void)
+{
+	uint64_t cap = 0;
+
+	cap |= 0x8086 << 16;			/* vendor id */
+	cap |= HPET_CAP_LEG_RT;			/* legacy routing capable */
+	cap |= (VHPET_NUM_TIMERS - 1) << 8;	/* number of timers */
+	cap |= 1;				/* revision */
+	cap &= ~HPET_CAP_COUNT_SIZE;		/* 32-bit timer */
+
+	cap &= 0xffffffff;
+	cap |= (FS_PER_S / HPET_FREQ) << 32;	/* tick period in fs */
+
+	return (cap);
+}
+
+static __inline bool
+vhpet_counter_enabled(struct vhpet *vhpet)
+{
+
+	return ((vhpet->config & HPET_CNF_ENABLE) ? true : false);
+}
+
+static __inline bool
+vhpet_timer_msi_enabled(struct vhpet *vhpet, int n)
+{
+	const uint64_t msi_enable = HPET_TCAP_FSB_INT_DEL | HPET_TCNF_FSB_EN;
+
+	/*
+	 * LegacyReplacement Route configuration takes precedence over MSI
+	 * for timers 0 and 1.
+	 */
+	if (n == 0 || n == 1) {
+		if (vhpet->config & HPET_CNF_LEG_RT)
+			return (false);
+	}
+
+	if ((vhpet->timer[n].cap_config & msi_enable) == msi_enable)
+		return (true);
+	else
+		return (false);
+}
+
+static __inline int
+vhpet_timer_ioapic_pin(struct vhpet *vhpet, int n)
+{
+	/*
+	 * If the timer is configured to use MSI then treat it as if the
+	 * timer is not connected to the ioapic.
+	 */
+	if (vhpet_timer_msi_enabled(vhpet, n))
+		return (0);
+
+	if (vhpet->config & HPET_CNF_LEG_RT) {
+		/*
+		 * In "legacy routing" timers 0 and 1 are connected to
+		 * ioapic pins 2 and 8 respectively.
+		 */
+		switch (n) {
+		case 0:
+			return (2);
+		case 1:
+			return (8);
+		}
+	}
+
+	return ((vhpet->timer[n].cap_config & HPET_TCNF_INT_ROUTE) >> 9);
+}
+
+static uint32_t
+vhpet_counter(struct vhpet *vhpet, bool latch)
+{
+	uint32_t val;
+	sbintime_t cur_sbt, delta_sbt;
+
+	val = vhpet->counter;
+	if (vhpet_counter_enabled(vhpet)) {
+		cur_sbt = sbinuptime();
+		delta_sbt = cur_sbt - vhpet->counter_sbt;
+		KASSERT(delta_sbt >= 0,
+		    ("vhpet counter went backwards: %#lx to %#lx",
+		    vhpet->counter_sbt, cur_sbt));
+		val += delta_sbt / vhpet->freq_sbt;
+
+		/*
+		 * Keep track of the last value of the main counter that
+		 * was read by the guest.
+		 */
+		if (latch) {
+			vhpet->counter = val;
+			vhpet->counter_sbt = cur_sbt;
+		}
+	}
+
+	return (val);
+}
+
+static void
+vhpet_timer_clear_isr(struct vhpet *vhpet, int n)
+{
+	int pin;
+
+	if (vhpet->isr & (1 << n)) {
+		pin = vhpet_timer_ioapic_pin(vhpet, n);
+		KASSERT(pin != 0, ("vhpet timer %d irq incorrectly routed", n));
+		vioapic_deassert_irq(vhpet->vm, pin);
+		vhpet->isr &= ~(1 << n);
+	}
+}
+
+static __inline bool
+vhpet_periodic_timer(struct vhpet *vhpet, int n)
+{
+
+	return ((vhpet->timer[n].cap_config & HPET_TCNF_TYPE) != 0);
+}
+
+static __inline bool
+vhpet_timer_interrupt_enabled(struct vhpet *vhpet, int n)
+{
+
+	return ((vhpet->timer[n].cap_config & HPET_TCNF_INT_ENB) != 0);
+}
+
+static __inline bool
+vhpet_timer_edge_trig(struct vhpet *vhpet, int n)
+{
+
+	KASSERT(!vhpet_timer_msi_enabled(vhpet, n), ("vhpet_timer_edge_trig: "
+	    "timer %d is using MSI", n));
+
+	/* The legacy replacement interrupts are always edge triggered */
+	if (vhpet->config & HPET_CNF_LEG_RT) {
+		if (n == 0 || n == 1)
+			return (true);
+	}
+
+	if ((vhpet->timer[n].cap_config & HPET_TCNF_INT_TYPE) == 0)
+		return (true);
+	else
+		return (false);
+}
+
+static void
+vhpet_timer_interrupt(struct vhpet *vhpet, int n)
+{
+	int apicid, vector, vcpuid, pin;
+	cpuset_t dmask;
+
+	/* If interrupts are not enabled for this timer then just return. */
+	if (!vhpet_timer_interrupt_enabled(vhpet, n))
+		return;
+
+	/*
+	 * If a level triggered interrupt is already asserted then just return.
+	 */
+	if ((vhpet->isr & (1 << n)) != 0) {
+		VM_CTR1(vhpet->vm, "hpet t%d intr is already asserted", n);
+		return;
+	}
+
+	if (vhpet_timer_msi_enabled(vhpet, n)) {
+		/*
+		 * XXX should have an API 'vlapic_deliver_msi(vm, addr, data)'
+		 * - assuming physical delivery mode
+		 * - no need to interpret contents of 'msireg' here
+		 */
+		vector = vhpet->timer[n].msireg & 0xff;
+		apicid = (vhpet->timer[n].msireg >> (32 + 12)) & 0xff;
+		if (apicid != 0xff) {
+			/* unicast */
+			vcpuid = vm_apicid2vcpuid(vhpet->vm, apicid);
+			lapic_intr_edge(vhpet->vm, vcpuid, vector);
+		} else {
+			/* broadcast */
+			dmask = vm_active_cpus(vhpet->vm);
+			while ((vcpuid = CPU_FFS(&dmask)) != 0) {
+				vcpuid--;
+				CPU_CLR(vcpuid, &dmask);
+				lapic_intr_edge(vhpet->vm, vcpuid, vector);
+			}
+		}
+		return;
+	}	
+
+	pin = vhpet_timer_ioapic_pin(vhpet, n);
+	if (pin == 0) {
+		VM_CTR1(vhpet->vm, "hpet t%d intr is not routed to ioapic", n);
+		return;
+	}
+
+	if (vhpet_timer_edge_trig(vhpet, n)) {
+		vioapic_pulse_irq(vhpet->vm, pin);
+	} else {
+		vhpet->isr |= 1 << n;
+		vioapic_assert_irq(vhpet->vm, pin);
+	}
+}
+
+static void
+vhpet_adjust_compval(struct vhpet *vhpet, int n, uint32_t counter)
+{
+	uint32_t compval, comprate, compnext;
+
+	KASSERT(vhpet->timer[n].comprate != 0, ("hpet t%d is not periodic", n));
+
+	compval = vhpet->timer[n].compval;
+	comprate = vhpet->timer[n].comprate;
+
+	/*
+	 * Calculate the comparator value to be used for the next periodic
+	 * interrupt.
+	 *
+	 * This function is commonly called from the callout handler.
+	 * In this scenario the 'counter' is ahead of 'compval'. To find
+	 * the next value to program into the accumulator we divide the
+	 * number space between 'compval' and 'counter' into 'comprate'
+	 * sized units. The 'compval' is rounded up such that is "ahead"
+	 * of 'counter'.
+	 */
+	compnext = compval + ((counter - compval) / comprate + 1) * comprate;
+
+	vhpet->timer[n].compval = compnext;
+}
+
+static void
+vhpet_handler(void *a)
+{
+	int n;
+	uint32_t counter;
+	sbintime_t sbt;
+	struct vhpet *vhpet;
+	struct callout *callout;
+	struct vhpet_callout_arg *arg;
+
+	arg = a;
+	vhpet = arg->vhpet;
+	n = arg->timer_num;
+	callout = &vhpet->timer[n].callout;
+
+	VM_CTR1(vhpet->vm, "hpet t%d fired", n);
+
+	VHPET_LOCK(vhpet);
+
+	if (callout_pending(callout))		/* callout was reset */
+		goto done;
+
+	if (!callout_active(callout))		/* callout was stopped */
+		goto done;
+
+	callout_deactivate(callout);
+
+	if (!vhpet_counter_enabled(vhpet))
+		panic("vhpet(%p) callout with counter disabled", vhpet);
+
+	counter = vhpet_counter(vhpet, false);
+
+	/* Update the accumulator for periodic timers */
+	if (vhpet->timer[n].comprate != 0)
+		vhpet_adjust_compval(vhpet, n, counter);
+
+	sbt = (vhpet->timer[n].compval - counter) * vhpet->freq_sbt;
+	callout_reset_sbt(callout, sbt, 0, vhpet_handler, arg, 0);
+	vhpet_timer_interrupt(vhpet, n);
+done:
+	VHPET_UNLOCK(vhpet);
+	return;
+}
+
+static void
+vhpet_stop_timer(struct vhpet *vhpet, int n)
+{
+
+	callout_stop(&vhpet->timer[n].callout);
+	vhpet_timer_clear_isr(vhpet, n);
+}
+
+static void
+vhpet_start_timer(struct vhpet *vhpet, int n)
+{
+	uint32_t counter, delta, delta2;
+	sbintime_t sbt;
+
+	counter = vhpet_counter(vhpet, false);
+
+	if (vhpet->timer[n].comprate != 0)
+		vhpet_adjust_compval(vhpet, n, counter);
+
+	delta = vhpet->timer[n].compval - counter;
+
+	/*
+	 * In one-shot mode the guest will typically read the main counter
+	 * before programming the comparator. We can use this heuristic to
+	 * figure out whether the expiration time is in the past. If this
+	 * is the case we schedule the callout to fire immediately.
+	 */
+	if (!vhpet_periodic_timer(vhpet, n)) {
+		delta2 = vhpet->timer[n].compval - vhpet->counter;
+		if (delta > delta2) {
+			VM_CTR3(vhpet->vm, "hpet t%d comparator value is in "
+			    "the past: %u/%u/%u", counter,
+			    vhpet->timer[n].compval, vhpet->counter);
+			delta = 0;
+		}
+	}
+
+	sbt = delta * vhpet->freq_sbt;
+	callout_reset_sbt(&vhpet->timer[n].callout, sbt, 0, vhpet_handler,
+	    &vhpet->timer[n].arg, 0);
+}
+
+static void
+vhpet_start_counting(struct vhpet *vhpet)
+{
+	int i;
+
+	vhpet->counter_sbt = sbinuptime();
+	for (i = 0; i < VHPET_NUM_TIMERS; i++)
+		vhpet_start_timer(vhpet, i);
+}
+
+static void
+vhpet_stop_counting(struct vhpet *vhpet)
+{
+	int i;
+
+	for (i = 0; i < VHPET_NUM_TIMERS; i++)
+		vhpet_stop_timer(vhpet, i);
+}
+
+static __inline void
+update_register(uint64_t *regptr, uint64_t data, uint64_t mask)
+{
+
+	*regptr &= ~mask;
+	*regptr |= (data & mask);
+}
+
+static void
+vhpet_timer_update_config(struct vhpet *vhpet, int n, uint64_t data,
+    uint64_t mask)
+{
+	bool clear_isr;
+	int old_pin, new_pin;
+	uint32_t allowed_irqs;
+	uint64_t oldval, newval;
+
+	if (vhpet_timer_msi_enabled(vhpet, n) ||
+	    vhpet_timer_edge_trig(vhpet, n)) {
+		if (vhpet->isr & (1 << n))
+			panic("vhpet timer %d isr should not be asserted", n);
+	}
+	old_pin = vhpet_timer_ioapic_pin(vhpet, n);
+	oldval = vhpet->timer[n].cap_config;
+
+	newval = oldval;
+	update_register(&newval, data, mask);
+	newval &= ~(HPET_TCAP_RO_MASK | HPET_TCNF_32MODE);
+	newval |= oldval & HPET_TCAP_RO_MASK;
+
+	if (newval == oldval)
+		return;
+
+	vhpet->timer[n].cap_config = newval;
+	VM_CTR2(vhpet->vm, "hpet t%d cap_config set to 0x%016x", n, newval);
+
+	/*
+	 * Validate the interrupt routing in the HPET_TCNF_INT_ROUTE field.
+	 * If it does not match the bits set in HPET_TCAP_INT_ROUTE then set
+	 * it to the default value of 0.
+	 */
+	allowed_irqs = vhpet->timer[n].cap_config >> 32;
+	new_pin = vhpet_timer_ioapic_pin(vhpet, n);
+	if (new_pin != 0 && (allowed_irqs & (1 << new_pin)) == 0) {
+		VM_CTR3(vhpet->vm, "hpet t%d configured invalid irq %d, "
+		    "allowed_irqs 0x%08x", n, new_pin, allowed_irqs);
+		new_pin = 0;
+		vhpet->timer[n].cap_config &= ~HPET_TCNF_INT_ROUTE;
+	}
+
+	if (!vhpet_periodic_timer(vhpet, n))
+		vhpet->timer[n].comprate = 0;
+
+	/*
+	 * If the timer's ISR bit is set then clear it in the following cases:
+	 * - interrupt is disabled
+	 * - interrupt type is changed from level to edge or fsb.
+	 * - interrupt routing is changed
+	 *
+	 * This is to ensure that this timer's level triggered interrupt does
+	 * not remain asserted forever.
+	 */
+	if (vhpet->isr & (1 << n)) {
+		KASSERT(old_pin != 0, ("timer %d isr asserted to ioapic pin %d",
+		    n, old_pin));
+		if (!vhpet_timer_interrupt_enabled(vhpet, n))
+			clear_isr = true;
+		else if (vhpet_timer_msi_enabled(vhpet, n))
+			clear_isr = true;
+		else if (vhpet_timer_edge_trig(vhpet, n))
+			clear_isr = true;
+		else if (vhpet_timer_ioapic_pin(vhpet, n) != old_pin)
+			clear_isr = true;
+		else
+			clear_isr = false;
+
+		if (clear_isr) {
+			VM_CTR1(vhpet->vm, "hpet t%d isr cleared due to "
+			    "configuration change", n);
+			vioapic_deassert_irq(vhpet->vm, old_pin);
+			vhpet->isr &= ~(1 << n);
+		}
+	}
+}
+
+int
+vhpet_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t val, int size,
+    void *arg)
+{
+	struct vhpet *vhpet;
+	uint64_t data, mask, oldval, val64;
+	uint32_t isr_clear_mask, old_compval, old_comprate;
+	int i, offset;
+
+	vhpet = vm_hpet(vm);
+	offset = gpa - VHPET_BASE;
+
+	VHPET_LOCK(vhpet);
+
+	/* Accesses to the HPET should be 4 or 8 bytes wide */
+	switch (size) {
+	case 8:
+		mask = 0xffffffffffffffff;
+		data = val;
+		break;
+	case 4:
+		mask = 0xffffffff;
+		data = val;
+		if ((offset & 0x4) != 0) {
+			mask <<= 32;
+			data <<= 32;
+		} 
+		break;
+	default:
+		VM_CTR2(vhpet->vm, "hpet invalid mmio write: "
+		    "offset 0x%08x, size %d", offset, size);
+		goto done;
+	}
+
+	/* Access to the HPET should be naturally aligned to its width */
+	if (offset & (size - 1)) {
+		VM_CTR2(vhpet->vm, "hpet invalid mmio write: "
+		    "offset 0x%08x, size %d", offset, size);
+		goto done;
+	}
+
+	if (offset == HPET_CONFIG || offset == HPET_CONFIG + 4) {
+		oldval = vhpet->config;
+		update_register(&vhpet->config, data, mask);
+		if ((oldval ^ vhpet->config) & HPET_CNF_ENABLE) {
+			if (vhpet_counter_enabled(vhpet)) {
+				vhpet_start_counting(vhpet);
+				VM_CTR0(vhpet->vm, "hpet enabled");
+			} else {
+				vhpet_stop_counting(vhpet);
+				VM_CTR0(vhpet->vm, "hpet disabled");
+			}
+		}
+		goto done;
+	}
+
+	if (offset == HPET_ISR || offset == HPET_ISR + 4) {
+		isr_clear_mask = vhpet->isr & data;
+		for (i = 0; i < VHPET_NUM_TIMERS; i++) {
+			if ((isr_clear_mask & (1 << i)) != 0) {
+				VM_CTR1(vhpet->vm, "hpet t%d isr cleared", i);
+				vhpet_timer_clear_isr(vhpet, i);
+			}
+		}
+		goto done;
+	}
+
+	if (offset == HPET_MAIN_COUNTER || offset == HPET_MAIN_COUNTER + 4) {
+		/* Zero-extend the counter to 64-bits before updating it */
+		val64 = vhpet->counter;
+		update_register(&val64, data, mask);
+		vhpet->counter = val64;
+		if (vhpet_counter_enabled(vhpet))
+			vhpet_start_counting(vhpet);
+		goto done;
+	}
+
+	for (i = 0; i < VHPET_NUM_TIMERS; i++) {
+		if (offset == HPET_TIMER_CAP_CNF(i) ||
+		    offset == HPET_TIMER_CAP_CNF(i) + 4) {
+			vhpet_timer_update_config(vhpet, i, data, mask);
+			break;
+		}
+
+		if (offset == HPET_TIMER_COMPARATOR(i) ||
+		    offset == HPET_TIMER_COMPARATOR(i) + 4) {
+			old_compval = vhpet->timer[i].compval;
+			old_comprate = vhpet->timer[i].comprate;
+			if (vhpet_periodic_timer(vhpet, i)) {
+				/*
+				 * In periodic mode writes to the comparator
+				 * change the 'compval' register only if the
+				 * HPET_TCNF_VAL_SET bit is set in the config
+				 * register.
+				 */
+				val64 = vhpet->timer[i].comprate;
+				update_register(&val64, data, mask);
+				vhpet->timer[i].comprate = val64;
+				if ((vhpet->timer[i].cap_config &

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

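For reference, the accumulator update in vhpet_adjust_compval() earlier in the vhpet.c diff advances the comparator to the first multiple of the period that is still ahead of the current counter, with all arithmetic wrapping modulo 2^32 like the 32-bit main counter. A standalone sketch with made-up values (not from the tree):

#include <stdint.h>
#include <stdio.h>

static uint32_t
next_compval(uint32_t compval, uint32_t comprate, uint32_t counter)
{
	/* Same expression as vhpet_adjust_compval(): round compval up past counter. */
	return (compval + ((counter - compval) / comprate + 1) * comprate);
}

int
main(void)
{
	/* compval 100, period 50, counter 237: (237 - 100) / 50 + 1 = 3, next = 250 */
	printf("%u\n", next_compval(100, 50, 237));
	return (0);
}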