svn commit: r268891 - in stable/10: lib/libvmmapi sys/amd64/include sys/amd64/vmm sys/amd64/vmm/intel sys/amd64/vmm/io sys/modules/vmm usr.sbin/bhyve

John Baldwin <jhb at FreeBSD.org>
Sat Jul 19 22:06:49 UTC 2014


Author: jhb
Date: Sat Jul 19 22:06:46 2014
New Revision: 268891
URL: http://svnweb.freebsd.org/changeset/base/268891

Log:
  MFC 259942,262274,263035,263054,263211,263744,264179,264324,264468,264631,
  264648,264650,264651,266572,267558:
  Flesh out the AT PIC and 8254 PIT emulations and move them into the kernel.

Added:
  stable/10/sys/amd64/vmm/io/vatpic.c
     - copied, changed from r263035, head/sys/amd64/vmm/io/vatpic.c
  stable/10/sys/amd64/vmm/io/vatpic.h
     - copied, changed from r263035, head/sys/amd64/vmm/io/vatpic.h
  stable/10/sys/amd64/vmm/io/vatpit.c
     - copied, changed from r263744, head/sys/amd64/vmm/io/vatpit.c
  stable/10/sys/amd64/vmm/io/vatpit.h
     - copied, changed from r263744, head/sys/amd64/vmm/io/vatpit.h
  stable/10/sys/amd64/vmm/vmm_ioport.c
     - copied, changed from r263035, head/sys/amd64/vmm/vmm_ioport.c
  stable/10/sys/amd64/vmm/vmm_ioport.h
     - copied, changed from r263035, head/sys/amd64/vmm/vmm_ioport.h
Deleted:
  stable/10/usr.sbin/bhyve/atpic.c
  stable/10/usr.sbin/bhyve/elcr.c
  stable/10/usr.sbin/bhyve/pit_8254.c
  stable/10/usr.sbin/bhyve/pit_8254.h
Modified:
  stable/10/lib/libvmmapi/vmmapi.c
  stable/10/lib/libvmmapi/vmmapi.h
  stable/10/sys/amd64/include/vmm.h
  stable/10/sys/amd64/include/vmm_dev.h
  stable/10/sys/amd64/vmm/intel/vmx.c
  stable/10/sys/amd64/vmm/io/vhpet.c
  stable/10/sys/amd64/vmm/io/vlapic.c
  stable/10/sys/amd64/vmm/vmm.c
  stable/10/sys/amd64/vmm/vmm_dev.c
  stable/10/sys/modules/vmm/Makefile
  stable/10/usr.sbin/bhyve/Makefile
  stable/10/usr.sbin/bhyve/inout.c
  stable/10/usr.sbin/bhyve/pci_lpc.c
  stable/10/usr.sbin/bhyve/pm.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/lib/libvmmapi/vmmapi.c
==============================================================================
--- stable/10/lib/libvmmapi/vmmapi.c	Sat Jul 19 21:04:21 2014	(r268890)
+++ stable/10/lib/libvmmapi/vmmapi.c	Sat Jul 19 22:06:46 2014	(r268891)
@@ -458,6 +458,41 @@ vm_ioapic_pincount(struct vmctx *ctx, in
 }
 
 int
+vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
+{
+	struct vm_isa_irq isa_irq;
+
+	bzero(&isa_irq, sizeof(struct vm_isa_irq));
+	isa_irq.atpic_irq = atpic_irq;
+	isa_irq.ioapic_irq = ioapic_irq;
+
+	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
+}
+
+int
+vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
+{
+	struct vm_isa_irq isa_irq;
+
+	bzero(&isa_irq, sizeof(struct vm_isa_irq));
+	isa_irq.atpic_irq = atpic_irq;
+	isa_irq.ioapic_irq = ioapic_irq;
+
+	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
+}
+
+int
+vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
+{
+	struct vm_isa_irq isa_irq;
+	bzero(&isa_irq, sizeof(struct vm_isa_irq));
+	isa_irq.atpic_irq = atpic_irq;
+	isa_irq.ioapic_irq = ioapic_irq;
+
+	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
+}
+
+int
 vm_inject_nmi(struct vmctx *ctx, int vcpu)
 {
 	struct vm_nmi vmnmi;

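A minimal usage sketch of the new wrappers (not part of this commit; the
IRQ numbers are illustrative and error handling is elided): a userland
device model that routes COM1 to 8259 pin 4 and to I/O APIC pin 4 could
pulse the interrupt like this.

#include <sys/types.h>

#include <machine/vmm.h>
#include <vmmapi.h>

/* Hypothetical sketch: pulse legacy IRQ 4 for an emulated COM1 device. */
static void
com1_intr_sketch(struct vmctx *ctx)
{
	(void) vm_isa_pulse_irq(ctx, 4, 4);
}
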
Modified: stable/10/lib/libvmmapi/vmmapi.h
==============================================================================
--- stable/10/lib/libvmmapi/vmmapi.h	Sat Jul 19 21:04:21 2014	(r268890)
+++ stable/10/lib/libvmmapi/vmmapi.h	Sat Jul 19 22:06:46 2014	(r268891)
@@ -71,6 +71,9 @@ int	vm_ioapic_assert_irq(struct vmctx *c
 int	vm_ioapic_deassert_irq(struct vmctx *ctx, int irq);
 int	vm_ioapic_pulse_irq(struct vmctx *ctx, int irq);
 int	vm_ioapic_pincount(struct vmctx *ctx, int *pincount);
+int	vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
+int	vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
+int	vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
 int	vm_inject_nmi(struct vmctx *ctx, int vcpu);
 int	vm_capability_name2type(const char *capname);
 const char *vm_capability_type2name(int type);

Modified: stable/10/sys/amd64/include/vmm.h
==============================================================================
--- stable/10/sys/amd64/include/vmm.h	Sat Jul 19 21:04:21 2014	(r268890)
+++ stable/10/sys/amd64/include/vmm.h	Sat Jul 19 22:06:46 2014	(r268891)
@@ -117,6 +117,9 @@ int vm_run(struct vm *vm, struct vm_run 
 int vm_inject_nmi(struct vm *vm, int vcpu);
 int vm_nmi_pending(struct vm *vm, int vcpuid);
 void vm_nmi_clear(struct vm *vm, int vcpuid);
+int vm_inject_extint(struct vm *vm, int vcpu);
+int vm_extint_pending(struct vm *vm, int vcpuid);
+void vm_extint_clear(struct vm *vm, int vcpuid);
 uint64_t *vm_guest_msrs(struct vm *vm, int cpu);
 struct vlapic *vm_lapic(struct vm *vm, int cpu);
 struct vioapic *vm_ioapic(struct vm *vm);
@@ -187,6 +190,8 @@ void vcpu_notify_event(struct vm *vm, in
 struct vmspace *vm_get_vmspace(struct vm *vm);
 int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
 int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);
+struct vatpic *vm_atpic(struct vm *vm);
+struct vatpit *vm_atpit(struct vm *vm);
 
 /*
  * Inject exception 'vme' into the guest vcpu. This function returns 0 on

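A hedged illustration of how the new ExtINT hooks are meant to be used
(the helper name below is invented; it is not part of this commit): a
kernel-side device emulation marks the event pending, and the VT-x
injection path in vmx.c (see below) later fetches the vector from the
vatpic and retires it with vm_extint_clear()/vatpic_intr_accepted().

/* Hypothetical helper, for illustration only. */
static void
example_raise_extint(struct vm *vm, int vcpuid)
{
	if (!vm_extint_pending(vm, vcpuid))
		(void) vm_inject_extint(vm, vcpuid);
}
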
Modified: stable/10/sys/amd64/include/vmm_dev.h
==============================================================================
--- stable/10/sys/amd64/include/vmm_dev.h	Sat Jul 19 21:04:21 2014	(r268890)
+++ stable/10/sys/amd64/include/vmm_dev.h	Sat Jul 19 22:06:46 2014	(r268891)
@@ -79,6 +79,11 @@ struct vm_ioapic_irq {
 	int		irq;
 };
 
+struct vm_isa_irq {
+	int		atpic_irq;
+	int		ioapic_irq;
+};
+
 struct vm_capability {
 	int		cpuid;
 	enum vm_cap_type captype;
@@ -198,6 +203,11 @@ enum {
 	IOCNUM_SET_X2APIC_STATE = 60,
 	IOCNUM_GET_X2APIC_STATE = 61,
 	IOCNUM_GET_HPET_CAPABILITIES = 62,
+
+	/* legacy interrupt injection */
+	IOCNUM_ISA_ASSERT_IRQ = 80,
+	IOCNUM_ISA_DEASSERT_IRQ = 81,
+	IOCNUM_ISA_PULSE_IRQ = 82,
 };
 
 #define	VM_RUN		\
@@ -230,6 +240,12 @@ enum {
 	_IOW('v', IOCNUM_IOAPIC_PULSE_IRQ, struct vm_ioapic_irq)
 #define	VM_IOAPIC_PINCOUNT	\
 	_IOR('v', IOCNUM_IOAPIC_PINCOUNT, int)
+#define	VM_ISA_ASSERT_IRQ	\
+	_IOW('v', IOCNUM_ISA_ASSERT_IRQ, struct vm_isa_irq)
+#define	VM_ISA_DEASSERT_IRQ	\
+	_IOW('v', IOCNUM_ISA_DEASSERT_IRQ, struct vm_isa_irq)
+#define	VM_ISA_PULSE_IRQ	\
+	_IOW('v', IOCNUM_ISA_PULSE_IRQ, struct vm_isa_irq)
 #define	VM_SET_CAPABILITY \
 	_IOW('v', IOCNUM_SET_CAPABILITY, struct vm_capability)
 #define	VM_GET_CAPABILITY \

Modified: stable/10/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- stable/10/sys/amd64/vmm/intel/vmx.c	Sat Jul 19 21:04:21 2014	(r268890)
+++ stable/10/sys/amd64/vmm/intel/vmx.c	Sat Jul 19 22:06:46 2014	(r268891)
@@ -52,10 +52,12 @@ __FBSDID("$FreeBSD$");
 #include <machine/vmm.h>
 #include <machine/vmm_dev.h>
 #include "vmm_host.h"
+#include "vmm_ioport.h"
 #include "vmm_ipi.h"
 #include "vmm_msr.h"
 #include "vmm_ktr.h"
 #include "vmm_stat.h"
+#include "vatpic.h"
 #include "vlapic.h"
 #include "vlapic_priv.h"
 
@@ -1046,6 +1048,7 @@ vmx_set_pcpu_defaults(struct vmx *vmx, i
 			invvpid_desc._res1 = 0;
 			invvpid_desc._res2 = 0;
 			invvpid_desc.vpid = vmxstate->vpid;
+			invvpid_desc.linear_addr = 0;
 			invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
 		} else {
 			/*
@@ -1143,7 +1146,7 @@ static void
 vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
 {
 	struct vm_exception exc;
-	int vector, need_nmi_exiting;
+	int vector, need_nmi_exiting, extint_pending;
 	uint64_t rflags;
 	uint32_t gi, info;
 
@@ -1195,7 +1198,9 @@ vmx_inject_interrupts(struct vmx *vmx, i
 			vmx_set_nmi_window_exiting(vmx, vcpu);
 	}
 
-	if (virtual_interrupt_delivery) {
+	extint_pending = vm_extint_pending(vmx->vm, vcpu);
+
+	if (!extint_pending && virtual_interrupt_delivery) {
 		vmx_inject_pir(vlapic);
 		return;
 	}
@@ -1211,11 +1216,32 @@ vmx_inject_interrupts(struct vmx *vmx, i
 		return;
 	}
 
-	/* Ask the local apic for a vector to inject */
-	if (!vlapic_pending_intr(vlapic, &vector))
-		return;
+	if (!extint_pending) {
+		/* Ask the local apic for a vector to inject */
+		if (!vlapic_pending_intr(vlapic, &vector))
+			return;
 
-	KASSERT(vector >= 32 && vector <= 255, ("invalid vector %d", vector));
+		/*
+		 * From the Intel SDM, Volume 3, Section "Maskable
+		 * Hardware Interrupts":
+		 * - maskable interrupt vectors [16,255] can be delivered
+		 *   through the local APIC.
+		*/
+		KASSERT(vector >= 16 && vector <= 255,
+		    ("invalid vector %d from local APIC", vector));
+	} else {
+		/* Ask the legacy pic for a vector to inject */
+		vatpic_pending_intr(vmx->vm, &vector);
+
+		/*
+		 * From the Intel SDM, Volume 3, Section "Maskable
+		 * Hardware Interrupts":
+		 * - maskable interrupt vectors [0,255] can be delivered
+		 *   through the INTR pin.
+		 */
+		KASSERT(vector >= 0 && vector <= 255,
+		    ("invalid vector %d from INTR", vector));
+	}
 
 	/* Check RFLAGS.IF and the interruptibility state of the guest */
 	rflags = vmcs_read(VMCS_GUEST_RFLAGS);
@@ -1251,8 +1277,22 @@ vmx_inject_interrupts(struct vmx *vmx, i
 	info |= vector;
 	vmcs_write(VMCS_ENTRY_INTR_INFO, info);
 
-	/* Update the Local APIC ISR */
-	vlapic_intr_accepted(vlapic, vector);
+	if (!extint_pending) {
+		/* Update the Local APIC ISR */
+		vlapic_intr_accepted(vlapic, vector);
+	} else {
+		vm_extint_clear(vmx->vm, vcpu);
+		vatpic_intr_accepted(vmx->vm, vector);
+
+		/*
+		 * After we accepted the current ExtINT the PIC may
+		 * have posted another one.  If that is the case, set
+		 * the Interrupt Window Exiting execution control so
+		 * we can inject that one too.
+		 */
+		if (vm_extint_pending(vmx->vm, vcpu))
+			vmx_set_int_window_exiting(vmx, vcpu);
+	}
 
 	VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
 
@@ -1388,6 +1428,7 @@ vmx_emulate_cr_access(struct vmx *vmx, i
 	if (cr != 0 && cr != 4)
 		return (UNHANDLED);
 
+	regval = 0; /* silence gcc */
 	vmxctx = &vmx->ctx[vcpu];
 
 	/*
@@ -1882,6 +1923,11 @@ vmx_exit_process(struct vmx *vmx, int vc
 		vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
 		vmexit->u.inout.port = (uint16_t)(qual >> 16);
 		vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
+		error = emulate_ioport(vmx->vm, vcpu, vmexit);
+		if (error == 0)  {
+			handled = 1;
+			vmxctx->guest_rax = vmexit->u.inout.eax;
+		}
 		break;
 	case EXIT_REASON_CPUID:
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
@@ -2719,7 +2765,7 @@ vmx_inject_pir(struct vlapic *vlapic)
 	struct pir_desc *pir_desc;
 	struct LAPIC *lapic;
 	uint64_t val, pirval;
-	int rvi, pirbase;
+	int rvi, pirbase = -1;
 	uint16_t intr_status_old, intr_status_new;
 
 	vlapic_vtx = (struct vlapic_vtx *)vlapic;
@@ -2731,6 +2777,7 @@ vmx_inject_pir(struct vlapic *vlapic)
 	}
 
 	pirval = 0;
+	pirbase = -1;
 	lapic = vlapic->apic_page;
 
 	val = atomic_readandclear_long(&pir_desc->pir[0]);
@@ -2764,11 +2811,29 @@ vmx_inject_pir(struct vlapic *vlapic)
 		pirbase = 192;
 		pirval = val;
 	}
+
 	VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
 
 	/*
 	 * Update RVI so the processor can evaluate pending virtual
 	 * interrupts on VM-entry.
+	 *
+	 * It is possible for pirval to be 0 here, even though the
+	 * pending bit has been set. The scenario is:
+	 * CPU-Y is sending a posted interrupt to CPU-X, which
+	 * is running a guest and processing posted interrupts in h/w.
+	 * CPU-X will eventually exit and the state seen in s/w is
+	 * the pending bit set, but no PIR bits set.
+	 *
+	 *      CPU-X                      CPU-Y
+	 *   (vm running)                (host running)
+	 *   rx posted interrupt
+	 *   CLEAR pending bit
+	 *				 SET PIR bit
+	 *   READ/CLEAR PIR bits
+	 *				 SET pending bit
+	 *   (vm exit)
+	 *   pending bit set, PIR 0
 	 */
 	if (pirval != 0) {
 		rvi = pirbase + flsl(pirval) - 1;

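A quick numerical check of the RVI computation in vmx_inject_pir() above
(illustrative only; per the code, pir[1] covers vectors 64-127, so its
pirbase is 64):

#include <assert.h>
#include <stdint.h>
#include <strings.h>	/* flsl() */

/* Sanity-check "rvi = pirbase + flsl(pirval) - 1" for vector 77. */
static void
pir_rvi_example(void)
{
	int pirbase = 64;			/* pir[1]: vectors 64..127 */
	uint64_t pirval = 1UL << (77 - 64);	/* only vector 77 pending */

	assert(pirbase + flsl(pirval) - 1 == 77);
}
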
Copied and modified: stable/10/sys/amd64/vmm/io/vatpic.c (from r263035, head/sys/amd64/vmm/io/vatpic.c)
==============================================================================
--- head/sys/amd64/vmm/io/vatpic.c	Tue Mar 11 16:56:00 2014	(r263035, copy source)
+++ stable/10/sys/amd64/vmm/io/vatpic.c	Sat Jul 19 22:06:46 2014	(r268891)
@@ -67,6 +67,7 @@ struct atpic {
 	bool		aeoi;
 	bool		poll;
 	bool		rotate;
+	bool		sfn;		/* special fully-nested mode */
 
 	int		irq_base;
 	uint8_t		request;	/* Interrupt Request Register (IIR) */
@@ -75,6 +76,8 @@ struct atpic {
 
 	int		acnt[8];	/* sum of pin asserts and deasserts */
 	int		priority;	/* current pin priority */
+
+	bool		intr_raised;
 };
 
 struct vatpic {
@@ -99,6 +102,7 @@ struct vatpic {
 #define	VATPIC_CTR4(vatpic, fmt, a1, a2, a3, a4)			\
 	VM_CTR4((vatpic)->vm, fmt, a1, a2, a3, a4)
 
+static void vatpic_set_pinstate(struct vatpic *vatpic, int pin, bool newstate);
 
 static __inline int
 vatpic_get_highest_isrpin(struct atpic *atpic)
@@ -120,13 +124,23 @@ vatpic_get_highest_isrpin(struct atpic *
 static __inline int
 vatpic_get_highest_irrpin(struct atpic *atpic)
 {
+	int serviced;
 	int bit, pin;
 	int i, j;
 
+	/*
+	 * In 'Special Fully-Nested Mode' when an interrupt request from
+	 * a slave is in service, the slave is not locked out from the
+	 * master's priority logic.
+	 */
+	serviced = atpic->service;
+	if (atpic->sfn)
+		serviced &= ~(1 << 2);
+
 	for (i = 0; i <= 7; i++) {
 		pin = ((i + 7 - atpic->priority) & 0x7);
 		bit = (1 << pin);
-		if (atpic->service & bit)
+		if (serviced & bit)
 			break;
 	}
 
@@ -148,17 +162,65 @@ vatpic_notify_intr(struct vatpic *vatpic
 
 	KASSERT(VATPIC_LOCKED(vatpic), ("vatpic_notify_intr not locked"));
 
-	/* XXX master only */
-	atpic = &vatpic->atpic[0];
+	/*
+	 * First check the slave.
+	 */
+	atpic = &vatpic->atpic[1];
+	if (!atpic->intr_raised &&
+	    (pin = vatpic_get_highest_irrpin(atpic)) != -1) {
+		VATPIC_CTR4(vatpic, "atpic slave notify pin = %d "
+		    "(imr 0x%x irr 0x%x isr 0x%x)", pin,
+		    atpic->mask, atpic->request, atpic->service);
+
+		/*
+		 * Cascade the request from the slave to the master.
+		 */
+		atpic->intr_raised = true;
+		vatpic_set_pinstate(vatpic, 2, true);
+		vatpic_set_pinstate(vatpic, 2, false);
+	} else {
+		VATPIC_CTR3(vatpic, "atpic slave no eligible interrupts "
+		    "(imr 0x%x irr 0x%x isr 0x%x)",
+		    atpic->mask, atpic->request, atpic->service);
+	}
 
-	if ((pin = vatpic_get_highest_irrpin(atpic)) != -1) {
-		VATPIC_CTR4(vatpic, "atpic notify pin = %d "
+	/*
+	 * Then check the master.
+	 */
+	atpic = &vatpic->atpic[0];
+	if (!atpic->intr_raised &&
+	    (pin = vatpic_get_highest_irrpin(atpic)) != -1) {
+		VATPIC_CTR4(vatpic, "atpic master notify pin = %d "
 		    "(imr 0x%x irr 0x%x isr 0x%x)", pin,
 		    atpic->mask, atpic->request, atpic->service);
+
+		/*
+		 * PIC interrupts are routed to both the Local APIC
+		 * and the I/O APIC to support operation in 1 of 3
+		 * modes.
+		 *
+		 * 1. Legacy PIC Mode: the PIC effectively bypasses
+		 * all APIC components.  In mode '1' the local APIC is
+		 * disabled and LINT0 is reconfigured as INTR to
+		 * deliver the PIC interrupt directly to the CPU.
+		 *
+		 * 2. Virtual Wire Mode: the APIC is treated as a
+		 * virtual wire which delivers interrupts from the PIC
+		 * to the CPU.  In mode '2' LINT0 is programmed as
+		 * ExtINT to indicate that the PIC is the source of
+		 * the interrupt.
+		 *
+		 * 3. Symmetric I/O Mode: PIC interrupts are fielded
+		 * by the I/O APIC and delivered to the appropriate
+		 * CPU.  In mode '3' the I/O APIC input 0 is
+		 * programmed as ExtINT to indicate that the PIC is
+		 * the source of the interrupt.
+		 */
+		atpic->intr_raised = true;
 		lapic_set_local_intr(vatpic->vm, -1, APIC_LVT_LINT0);
 		vioapic_pulse_irq(vatpic->vm, 0);
 	} else {
-		VATPIC_CTR3(vatpic, "atpic no eligible interrupts "
+		VATPIC_CTR3(vatpic, "atpic master no eligible interrupts "
 		    "(imr 0x%x irr 0x%x isr 0x%x)",
 		    atpic->mask, atpic->request, atpic->service);
 	}
@@ -384,7 +446,7 @@ vatpic_pulse_irq(struct vm *vm, int irq)
 	return (vatpic_set_irqstate(vm, irq, IRQSTATE_PULSE));
 }
 
-int
+void
 vatpic_pending_intr(struct vm *vm, int *vecptr)
 {
 	struct vatpic *vatpic;
@@ -393,7 +455,6 @@ vatpic_pending_intr(struct vm *vm, int *
 
 	vatpic = vm_atpic(vm);
 
-	/* XXX master only */
 	atpic = &vatpic->atpic[0];
 
 	VATPIC_LOCK(vatpic);
@@ -401,37 +462,53 @@ vatpic_pending_intr(struct vm *vm, int *
 	pin = vatpic_get_highest_irrpin(atpic);
 	if (pin == -1)
 		pin = 7;
+	if (pin == 2) {
+		atpic = &vatpic->atpic[1];
+		pin = vatpic_get_highest_irrpin(atpic);
+	}
 
 	*vecptr = atpic->irq_base + pin;
 
 	VATPIC_UNLOCK(vatpic);
+}
 
-	return (1);
+static void
+vatpic_pin_accepted(struct atpic *atpic, int pin)
+{
+	atpic->intr_raised = false;
+
+	if (atpic->acnt[pin] == 0)
+		atpic->request &= ~(1 << pin);
+
+	if (atpic->aeoi == true) {
+		if (atpic->rotate == true)
+			atpic->priority = pin;
+	} else {
+		atpic->service |= (1 << pin);
+	}
 }
 
 void
 vatpic_intr_accepted(struct vm *vm, int vector)
 {
 	struct vatpic *vatpic;
-	struct atpic *atpic;
 	int pin;
 
 	vatpic = vm_atpic(vm);
 
-	/* XXX master only */
-	atpic = &vatpic->atpic[0];
-
 	VATPIC_LOCK(vatpic);
-	pin = vector & 0x7;
 
-	if (atpic->acnt[pin] == 0)
-		atpic->request &= ~(1 << pin);
+	pin = vector & 0x7;
 
-	if (atpic->aeoi == true) {
-		if (atpic->rotate == true)
-			atpic->priority = pin;
+	if ((vector & ~0x7) == vatpic->atpic[1].irq_base) {
+		vatpic_pin_accepted(&vatpic->atpic[1], pin);
+		/*
+		 * If this vector originated from the slave,
+		 * accept the cascaded interrupt too.
+		 */
+		vatpic_pin_accepted(&vatpic->atpic[0], 2);
 	} else {
-		atpic->service |= (1 << pin);
+		vatpic_pin_accepted(&vatpic->atpic[0], pin);
 	}
 
 	vatpic_notify_intr(vatpic);
@@ -439,51 +516,50 @@ vatpic_intr_accepted(struct vm *vm, int 
 	VATPIC_UNLOCK(vatpic);
 }
 
-int
-vatpic_master_handler(void *vm, int vcpuid, struct vm_exit *vmexit)
+static int
+vatpic_read(struct vatpic *vatpic, struct atpic *atpic, bool in, int port,
+	    int bytes, uint32_t *eax)
 {
-	struct vatpic *vatpic;
-	struct atpic *atpic;
-	int error;
-	uint8_t val;
-
-	vatpic = vm_atpic(vm);
-	atpic = &vatpic->atpic[0];
+	VATPIC_LOCK(vatpic);
 
-	if (vmexit->u.inout.bytes != 1)
+	if (atpic->poll) {
+		VATPIC_CTR0(vatpic, "vatpic polled mode not supported");
+		VATPIC_UNLOCK(vatpic);
 		return (-1);
-
-	if (vmexit->u.inout.in) {
-		VATPIC_LOCK(vatpic);
-		if (atpic->poll) {
-			VATPIC_CTR0(vatpic, "vatpic polled mode not "
-			    "supported");
-			VATPIC_UNLOCK(vatpic);
-			return (-1);
+	} else {
+		if (port & ICU_IMR_OFFSET) {
+			/* read interrrupt mask register */
+			*eax = atpic->mask;
 		} else {
-			if (vmexit->u.inout.port & ICU_IMR_OFFSET) {
-				/* read interrrupt mask register */
-				vmexit->u.inout.eax = atpic->mask;
+			if (atpic->rd_cmd_reg == OCW3_RIS) {
+				/* read interrupt service register */
+				*eax = atpic->service;
 			} else {
-				if (atpic->rd_cmd_reg == OCW3_RIS) {
-					/* read interrupt service register */
-					vmexit->u.inout.eax = atpic->service;
-				} else {
-					/* read interrupt request register */
-					vmexit->u.inout.eax = atpic->request;
-				}
+				/* read interrupt request register */
+				*eax = atpic->request;
 			}
 		}
-		VATPIC_UNLOCK(vatpic);
-
-		return (0);
 	}
 
-	val = vmexit->u.inout.eax;
+	VATPIC_UNLOCK(vatpic);
+
+	return (0);
+
+}
+
+static int
+vatpic_write(struct vatpic *vatpic, struct atpic *atpic, bool in, int port,
+    int bytes, uint32_t *eax)
+{
+	int error;
+	uint8_t val;
+
+	error = 0;
+	val = *eax;
 
 	VATPIC_LOCK(vatpic);
 
-	if (vmexit->u.inout.port & ICU_IMR_OFFSET) {
+	if (port & ICU_IMR_OFFSET) {
 		if (atpic->ready) {
 			error = vatpic_ocw1(vatpic, atpic, val);
 		} else {
@@ -520,41 +596,65 @@ vatpic_master_handler(void *vm, int vcpu
 }
 
 int
-vatpic_slave_handler(void *vm, int vcpuid, struct vm_exit *vmexit)
+vatpic_master_handler(void *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax)
 {
-	if (vmexit->u.inout.bytes != 1)
+	struct vatpic *vatpic;
+	struct atpic *atpic;
+
+	vatpic = vm_atpic(vm);
+	atpic = &vatpic->atpic[0];
+
+	if (bytes != 1)
 		return (-1);
  
-	if (vmexit->u.inout.in) {
-		if (vmexit->u.inout.port & ICU_IMR_OFFSET) {
-			/* all interrupts masked */
-			vmexit->u.inout.eax = 0xff;
-		} else {
-			vmexit->u.inout.eax = 0x00;
-		}
+	if (in) {
+		return (vatpic_read(vatpic, atpic, in, port, bytes, eax));
 	}
  
-	/* Pretend all accesses to the slave 8259 are alright */
-	return (0);
+	return (vatpic_write(vatpic, atpic, in, port, bytes, eax));
 }
 
 int
-vatpic_elc_handler(void *vm, int vcpuid, struct vm_exit *vmexit)
+vatpic_slave_handler(void *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax)
+{
+	struct vatpic *vatpic;
+	struct atpic *atpic;
+
+	vatpic = vm_atpic(vm);
+	atpic = &vatpic->atpic[1];
+
+	if (bytes != 1)
+		return (-1);
+
+	if (in) {
+		return (vatpic_read(vatpic, atpic, in, port, bytes, eax));
+	}
+
+	return (vatpic_write(vatpic, atpic, in, port, bytes, eax));
+}
+
+int
+vatpic_elc_handler(void *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax)
 {
 	struct vatpic *vatpic;
 	bool is_master;
 
 	vatpic = vm_atpic(vm);
-	is_master = (vmexit->u.inout.port == IO_ELCR1);
+	is_master = (port == IO_ELCR1);
 
-	if (vmexit->u.inout.bytes != 1)
+	if (bytes != 1)
 		return (-1);
 
-	if (vmexit->u.inout.in) {
+	VATPIC_LOCK(vatpic);
+
+	if (in) {
 		if (is_master)
-			vmexit->u.inout.eax = vatpic->elc[0];
+			*eax = vatpic->elc[0];
 		else
-			vmexit->u.inout.eax = vatpic->elc[1];
+			*eax = vatpic->elc[1];
 	} else {
 		/*
 		 * For the master PIC the cascade channel (IRQ2), the
@@ -567,11 +667,13 @@ vatpic_elc_handler(void *vm, int vcpuid,
 		 * be programmed for level mode.
 		 */
 		if (is_master)
-			vatpic->elc[0] = (vmexit->u.inout.eax & 0xf8);
+			vatpic->elc[0] = (*eax & 0xf8);
 		else
-			vatpic->elc[1] = (vmexit->u.inout.eax & 0xde);
+			vatpic->elc[1] = (*eax & 0xde);
 	}
 
+	VATPIC_UNLOCK(vatpic);
+
 	return (0);
 }
 

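The slave/master vector arithmetic used by vatpic_pending_intr() and
vatpic_intr_accepted() above, worked through for a common guest setup
(the ICW2 bases 0x20/0x28 are an assumption for the example, not part
of the diff):

#include <assert.h>

/* Illustrative only: ISA IRQ 12 with master irq_base 0x20, slave 0x28. */
static void
vatpic_vector_example(void)
{
	int slave_base = 0x28;
	int pin = 12 - 8;		/* IRQ 12 is slave pin 4 */
	int vector = slave_base + pin;	/* vatpic_pending_intr() reports 0x2c */

	/* vatpic_intr_accepted() matches the slave by the vector's base... */
	assert((vector & ~0x7) == slave_base);
	/* ...retires slave pin 4, and also the master's cascade pin 2. */
	assert((vector & 0x7) == 4);
}
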
Copied and modified: stable/10/sys/amd64/vmm/io/vatpic.h (from r263035, head/sys/amd64/vmm/io/vatpic.h)
==============================================================================
--- head/sys/amd64/vmm/io/vatpic.h	Tue Mar 11 16:56:00 2014	(r263035, copy source)
+++ stable/10/sys/amd64/vmm/io/vatpic.h	Sat Jul 19 22:06:46 2014	(r268891)
@@ -39,15 +39,18 @@
 struct vatpic *vatpic_init(struct vm *vm);
 void vatpic_cleanup(struct vatpic *vatpic);
 
-int vatpic_master_handler(void *vm, int vcpuid, struct vm_exit *vmexit);
-int vatpic_slave_handler(void *vm, int vcpuid, struct vm_exit *vmexit);
-int vatpic_elc_handler(void *vm, int vcpuid, struct vm_exit *vmexit);
+int vatpic_master_handler(void *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax);
+int vatpic_slave_handler(void *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax);
+int vatpic_elc_handler(void *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax);
 
 int vatpic_assert_irq(struct vm *vm, int irq);
 int vatpic_deassert_irq(struct vm *vm, int irq);
 int vatpic_pulse_irq(struct vm *vm, int irq);
 
-int vatpic_pending_intr(struct vm *vm, int *vecptr);
+void vatpic_pending_intr(struct vm *vm, int *vecptr);
 void vatpic_intr_accepted(struct vm *vm, int vector);
 
 #endif	/* _VATPIC_H_ */

Copied and modified: stable/10/sys/amd64/vmm/io/vatpit.c (from r263744, head/sys/amd64/vmm/io/vatpit.c)
==============================================================================
--- head/sys/amd64/vmm/io/vatpit.c	Tue Mar 25 19:20:34 2014	(r263744, copy source)
+++ stable/10/sys/amd64/vmm/io/vatpit.c	Sat Jul 19 22:06:46 2014	(r268891)
@@ -56,6 +56,17 @@ static MALLOC_DEFINE(M_VATPIT, "atpit", 
 #define	TIMER_MODE_MASK		0x0f
 #define	TIMER_SEL_READBACK	0xc0
 
+#define	TIMER_STS_OUT		0x80
+#define	TIMER_STS_NULLCNT	0x40
+
+#define	TIMER_RB_LCTR		0x20
+#define	TIMER_RB_LSTATUS	0x10
+#define	TIMER_RB_CTR_2		0x08
+#define	TIMER_RB_CTR_1		0x04
+#define	TIMER_RB_CTR_0		0x02
+
+#define	TMR2_OUT_STS		0x20
+
 #define	PIT_8254_FREQ		1193182
 #define	TIMER_DIV(freq, hz)	(((freq) + (hz) / 2) / (hz))
 
@@ -71,6 +82,8 @@ struct channel {
 	sbintime_t	now_sbt;	/* uptime when counter was loaded */
 	uint8_t		cr[2];
 	uint8_t		ol[2];
+	bool		slatched;	/* status latched */
+	uint8_t		status;
 	int		crbyte;
 	int		olbyte;
 	int		frbyte;
@@ -88,22 +101,29 @@ struct vatpit {
 	struct channel	channel[3];
 };
 
-#define	VATPIT_CTR0(vatpit, fmt)					\
-	VM_CTR0((vatpit)->vm, fmt)
-
-#define	VATPIT_CTR1(vatpit, fmt, a1)					\
-	VM_CTR1((vatpit)->vm, fmt, a1)
+static void pit_timer_start_cntr0(struct vatpit *vatpit);
 
-#define	VATPIT_CTR2(vatpit, fmt, a1, a2)				\
-	VM_CTR2((vatpit)->vm, fmt, a1, a2)
+static int
+vatpit_get_out(struct vatpit *vatpit, int channel)
+{
+	struct channel *c;
+	sbintime_t delta_ticks;
+	int out;
 
-#define	VATPIT_CTR3(vatpit, fmt, a1, a2, a3)				\
-	VM_CTR3((vatpit)->vm, fmt, a1, a2, a3)
+	c = &vatpit->channel[channel];
 
-#define	VATPIT_CTR4(vatpit, fmt, a1, a2, a3, a4)			\
-	VM_CTR4((vatpit)->vm, fmt, a1, a2, a3, a4)
+	switch (c->mode) {
+	case TIMER_INTTC:
+		delta_ticks = (sbinuptime() - c->now_sbt) / vatpit->freq_sbt;
+		out = ((c->initial - delta_ticks) <= 0);
+		break;
+	default:
+		out = 0;
+		break;
+	}
 
-static void pit_timer_start_cntr0(struct vatpit *vatpit);
+	return (out);
+}
 
 static void
 vatpit_callout_handler(void *a)
@@ -117,7 +137,7 @@ vatpit_callout_handler(void *a)
 	c = &vatpit->channel[arg->channel_num];
 	callout = &c->callout;
 
-	VATPIT_CTR1(vatpit, "atpit t%d fired", arg->channel_num);
+	VM_CTR1(vatpit->vm, "atpit t%d fired", arg->channel_num);
 
 	VATPIT_LOCK(vatpit);
 
@@ -145,7 +165,7 @@ static void
 pit_timer_start_cntr0(struct vatpit *vatpit)
 {
 	struct channel *c;
-	sbintime_t delta, precision;
+	sbintime_t now, delta, precision;
 
 	c = &vatpit->channel[0];
 	if (c->initial != 0) {
@@ -153,6 +173,15 @@ pit_timer_start_cntr0(struct vatpit *vat
 		precision = delta >> tc_precexp;
 		c->callout_sbt = c->callout_sbt + delta;
 
+		/*
+		 * Reset 'callout_sbt' if the time that the callout
+		 * was supposed to fire is more than 'c->initial'
+		 * ticks in the past.
+		 */
+		now = sbinuptime();
+		if (c->callout_sbt < now)
+			c->callout_sbt = now + delta;
+
 		callout_reset_sbt(&c->callout, c->callout_sbt,
 		    precision, vatpit_callout_handler, &c->callout_arg,
 		    C_ABSOLUTE);
@@ -180,6 +209,7 @@ pit_update_counter(struct vatpit *vatpit
 		 */
 		c->initial = TIMER_DIV(PIT_8254_FREQ, 100);
 		c->now_sbt = sbinuptime();
+		c->status &= ~TIMER_STS_NULLCNT;
 	}
 
 	delta_ticks = (sbinuptime() - c->now_sbt) / vatpit->freq_sbt;
@@ -196,6 +226,57 @@ pit_update_counter(struct vatpit *vatpit
 }
 
 static int
+pit_readback1(struct vatpit *vatpit, int channel, uint8_t cmd)
+{
+	struct channel *c;
+
+	c = &vatpit->channel[channel];
+
+	/*
+	 * Latch the count/status of the timer if not already latched.
+	 * N.B. that the count/status latch-select bits are active-low.
+	 */
+	if (!(cmd & TIMER_RB_LCTR) && !c->olbyte) {
+		(void) pit_update_counter(vatpit, c, true);
+	}
+
+	if (!(cmd & TIMER_RB_LSTATUS) && !c->slatched) {
+		c->slatched = true;
+		/*
+		 * For mode 0, see if the elapsed time is greater
+		 * than the initial value - this results in the
+		 * output pin being set to 1 in the status byte.
+		 */
+		if (c->mode == TIMER_INTTC && vatpit_get_out(vatpit, channel))
+			c->status |= TIMER_STS_OUT;
+		else
+			c->status &= ~TIMER_STS_OUT;
+	}
+
+	return (0);
+}
+
+static int
+pit_readback(struct vatpit *vatpit, uint8_t cmd)
+{
+	int error;
+
+	/*
+	 * The readback command can apply to all timers.
+	 */
+	error = 0;
+	if (cmd & TIMER_RB_CTR_0)
+		error = pit_readback1(vatpit, 0, cmd);
+	if (!error && cmd & TIMER_RB_CTR_1)
+		error = pit_readback1(vatpit, 1, cmd);
+	if (!error && cmd & TIMER_RB_CTR_2)
+		error = pit_readback1(vatpit, 2, cmd);
+
+	return (error);
+}
+
+
+static int
 vatpit_update_mode(struct vatpit *vatpit, uint8_t val)
 {
 	struct channel *c;
@@ -206,7 +287,7 @@ vatpit_update_mode(struct vatpit *vatpit
 	mode = val & TIMER_MODE_MASK;
 
 	if (sel == TIMER_SEL_READBACK)
-		return (-1);
+		return (pit_readback(vatpit, val));
 
 	if (rw != TIMER_LATCH && rw != TIMER_16BIT)
 		return (-1);
@@ -229,53 +310,31 @@ vatpit_update_mode(struct vatpit *vatpit
 	else {
 		c->mode = mode;
 		c->olbyte = 0;	/* reset latch after reprogramming */
+		c->status |= TIMER_STS_NULLCNT;
 	}
 
 	return (0);
 }
 
-static int
-vatpit_get_out(struct vatpit *vatpit, int channel)
-{
-	struct channel *c;
-	sbintime_t delta_ticks;
-	int out;
-
-	c = &vatpit->channel[channel];
-
-	switch (c->mode) {
-	case TIMER_INTTC:
-		delta_ticks = (sbinuptime() - c->now_sbt) / vatpit->freq_sbt;
-		out = ((c->initial - delta_ticks) <= 0);
-		break;
-	default:
-		out = 0;
-		break;
-	}
-
-	return (out);
-}
-
 int
-vatpit_handler(void *vm, int vcpuid, struct vm_exit *vmexit)
+vatpit_handler(void *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax)
 {
 	struct vatpit *vatpit;
 	struct channel *c;
-	int port;
 	uint8_t val;
 	int error;
 
 	vatpit = vm_atpit(vm);
 
-	if (vmexit->u.inout.bytes != 1)
+	if (bytes != 1)
 		return (-1);
 
-	val = vmexit->u.inout.eax;
-	port = vmexit->u.inout.port;
+	val = *eax;
 
 	if (port == TIMER_MODE) {
-		if (vmexit->u.inout.in != 0) {
-			VATPIT_CTR0(vatpit, "vatpit attempt to read mode");
+		if (in) {
+			VM_CTR0(vatpit->vm, "vatpit attempt to read mode");
 			return (-1);
 		}
 
@@ -287,12 +346,19 @@ vatpit_handler(void *vm, int vcpuid, str
 	}
 
 	/* counter ports */
-	KASSERT(port >= TIMER_CNTR0 && vmexit->u.inout.port <= TIMER_CNTR2,
+	KASSERT(port >= TIMER_CNTR0 && port <= TIMER_CNTR2,
 	    ("invalid port 0x%x", port));
 	c = &vatpit->channel[port - TIMER_CNTR0];
 
 	VATPIT_LOCK(vatpit);
-	if (vmexit->u.inout.in) {
+	if (in && c->slatched) {
+		/*
+		 * Return the status byte if latched
+		 */
+		*eax = c->status;
+		c->slatched = false;
+		c->status = 0;
+	} else if (in) {
 		/*
 		 * The spec says that once the output latch is completely
 		 * read it should revert to "following" the counter. Use
@@ -307,13 +373,14 @@ vatpit_handler(void *vm, int vcpuid, str
 			if (c->frbyte)
 				tmp >>= 8;
 			tmp &= 0xff;
-			vmexit->u.inout.eax = tmp;
+			*eax = tmp;
 			c->frbyte ^= 1;
 		}  else
-			vmexit->u.inout.eax = c->ol[--c->olbyte];
+			*eax = c->ol[--c->olbyte];
 	} else {
-		c->cr[c->crbyte++] = vmexit->u.inout.eax;
+		c->cr[c->crbyte++] = *eax;
 		if (c->crbyte == 2) {
+			c->status &= ~TIMER_STS_NULLCNT;
 			c->frbyte = 0;
 			c->crbyte = 0;
 			c->initial = c->cr[0] | (uint16_t)c->cr[1] << 8;
@@ -332,6 +399,27 @@ vatpit_handler(void *vm, int vcpuid, str
 	return (0);
 }
 
+int
+vatpit_nmisc_handler(void *vm, int vcpuid, bool in, int port, int bytes,
+    uint32_t *eax)
+{
+	struct vatpit *vatpit;
+
+	vatpit = vm_atpit(vm);
+
+	if (in) {
+			VATPIT_LOCK(vatpit);
+			if (vatpit_get_out(vatpit, 2))
+				*eax = TMR2_OUT_STS;
+			else

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
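The vatpit.c diff is cut short above, but the counter arithmetic it
relies on appears earlier in the hunk and is easy to sanity-check
(illustrative only):

#include <assert.h>

#define	PIT_8254_FREQ		1193182
#define	TIMER_DIV(freq, hz)	(((freq) + (hz) / 2) / (hz))

/*
 * The fallback initial count that pit_update_counter() loads above,
 * TIMER_DIV(PIT_8254_FREQ, 100), corresponds to a 100 Hz period.
 */
static void
timer_div_example(void)
{
	assert(TIMER_DIV(PIT_8254_FREQ, 100) == 11932);
}
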

