svn commit: r332157 - in head: lib/libvmmapi sys/amd64/include sys/amd64/vmm sys/amd64/vmm/amd sys/amd64/vmm/intel

John Baldwin jhb at FreeBSD.org
Fri Apr 6 22:03:45 UTC 2018


Author: jhb
Date: Fri Apr  6 22:03:43 2018
New Revision: 332157
URL: https://svnweb.freebsd.org/changeset/base/332157

Log:
  Add a way to temporarily suspend and resume virtual CPUs.
  
  This is used as part of implementing run control in bhyve's debug
  server.  The hypervisor now maintains a set of "debugged" CPUs.
  Attempting to run a debugged CPU does not execute any guest
  instructions; instead it reports a VM_EXITCODE_DEBUG exit to
  the userland hypervisor.  Virtual CPUs are placed into the debugged
  state via vm_suspend_cpu() (implemented via a new VM_SUSPEND_CPU ioctl).
  Virtual CPUs can be resumed via vm_resume_cpu() (VM_RESUME_CPU ioctl).
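  
  For illustration only, a minimal libvmmapi consumer of the new calls
  could look like the sketch below; the VM name "testvm" and the bare
  error handling are placeholders, not part of this change.  Build with
  "cc -o dbgctl dbgctl.c -lvmmapi".
  
  #include <sys/param.h>
  #include <sys/cpuset.h>
  
  #include <stdio.h>
  
  #include <machine/vmm.h>
  #include <vmmapi.h>
  
  int
  main(void)
  {
          struct vmctx *ctx;
          cpuset_t dbg;
          int vcpu;
  
          /* Attach to an existing VM; "testvm" is a placeholder name. */
          ctx = vm_open("testvm");
          if (ctx == NULL) {
                  perror("vm_open");
                  return (1);
          }
  
          /* vcpu == -1 places every active vCPU into the debugged state. */
          if (vm_suspend_cpu(ctx, -1) != 0)
                  perror("vm_suspend_cpu");
  
          /* Read back the set of vCPUs currently stopped for debug. */
          if (vm_debug_cpus(ctx, &dbg) == 0) {
                  for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++)
                          if (CPU_ISSET(vcpu, &dbg))
                                  printf("vcpu %d stopped for debug\n", vcpu);
          }
  
          /* Resume a single vCPU, e.g. before single stepping it. */
          if (vm_resume_cpu(ctx, 0) != 0)
                  perror("vm_resume_cpu");
  
          return (0);
  }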
  
  The debug server suspends virtual CPUs when it wishes them to stop
  executing in the guest (for example, when a debugger attaches to the
  server).  The debug server can choose to resume only a subset of CPUs
  (for example, when single stepping) or it can choose to resume all
  CPUs.  The debug server must explicitly mark a CPU as resumed via
  vm_resume_cpu() before the virtual CPU will successfully execute any
  guest instructions.
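  
  A debug server built on top of this might handle the new exit code in
  its per-vCPU run loop along the lines of the following sketch.  The
  function names vcpu_loop() and debug_cpu_suspend() are illustrative
  only; in particular, debug_cpu_suspend() stands in for however the
  real server blocks a stopped vCPU until vm_resume_cpu() is called.
  
  #include <sys/param.h>
  
  #include <machine/vmm.h>
  #include <vmmapi.h>
  
  /*
   * Placeholder: block until the debug server has resumed this vCPU via
   * vm_resume_cpu().  A real server would sleep on a condition variable
   * rather than return immediately.
   */
  static void
  debug_cpu_suspend(int vcpu)
  {
          (void)vcpu;
  }
  
  static void
  vcpu_loop(struct vmctx *ctx, int vcpu)
  {
          struct vm_exit vmexit;
  
          for (;;) {
                  if (vm_run(ctx, vcpu, &vmexit) != 0)
                          break;
  
                  switch (vmexit.exitcode) {
                  case VM_EXITCODE_DEBUG:
                          /*
                           * The vCPU is in the debug set and executed no
                           * guest instructions.  Park it until it is
                           * explicitly resumed.
                           */
                          debug_cpu_suspend(vcpu);
                          break;
                  default:
                          /* Other exit codes handled as before. */
                          break;
                  }
          }
  }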
  
  Reviewed by:	avg, grehan
  Tested on:	Intel (jhb), AMD (avg)
  Differential Revision:	https://reviews.freebsd.org/D14466

Modified:
  head/lib/libvmmapi/vmmapi.c
  head/lib/libvmmapi/vmmapi.h
  head/sys/amd64/include/vmm.h
  head/sys/amd64/include/vmm_dev.h
  head/sys/amd64/vmm/amd/svm.c
  head/sys/amd64/vmm/intel/vmx.c
  head/sys/amd64/vmm/vmm.c
  head/sys/amd64/vmm/vmm_dev.c

Modified: head/lib/libvmmapi/vmmapi.c
==============================================================================
--- head/lib/libvmmapi/vmmapi.c	Fri Apr  6 21:50:09 2018	(r332156)
+++ head/lib/libvmmapi/vmmapi.c	Fri Apr  6 22:03:43 2018	(r332157)
@@ -1374,6 +1374,13 @@ vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
 }
 
 int
+vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus)
+{
+
+	return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus));
+}
+
+int
 vm_activate_cpu(struct vmctx *ctx, int vcpu)
 {
 	struct vm_activate_cpu ac;
@@ -1386,6 +1393,30 @@ vm_activate_cpu(struct vmctx *ctx, int vcpu)
 }
 
 int
+vm_suspend_cpu(struct vmctx *ctx, int vcpu)
+{
+	struct vm_activate_cpu ac;
+	int error;
+
+	bzero(&ac, sizeof(struct vm_activate_cpu));
+	ac.vcpuid = vcpu;
+	error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
+	return (error);
+}
+
+int
+vm_resume_cpu(struct vmctx *ctx, int vcpu)
+{
+	struct vm_activate_cpu ac;
+	int error;
+
+	bzero(&ac, sizeof(struct vm_activate_cpu));
+	ac.vcpuid = vcpu;
+	error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
+	return (error);
+}
+
+int
 vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *info1, uint64_t *info2)
 {
 	struct vm_intinfo vmii;
@@ -1501,7 +1532,8 @@ vm_get_ioctls(size_t *len)
 	    VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
 	    VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
 	    VM_GLA2GPA_NOFAULT,
-	    VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SET_INTINFO, VM_GET_INTINFO,
+	    VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SUSPEND_CPU, VM_RESUME_CPU,
+	    VM_SET_INTINFO, VM_GET_INTINFO,
 	    VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME,
 	    VM_RESTART_INSTRUCTION };
 

Modified: head/lib/libvmmapi/vmmapi.h
==============================================================================
--- head/lib/libvmmapi/vmmapi.h	Fri Apr  6 21:50:09 2018	(r332156)
+++ head/lib/libvmmapi/vmmapi.h	Fri Apr  6 22:03:43 2018	(r332157)
@@ -216,7 +216,10 @@ int	vcpu_reset(struct vmctx *ctx, int vcpu);
 
 int	vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus);
 int	vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus);
+int	vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus);
 int	vm_activate_cpu(struct vmctx *ctx, int vcpu);
+int	vm_suspend_cpu(struct vmctx *ctx, int vcpu);
+int	vm_resume_cpu(struct vmctx *ctx, int vcpu);
 
 /*
  * FreeBSD specific APIs

Modified: head/sys/amd64/include/vmm.h
==============================================================================
--- head/sys/amd64/include/vmm.h	Fri Apr  6 21:50:09 2018	(r332156)
+++ head/sys/amd64/include/vmm.h	Fri Apr  6 22:03:43 2018	(r332157)
@@ -231,8 +231,11 @@ int vm_get_x2apic_state(struct vm *vm, int vcpu, enum 
 int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
 int vm_apicid2vcpuid(struct vm *vm, int apicid);
 int vm_activate_cpu(struct vm *vm, int vcpu);
+int vm_suspend_cpu(struct vm *vm, int vcpu);
+int vm_resume_cpu(struct vm *vm, int vcpu);
 struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
 void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
+void vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip);
 void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
 void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
 void vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip);
@@ -256,6 +259,7 @@ typedef void (*vm_rendezvous_func_t)(struct vm *vm, in
 void vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
     vm_rendezvous_func_t func, void *arg);
 cpuset_t vm_active_cpus(struct vm *vm);
+cpuset_t vm_debug_cpus(struct vm *vm);
 cpuset_t vm_suspended_cpus(struct vm *vm);
 #endif	/* _SYS__CPUSET_H_ */
 
@@ -280,6 +284,8 @@ vcpu_reqidle(struct vm_eventinfo *info)
 	return (*info->iptr);
 }
 
+int vcpu_debugged(struct vm *vm, int vcpuid);
+
 /*
  * Return 1 if device indicated by bus/slot/func is supposed to be a
  * pci passthrough device.
@@ -540,6 +546,7 @@ enum vm_exitcode {
 	VM_EXITCODE_MWAIT,
 	VM_EXITCODE_SVM,
 	VM_EXITCODE_REQIDLE,
+	VM_EXITCODE_DEBUG,
 	VM_EXITCODE_MAX
 };
 

Modified: head/sys/amd64/include/vmm_dev.h
==============================================================================
--- head/sys/amd64/include/vmm_dev.h	Fri Apr  6 21:50:09 2018	(r332156)
+++ head/sys/amd64/include/vmm_dev.h	Fri Apr  6 22:03:43 2018	(r332157)
@@ -209,6 +209,7 @@ struct vm_cpuset {
 };
 #define	VM_ACTIVE_CPUS		0
 #define	VM_SUSPENDED_CPUS	1
+#define	VM_DEBUG_CPUS		2
 
 struct vm_intinfo {
 	int		vcpuid;
@@ -292,6 +293,8 @@ enum {
 	/* vm_cpuset */
 	IOCNUM_ACTIVATE_CPU = 90,
 	IOCNUM_GET_CPUSET = 91,
+	IOCNUM_SUSPEND_CPU = 92,
+	IOCNUM_RESUME_CPU = 93,
 
 	/* RTC */
 	IOCNUM_RTC_READ = 100,
@@ -386,6 +389,10 @@ enum {
 	_IOW('v', IOCNUM_ACTIVATE_CPU, struct vm_activate_cpu)
 #define	VM_GET_CPUS	\
 	_IOW('v', IOCNUM_GET_CPUSET, struct vm_cpuset)
+#define	VM_SUSPEND_CPU \
+	_IOW('v', IOCNUM_SUSPEND_CPU, struct vm_activate_cpu)
+#define	VM_RESUME_CPU \
+	_IOW('v', IOCNUM_RESUME_CPU, struct vm_activate_cpu)
 #define	VM_SET_INTINFO	\
 	_IOW('v', IOCNUM_SET_INTINFO, struct vm_intinfo)
 #define	VM_GET_INTINFO	\

Modified: head/sys/amd64/vmm/amd/svm.c
==============================================================================
--- head/sys/amd64/vmm/amd/svm.c	Fri Apr  6 21:50:09 2018	(r332156)
+++ head/sys/amd64/vmm/amd/svm.c	Fri Apr  6 22:03:43 2018	(r332157)
@@ -2015,6 +2015,12 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t 
 			break;
 		}
 
+		if (vcpu_debugged(vm, vcpu)) {
+			enable_gintr();
+			vm_exit_debug(vm, vcpu, state->rip);
+			break;
+		}
+
 		svm_inj_interrupts(svm_sc, vcpu, vlapic);
 
 		/* Activate the nested pmap on 'curcpu' */

Modified: head/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- head/sys/amd64/vmm/intel/vmx.c	Fri Apr  6 21:50:09 2018	(r332156)
+++ head/sys/amd64/vmm/intel/vmx.c	Fri Apr  6 22:03:43 2018	(r332157)
@@ -2746,6 +2746,12 @@ vmx_run(void *arg, int vcpu, register_t rip, pmap_t pm
 			break;
 		}
 
+		if (vcpu_debugged(vm, vcpu)) {
+			enable_intr();
+			vm_exit_debug(vmx->vm, vcpu, rip);
+			break;
+		}
+
 		vmx_run_trace(vmx, vcpu);
 		vmx_dr_enter_guest(vmxctx);
 		rc = vmx_enter_guest(vmxctx, vmx, launched);

Modified: head/sys/amd64/vmm/vmm.c
==============================================================================
--- head/sys/amd64/vmm/vmm.c	Fri Apr  6 21:50:09 2018	(r332156)
+++ head/sys/amd64/vmm/vmm.c	Fri Apr  6 22:03:43 2018	(r332157)
@@ -152,6 +152,7 @@ struct vm {
 	struct vpmtmr	*vpmtmr;		/* (i) virtual ACPI PM timer */
 	struct vrtc	*vrtc;			/* (o) virtual RTC */
 	volatile cpuset_t active_cpus;		/* (i) active vcpus */
+	volatile cpuset_t debug_cpus;		/* (i) vcpus stopped for debug */
 	int		suspend;		/* (i) stop VM execution */
 	volatile cpuset_t suspended_cpus; 	/* (i) suspended vcpus */
 	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
@@ -415,6 +416,7 @@ vm_init(struct vm *vm, bool create)
 		vm->vrtc = vrtc_init(vm);
 
 	CPU_ZERO(&vm->active_cpus);
+	CPU_ZERO(&vm->debug_cpus);
 
 	vm->suspend = 0;
 	CPU_ZERO(&vm->suspended_cpus);
@@ -1283,6 +1285,9 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_dis
 		if (vcpu_should_yield(vm, vcpuid))
 			break;
 
+		if (vcpu_debugged(vm, vcpuid))
+			break;
+
 		/*
 		 * Some Linux guests implement "halt" by having all vcpus
 		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
@@ -1554,6 +1559,17 @@ vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t 
 }
 
 void
+vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip)
+{
+	struct vm_exit *vmexit;
+
+	vmexit = vm_exitinfo(vm, vcpuid);
+	vmexit->rip = rip;
+	vmexit->inst_length = 0;
+	vmexit->exitcode = VM_EXITCODE_DEBUG;
+}
+
+void
 vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
 {
 	struct vm_exit *vmexit;
@@ -2267,11 +2283,67 @@ vm_activate_cpu(struct vm *vm, int vcpuid)
 	return (0);
 }
 
+int
+vm_suspend_cpu(struct vm *vm, int vcpuid)
+{
+	int i;
+
+	if (vcpuid < -1 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (vcpuid == -1) {
+		vm->debug_cpus = vm->active_cpus;
+		for (i = 0; i < VM_MAXCPU; i++) {
+			if (CPU_ISSET(i, &vm->active_cpus))
+				vcpu_notify_event(vm, i, false);
+		}
+	} else {
+		if (!CPU_ISSET(vcpuid, &vm->active_cpus))
+			return (EINVAL);
+
+		CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus);
+		vcpu_notify_event(vm, vcpuid, false);
+	}
+	return (0);
+}
+
+int
+vm_resume_cpu(struct vm *vm, int vcpuid)
+{
+
+	if (vcpuid < -1 || vcpuid >= VM_MAXCPU)
+		return (EINVAL);
+
+	if (vcpuid == -1) {
+		CPU_ZERO(&vm->debug_cpus);
+	} else {
+		if (!CPU_ISSET(vcpuid, &vm->debug_cpus))
+			return (EINVAL);
+
+		CPU_CLR_ATOMIC(vcpuid, &vm->debug_cpus);
+	}
+	return (0);
+}
+
+int
+vcpu_debugged(struct vm *vm, int vcpuid)
+{
+
+	return (CPU_ISSET(vcpuid, &vm->debug_cpus));
+}
+
 cpuset_t
 vm_active_cpus(struct vm *vm)
 {
 
 	return (vm->active_cpus);
+}
+
+cpuset_t
+vm_debug_cpus(struct vm *vm)
+{
+
+	return (vm->debug_cpus);
 }
 
 cpuset_t

Modified: head/sys/amd64/vmm/vmm_dev.c
==============================================================================
--- head/sys/amd64/vmm/vmm_dev.c	Fri Apr  6 21:50:09 2018	(r332156)
+++ head/sys/amd64/vmm/vmm_dev.c	Fri Apr  6 22:03:43 2018	(r332157)
@@ -690,11 +690,21 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t da
 			*cpuset = vm_active_cpus(sc->vm);
 		else if (vm_cpuset->which == VM_SUSPENDED_CPUS)
 			*cpuset = vm_suspended_cpus(sc->vm);
+		else if (vm_cpuset->which == VM_DEBUG_CPUS)
+			*cpuset = vm_debug_cpus(sc->vm);
 		else
 			error = EINVAL;
 		if (error == 0)
 			error = copyout(cpuset, vm_cpuset->cpus, size);
 		free(cpuset, M_TEMP);
+		break;
+	case VM_SUSPEND_CPU:
+		vac = (struct vm_activate_cpu *)data;
+		error = vm_suspend_cpu(sc->vm, vac->vcpuid);
+		break;
+	case VM_RESUME_CPU:
+		vac = (struct vm_activate_cpu *)data;
+		error = vm_resume_cpu(sc->vm, vac->vcpuid);
 		break;
 	case VM_SET_INTINFO:
 		vmii = (struct vm_intinfo *)data;

