socsvn commit: r269399 - in soc2014/mihai/bhyve-icache-head/sys/amd64: include vmm

mihai at FreeBSD.org mihai at FreeBSD.org
Wed Jun 11 12:25:33 UTC 2014


Author: mihai
Date: Wed Jun 11 12:25:32 2014
New Revision: 269399
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=269399

Log:
  sys: amd64: vmm: vmm.c: fix locking issues when caching/deleting instructions

Modified:
  soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h
  soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c

Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h	Wed Jun 11 11:27:44 2014	(r269398)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h	Wed Jun 11 12:25:32 2014	(r269399)
@@ -39,4 +39,10 @@
 		    struct vie *vie);
 int vm_inst_cache_delete(struct vm *vm, uint64_t rip, uint64_t cr3);
 
+#define vm_inst_cache_lock_init(vm)	sx_init(&vm->inst_cache_lock, "VM INST CACHE LOCK")
+#define vm_inst_cache_xlock(vm)		sx_xlock(&vm->inst_cache_lock)
+#define vm_inst_cache_slock(vm)		sx_slock(&vm->inst_cache_lock)
+#define vm_inst_cache_unlock(vm)	sx_unlock(&vm->inst_cache_lock)
+#define vm_inst_cache_lock_destroy(vm)	sx_destroy(&vm->inst_cache_lock)
+
 #endif	/* _VMM_INSTRUCTION_EMUL_H_ */

Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c	Wed Jun 11 11:27:44 2014	(r269398)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c	Wed Jun 11 12:25:32 2014	(r269399)
@@ -145,6 +145,8 @@
 	volatile cpuset_t suspended_cpus;
 
 	volatile cpuset_t halted_cpus;
+
+	struct sx inst_cache_lock;
 };
 
 static int vmm_initialized;
@@ -371,6 +373,8 @@
 	vm->vatpic = vatpic_init(vm);
 	vm->vatpit = vatpit_init(vm);
 
+	vm_inst_cache_lock_init(vm);
+
 	for (i = 0; i < VM_MAXCPU; i++) {
 		vcpu_init(vm, i);
 		guest_msrs_init(vm, i);
@@ -407,6 +411,8 @@
 	vatpic_cleanup(vm->vatpic);
 	vioapic_cleanup(vm->vioapic);
 
+	vm_inst_cache_lock_destroy(vm);
+
 	for (i = 0; i < vm->num_mem_segs; i++)
 		vm_free_mem_seg(vm, &vm->mem_segs[i]);
 
@@ -1099,21 +1105,18 @@
 	struct vm_map *map;
 	struct vcpu *vcpu;
 	struct vm_exit *vme;
+	bool inst_cache_locked;
 
 	vcpu = &vm->vcpu[vcpuid];
 	vme = &vcpu->exitinfo;
+	inst_cache_locked = false;
 
 	ftype = vme->u.paging.fault_type;
 	KASSERT(ftype == VM_PROT_READ ||
 	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
 	    ("vm_handle_paging: invalid fault_type %d", ftype));
 
-	if (ftype == VM_PROT_WRITE) {
-		/* Remove all the instructions that resides in this page */
-		vm_inst_cache_delete(vm, vme->u.paging.gpa, vme->u.paging.cr3);
-	}
-
-
+again:
 	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
 		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
 		    vme->u.paging.gpa, ftype);
@@ -1123,14 +1126,32 @@
 
 	map = &vm->vmspace->vm_map;
 	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);
+
+	if (rv == KERN_PROTECTION_FAILURE) {
+		/*
+		 * Try to resolve the fault again after evicting any related
+		 * instructions from the cache. This may cause the read-only
+		 * restrictions to go away.
+		 */
+		if (!inst_cache_locked) {
+			inst_cache_locked = true;
+			vm_inst_cache_xlock(vm);
+			vm_inst_cache_delete(vm, vme->u.paging.gpa, vme->u.paging.cr3);
+			goto again;
+		}
+	}
 
 	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
 	    "ftype = %d", rv, vme->u.paging.gpa, ftype);
 
+done:
+	if (inst_cache_locked)
+		vm_inst_cache_unlock(vm);
+
 	if (rv != KERN_SUCCESS)
 		return (EFAULT);
-done:
-	/* restart execution at the faulting instruction */
+
+	/* restart execution at the faulting instruction */
 	vme->inst_length = 0;
 
 	return (0);
@@ -1163,7 +1184,10 @@
 	vie = &vme->u.inst_emul.vie;
 
 	/* Check to see if the instruction is cached */
+	vm_inst_cache_slock(vm);
 	if (vm_inst_cache_lookup(vm, rip, cr3, vie)) {
+		vm_inst_cache_unlock(vm);
+
 		vie_init(vie);
 
 		/* Fetch, decode and emulate the faulting instruction */
@@ -1174,9 +1198,11 @@
 		if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, vie) != 0)
 			return (EFAULT);
 
+		vm_inst_cache_xlock(vm);
 		/* Cache decoded instruction for further use */
 		vm_inst_cache_add(vm, rip, cr3, paging_mode, vie);
 	}
+	vm_inst_cache_unlock(vm);
 
 	/* return to userland unless this is an in-kernel emulated device */
 	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {


More information about the svn-soc-all mailing list