svn commit: r266626 - in head/sys/amd64: include vmm vmm/intel

Neel Natu neel at FreeBSD.org
Sat May 24 19:13:27 UTC 2014


Author: neel
Date: Sat May 24 19:13:25 2014
New Revision: 266626
URL: http://svnweb.freebsd.org/changeset/base/266626

Log:
  When injecting a page fault into the guest, also update the guest's %cr2 to
  indicate the faulting linear address.
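
  On real hardware a #PF loads %cr2 with the faulting linear address,
  and the guest's page fault handler reads it from there.  A minimal
  sketch of the new call shape at an emulation call site (taken from
  the patch below; the surrounding page-walk code is elided):

	pfcode = pf_error_code(usermode, prot, 0, pte);
	/*
	 * gla is the guest linear address being translated; it is
	 * now propagated into the guest's %cr2 before injection.
	 */
	vm_inject_pf(vm, vcpuid, pfcode, gla);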
  
  If the guest PML4 entry has the PG_PS bit set, then inject a page fault into
  the guest with the PGEX_RSV bit set in the error_code.
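
  PG_PS is only defined for PDEs (2MB pages) and PDPTEs (1GB pages);
  in a PML4 entry it is a reserved bit, and real hardware reports such
  a walk as a reserved-bit violation.  The check from the patch,
  annotated:

	if (nlevels > 0 && (pte & PG_PS) != 0) {
		if (pgsize > 1 * GB) {
			/* PG_PS in a PML4E: reserved-bit #PF */
			pfcode = pf_error_code(usermode, prot, 1, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);
			goto pagefault;
		}
		break;		/* valid 2MB or 1GB superpage */
	}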
  
  Get rid of redundant checks for PG_RW violations when walking the page
  tables.
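
  The combined fault check already rejects a write through a read-only
  mapping, so the standalone PG_RW test that followed it could never
  fire.  Illustrative outline of the (now single) check:

	if ((pte & PG_V) == 0 ||		/* not present */
	    (usermode && (pte & PG_U) == 0) ||	/* supervisor-only */
	    (writable && (pte & PG_RW) == 0)) {	/* read-only */
		pfcode = pf_error_code(usermode, prot, 0, pte);
		vm_inject_pf(vm, vcpuid, pfcode, gla);
		goto pagefault;
	}
	/* removed: if (writable && (pte & PG_RW) == 0) goto error; */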

Modified:
  head/sys/amd64/include/vmm.h
  head/sys/amd64/vmm/intel/vmx.c
  head/sys/amd64/vmm/vmm.c
  head/sys/amd64/vmm/vmm_instruction_emul.c

Modified: head/sys/amd64/include/vmm.h
==============================================================================
--- head/sys/amd64/include/vmm.h	Sat May 24 19:03:30 2014	(r266625)
+++ head/sys/amd64/include/vmm.h	Sat May 24 19:13:25 2014	(r266626)
@@ -237,7 +237,7 @@ int vm_exception_pending(struct vm *vm, 
 
 void vm_inject_gp(struct vm *vm, int vcpuid); /* general protection fault */
 void vm_inject_ud(struct vm *vm, int vcpuid); /* undefined instruction fault */
-void vm_inject_pf(struct vm *vm, int vcpuid, int error_code); /* page fault */
+void vm_inject_pf(struct vm *vm, int vcpuid, int error_code, uint64_t cr2);
 
 enum vm_reg_name vm_segment_name(int seg_encoding);
 
@@ -284,6 +284,7 @@ enum vm_reg_name {
 	VM_REG_GUEST_IDTR,
 	VM_REG_GUEST_GDTR,
 	VM_REG_GUEST_EFER,
+	VM_REG_GUEST_CR2,
 	VM_REG_LAST
 };
 

Modified: head/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- head/sys/amd64/vmm/intel/vmx.c	Sat May 24 19:03:30 2014	(r266625)
+++ head/sys/amd64/vmm/intel/vmx.c	Sat May 24 19:13:25 2014	(r266626)
@@ -2383,6 +2383,8 @@ vmxctx_regptr(struct vmxctx *vmxctx, int
 		return (&vmxctx->guest_r14);
 	case VM_REG_GUEST_R15:
 		return (&vmxctx->guest_r15);
+	case VM_REG_GUEST_CR2:
+		return (&vmxctx->guest_cr2);
 	default:
 		break;
 	}

Modified: head/sys/amd64/vmm/vmm.c
==============================================================================
--- head/sys/amd64/vmm/vmm.c	Sat May 24 19:03:30 2014	(r266625)
+++ head/sys/amd64/vmm/vmm.c	Sat May 24 19:13:25 2014	(r266626)
@@ -1441,13 +1441,20 @@ vm_inject_fault(struct vm *vm, int vcpui
 }
 
 void
-vm_inject_pf(struct vm *vm, int vcpuid, int error_code)
+vm_inject_pf(struct vm *vm, int vcpuid, int error_code, uint64_t cr2)
 {
 	struct vm_exception pf = {
 		.vector = IDT_PF,
 		.error_code_valid = 1,
 		.error_code = error_code
 	};
+	int error;
+
+	VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
+	    error_code, cr2);
+
+	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
+	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
 
 	vm_inject_fault(vm, vcpuid, &pf);
 }

Modified: head/sys/amd64/vmm/vmm_instruction_emul.c
==============================================================================
--- head/sys/amd64/vmm/vmm_instruction_emul.c	Sat May 24 19:03:30 2014	(r266625)
+++ head/sys/amd64/vmm/vmm_instruction_emul.c	Sat May 24 19:13:25 2014	(r266626)
@@ -599,7 +599,7 @@ vie_init(struct vie *vie)
 }
 
 static int
-pf_error_code(int usermode, int prot, uint64_t pte)
+pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)
 {
 	int error_code = 0;
 
@@ -609,6 +609,8 @@ pf_error_code(int usermode, int prot, ui
 		error_code |= PGEX_W;
 	if (usermode)
 		error_code |= PGEX_U;
+	if (rsvd)
+		error_code |= PGEX_RSV;
 	if (prot & VM_PROT_EXECUTE)
 		error_code |= PGEX_I;
 
@@ -679,14 +681,12 @@ restart:
 			if ((pte32 & PG_V) == 0 ||
 			    (usermode && (pte32 & PG_U) == 0) ||
 			    (writable && (pte32 & PG_RW) == 0)) {
-				pfcode = pf_error_code(usermode, prot, pte32);
-				vm_inject_pf(vm, vcpuid, pfcode);
+				pfcode = pf_error_code(usermode, prot, 0,
+				    pte32);
+				vm_inject_pf(vm, vcpuid, pfcode, gla);
 				goto pagefault;
 			}
 
-			if (writable && (pte32 & PG_RW) == 0)
-				goto error;
-
 			/*
 			 * Emulate the x86 MMU's management of the accessed
 			 * and dirty flags. While the accessed flag is set
@@ -735,8 +735,8 @@ restart:
 		pte = ptpbase[ptpindex];
 
 		if ((pte & PG_V) == 0) {
-			pfcode = pf_error_code(usermode, prot, pte);
-			vm_inject_pf(vm, vcpuid, pfcode);
+			pfcode = pf_error_code(usermode, prot, 0, pte);
+			vm_inject_pf(vm, vcpuid, pfcode, gla);
 			goto pagefault;
 		}
 
@@ -762,14 +762,11 @@ restart:
 		if ((pte & PG_V) == 0 ||
 		    (usermode && (pte & PG_U) == 0) ||
 		    (writable && (pte & PG_RW) == 0)) {
-			pfcode = pf_error_code(usermode, prot, pte);
-			vm_inject_pf(vm, vcpuid, pfcode);
+			pfcode = pf_error_code(usermode, prot, 0, pte);
+			vm_inject_pf(vm, vcpuid, pfcode, gla);
 			goto pagefault;
 		}
 
-		if (writable && (pte & PG_RW) == 0)
-			goto error;
-
 		/* Set the accessed bit in the page table entry */
 		if ((pte & PG_A) == 0) {
 			if (atomic_cmpset_64(&ptpbase[ptpindex],
@@ -779,10 +776,12 @@ restart:
 		}
 
 		if (nlevels > 0 && (pte & PG_PS) != 0) {
-			if (pgsize > 1 * GB)
-				goto error;
-			else
-				break;
+			if (pgsize > 1 * GB) {
+				pfcode = pf_error_code(usermode, prot, 1, pte);
+				vm_inject_pf(vm, vcpuid, pfcode, gla);
+				goto pagefault;
+			}
+			break;
 		}
 
 		ptpphys = pte;

