svn commit: r271554 - in projects/bhyve_svm/sys/amd64: include vmm vmm/amd vmm/intel
Neel Natu
neel at FreeBSD.org
Sat Sep 13 22:16:42 UTC 2014
Author: neel
Date: Sat Sep 13 22:16:40 2014
New Revision: 271554
URL: http://svnweb.freebsd.org/changeset/base/271554
Log:
AMD processors that have the SVM decode assist capability will store the
instruction bytes in the VMCB on a nested page fault. This is useful because
it saves having to walk the guest page tables to fetch the instruction.
vie_init() now takes two additional parameters 'inst_bytes' and 'inst_len'
that map directly to 'vie->inst[]' and 'vie->num_valid'.
The instruction emulation handler skips calling 'vmm_fetch_instruction()'
if 'vie->num_valid' is non-zero.
The use of this capability can be turned off by setting the sysctl/tunable
'hw.vmm.svm.disable_npf_assist' to '1'.
Reviewed by: Anish Gupta (akgupt3 at gmail.com)
Discussed with: grehan
Modified:
projects/bhyve_svm/sys/amd64/include/vmm_instruction_emul.h
projects/bhyve_svm/sys/amd64/vmm/amd/svm.c
projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.h
projects/bhyve_svm/sys/amd64/vmm/intel/vmx.c
projects/bhyve_svm/sys/amd64/vmm/vmm.c
projects/bhyve_svm/sys/amd64/vmm/vmm_instruction_emul.c
Modified: projects/bhyve_svm/sys/amd64/include/vmm_instruction_emul.h
==============================================================================
--- projects/bhyve_svm/sys/amd64/include/vmm_instruction_emul.h Sat Sep 13 22:14:19 2014 (r271553)
+++ projects/bhyve_svm/sys/amd64/include/vmm_instruction_emul.h Sat Sep 13 22:16:40 2014 (r271554)
@@ -93,7 +93,7 @@ int vmm_fetch_instruction(struct vm *vm,
int vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
uint64_t gla, int prot, uint64_t *gpa);
-void vie_init(struct vie *vie);
+void vie_init(struct vie *vie, const char *inst_bytes, int inst_length);
/*
* Decode the instruction fetched into 'vie' so it can be emulated.
Modified: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/amd/svm.c Sat Sep 13 22:14:19 2014 (r271553)
+++ projects/bhyve_svm/sys/amd64/vmm/amd/svm.c Sat Sep 13 22:16:40 2014 (r271554)
@@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
+#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/pmap.h>
@@ -67,6 +68,9 @@ __FBSDID("$FreeBSD$");
#include "svm_softc.h"
#include "npt.h"
+SYSCTL_DECL(_hw_vmm);
+SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);
+
/*
* SVM CPUID function 0x8000_000A, edx bit decoding.
*/
@@ -96,9 +100,17 @@ extern struct pcpu __pcpu[];
static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc);
static uint32_t svm_feature; /* AMD SVM features. */
+SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0,
+ "SVM features advertised by CPUID.8000000AH:EDX");
+
+static int disable_npf_assist;
+SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
+ &disable_npf_assist, 0, NULL);
/* Maximum ASIDs supported by the processor */
static uint32_t nasid;
+SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RD, &nasid, 0,
+ "Number of ASIDs supported by this processor");
/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];
@@ -218,6 +230,12 @@ flush_by_asid(void)
return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}
+static __inline int
+decode_assist(void)
+{
+ return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
+}
+
/*
* Enable SVM for a CPU.
*/
@@ -792,19 +810,22 @@ svm_handle_inst_emul(struct vmcb *vmcb,
{
struct vm_guest_paging *paging;
struct vmcb_segment *seg;
+ struct vmcb_ctrl *ctrl;
+ char *inst_bytes;
+ int inst_len;
+ ctrl = &vmcb->ctrl;
paging = &vmexit->u.inst_emul.paging;
+
vmexit->exitcode = VM_EXITCODE_INST_EMUL;
vmexit->u.inst_emul.gpa = gpa;
vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
svm_paging_info(vmcb, paging);
/*
- * If DecodeAssist SVM feature doesn't exist, we don't have NPF
- * instuction length. RIP will be calculated based on the length
- * determined by instruction emulation.
+ * The inst_length will be determined by decoding the instruction.
*/
- vmexit->inst_length = VIE_INST_SIZE;
+ vmexit->inst_length = 0;
seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
switch(paging->cpu_mode) {
@@ -820,6 +841,18 @@ svm_handle_inst_emul(struct vmcb *vmcb,
vmexit->u.inst_emul.cs_d = 0;
break;
}
+
+ /*
+ * Copy the instruction bytes into 'vie' if available.
+ */
+ if (decode_assist() && !disable_npf_assist) {
+ inst_len = ctrl->inst_len;
+ inst_bytes = ctrl->inst_bytes;
+ } else {
+ inst_len = 0;
+ inst_bytes = NULL;
+ }
+ vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}
/*
@@ -1182,7 +1215,7 @@ svm_vmexit(struct svm_softc *svm_sc, int
loop = false;
break;
default:
- /* Return to user space. */
+ /* Return to user space. */
loop = false;
update_rip = false;
VCPU_CTR3(svm_sc->vm, vcpu, "VMEXIT=0x%lx"
@@ -1190,7 +1223,7 @@ svm_vmexit(struct svm_softc *svm_sc, int
ctrl->exitcode, info1, info2);
VCPU_CTR3(svm_sc->vm, vcpu, "SVM:RIP: 0x%lx nRIP:0x%lx"
" Inst decoder len:%d\n", state->rip,
- ctrl->nrip, ctrl->inst_decode_size);
+ ctrl->nrip, ctrl->inst_len);
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
break;
}
Modified: projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.h
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.h Sat Sep 13 22:14:19 2014 (r271553)
+++ projects/bhyve_svm/sys/amd64/vmm/amd/vmcb.h Sat Sep 13 22:16:40 2014 (r271554)
@@ -218,8 +218,8 @@ struct vmcb_ctrl {
uint32_t vmcb_clean; /* 0xC0: VMCB clean bits for caching */
uint32_t :32; /* 0xC4: Reserved */
uint64_t nrip; /* 0xC8: Guest next nRIP. */
- uint8_t inst_decode_size; /* 0xD0: Instruction decode */
- uint8_t inst_decode_bytes[15];
+ uint8_t inst_len; /* 0xD0: #NPF decode assist */
+ uint8_t inst_bytes[15];
uint8_t padd6[0x320];
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb_ctrl) == 1024);
Modified: projects/bhyve_svm/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/intel/vmx.c Sat Sep 13 22:14:19 2014 (r271553)
+++ projects/bhyve_svm/sys/amd64/vmm/intel/vmx.c Sat Sep 13 22:16:40 2014 (r271554)
@@ -1847,6 +1847,7 @@ vmexit_inst_emul(struct vm_exit *vmexit,
vmexit->u.inst_emul.cs_d = 0;
break;
}
+ vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
}
static int
Modified: projects/bhyve_svm/sys/amd64/vmm/vmm.c
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/vmm.c Sat Sep 13 22:14:19 2014 (r271553)
+++ projects/bhyve_svm/sys/amd64/vmm/vmm.c Sat Sep 13 22:16:40 2014 (r271554)
@@ -1216,7 +1216,7 @@ vm_handle_inst_emul(struct vm *vm, int v
mem_region_read_t mread;
mem_region_write_t mwrite;
enum vm_cpu_mode cpu_mode;
- int cs_d, error;
+ int cs_d, error, length;
vcpu = &vm->vcpu[vcpuid];
vme = &vcpu->exitinfo;
@@ -1228,11 +1228,21 @@ vm_handle_inst_emul(struct vm *vm, int v
paging = &vme->u.inst_emul.paging;
cpu_mode = paging->cpu_mode;
- vie_init(vie);
-
/* Fetch, decode and emulate the faulting instruction */
- error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip,
- vme->inst_length, vie);
+ if (vie->num_valid == 0) {
+ /*
+ * If the instruction length is not known then assume a
+ * maximum size instruction.
+ */
+ length = vme->inst_length ? vme->inst_length : VIE_INST_SIZE;
+ error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip,
+ length, vie);
+ } else {
+ /*
+ * The instruction bytes have already been copied into 'vie'
+ */
+ error = 0;
+ }
if (error == 1)
return (0); /* Resume guest to handle page fault */
else if (error == -1)
@@ -1243,13 +1253,10 @@ vm_handle_inst_emul(struct vm *vm, int v
if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0)
return (EFAULT);
- /*
- * AMD-V doesn't provide instruction length which is nRIP - RIP
- * for some of the exit including Nested Page Fault. Use instruction
- * length calculated by software instruction emulation to update
- * RIP of vcpu.
+ /*
+ * If the instruction length is not specified then update it now.
*/
- if (vme->inst_length == VIE_INST_SIZE)
+ if (vme->inst_length == 0)
vme->inst_length = vie->num_processed;
/* return to userland unless this is an in-kernel emulated device */
Modified: projects/bhyve_svm/sys/amd64/vmm/vmm_instruction_emul.c
==============================================================================
--- projects/bhyve_svm/sys/amd64/vmm/vmm_instruction_emul.c Sat Sep 13 22:14:19 2014 (r271553)
+++ projects/bhyve_svm/sys/amd64/vmm/vmm_instruction_emul.c Sat Sep 13 22:16:40 2014 (r271554)
@@ -1025,13 +1025,20 @@ vie_calculate_gla(enum vm_cpu_mode cpu_m
#ifdef _KERNEL
void
-vie_init(struct vie *vie)
+vie_init(struct vie *vie, const char *inst_bytes, int inst_length)
{
+ KASSERT(inst_length >= 0 && inst_length <= VIE_INST_SIZE,
+ ("%s: invalid instruction length (%d)", __func__, inst_length));
bzero(vie, sizeof(struct vie));
vie->base_register = VM_REG_LAST;
vie->index_register = VM_REG_LAST;
+
+ if (inst_length) {
+ bcopy(inst_bytes, vie->inst, inst_length);
+ vie->num_valid = inst_length;
+ }
}
static int
More information about the svn-src-projects
mailing list