socsvn commit: r288087 - in soc2015/mihai/bhyve-on-arm-head/sys/arm: conf include vmm
mihai at FreeBSD.org
mihai at FreeBSD.org
Wed Jul 8 12:42:09 UTC 2015
Author: mihai
Date: Wed Jul 8 12:42:04 2015
New Revision: 288087
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=288087
Log:
soc2015: mihai: bhyve: sys: arm: clean-up unused functions, implement MAP_MEMORY policy, implement SET/GET regs, implement GET mappings feature
Modified:
soc2015/mihai/bhyve-on-arm-head/sys/arm/conf/FVP_VE_CORTEX_A15x1
soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm.h
soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm_dev.h
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/arm.c
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.c
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.h
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/vmm.c
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/vmm_dev.c
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/vmm_mem.c
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/conf/FVP_VE_CORTEX_A15x1
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/conf/FVP_VE_CORTEX_A15x1 Wed Jul 8 12:39:47 2015 (r288086)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/conf/FVP_VE_CORTEX_A15x1 Wed Jul 8 12:42:04 2015 (r288087)
@@ -46,7 +46,7 @@
#options ROOTDEVNAME=\"ufs:/dev/da0\"
options MD_ROOT
-options MD_ROOT_SIZE=10240
+options MD_ROOT_SIZE=12288
makeoptions MFS_IMAGE=/root/soc2015/mihai/ramdisk/ramdisk.img
options ROOTDEVNAME=\"ffs:/dev/md0\"
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm.h
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm.h Wed Jul 8 12:39:47 2015 (r288086)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm.h Wed Jul 8 12:42:04 2015 (r288087)
@@ -15,6 +15,7 @@
* Identifiers for architecturally defined registers.
*/
enum vm_reg_name {
+ VM_REG_GUEST_R0,
VM_REG_GUEST_R1,
VM_REG_GUEST_R2,
VM_REG_GUEST_R3,
@@ -29,13 +30,9 @@
VM_REG_GUEST_R12,
VM_REG_GUEST_SP,
VM_REG_GUEST_LR,
- VM_REG_GUEST_PC
-};
-
-enum x2apic_state {
- X2APIC_DISABLED,
- X2APIC_ENABLED,
- X2APIC_STATE_LAST
+ VM_REG_GUEST_PC,
+ VM_REG_GUEST_CPSR,
+ VM_REG_LAST
};
#define VM_INTINFO_VECTOR(info) ((info) & 0xff)
@@ -55,15 +52,9 @@
struct vm;
struct vm_exception;
struct vm_memory_segment;
-struct seg_desc;
struct vm_exit;
struct vm_run;
-struct vhpet;
-struct vioapic;
-struct vlapic;
-struct vmspace;
struct vm_object;
-struct vm_guest_paging;
struct pmap;
typedef int (*vmm_init_func_t)(int ipinum);
@@ -83,10 +74,6 @@
uint64_t *retval);
typedef int (*vmi_set_register_t)(void *vmi, int vcpu, int num,
uint64_t val);
-typedef int (*vmi_get_desc_t)(void *vmi, int vcpu, int num,
- struct seg_desc *desc);
-typedef int (*vmi_set_desc_t)(void *vmi, int vcpu, int num,
- struct seg_desc *desc);
typedef int (*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int (*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
@@ -106,58 +93,26 @@
vmi_mmap_get_func_t vmmapget;
vmi_get_register_t vmgetreg;
vmi_set_register_t vmsetreg;
- vmi_get_desc_t vmgetdesc;
- vmi_set_desc_t vmsetdesc;
vmi_get_cap_t vmgetcap;
vmi_set_cap_t vmsetcap;
- vmi_vmspace_alloc vmspace_alloc;
- vmi_vmspace_free vmspace_free;
- vmi_vlapic_init vlapic_init;
- vmi_vlapic_cleanup vlapic_cleanup;
};
-extern struct vmm_ops vmm_ops_intel;
-extern struct vmm_ops vmm_ops_amd;
extern struct vmm_ops vmm_ops_arm;
int vm_create(const char *name, struct vm **retvm);
void vm_destroy(struct vm *vm);
-int vm_reinit(struct vm *vm);
const char *vm_name(struct vm *vm);
-int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len);
-int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
-int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
-void *vm_gpa_hold(struct vm *, vm_paddr_t gpa, size_t len, int prot,
- void **cookie);
-void vm_gpa_release(void *cookie);
-int vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
+int vm_malloc(struct vm *vm, uint64_t gpa, size_t len);
+uint64_t vm_gpa2hpa(struct vm *vm, uint64_t gpa, size_t size);
+int vm_gpabase2memseg(struct vm *vm, uint64_t gpabase,
struct vm_memory_segment *seg);
-int vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
- vm_offset_t *offset, struct vm_object **object);
-boolean_t vm_mem_allocated(struct vm *vm, vm_paddr_t gpa);
+boolean_t vm_mem_allocated(struct vm *vm, uint64_t gpa);
int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
-int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
- struct seg_desc *ret_desc);
-int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
- struct seg_desc *desc);
int vm_run(struct vm *vm, struct vm_run *vmrun);
-int vm_suspend(struct vm *vm, enum vm_suspend_how how);
-int vm_inject_nmi(struct vm *vm, int vcpu);
-int vm_nmi_pending(struct vm *vm, int vcpuid);
-void vm_nmi_clear(struct vm *vm, int vcpuid);
-int vm_inject_extint(struct vm *vm, int vcpu);
-int vm_extint_pending(struct vm *vm, int vcpuid);
-void vm_extint_clear(struct vm *vm, int vcpuid);
-struct vlapic *vm_lapic(struct vm *vm, int cpu);
-struct vioapic *vm_ioapic(struct vm *vm);
-struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
-int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
-int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
-int vm_apicid2vcpuid(struct vm *vm, int apicid);
-void vm_activate_cpu(struct vm *vm, int vcpu);
+int vm_activate_cpu(struct vm *vm, int vcpu);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
@@ -199,16 +154,6 @@
return (*(int *)suspend_cookie);
}
-/*
- * Return 1 if device indicated by bus/slot/func is supposed to be a
- * pci passthrough device.
- *
- * Return 0 otherwise.
- */
-int vmm_is_pptdev(int bus, int slot, int func);
-
-void *vm_iommu_domain(struct vm *vm);
-
enum vcpu_state {
VCPU_IDLE,
VCPU_FROZEN,
@@ -242,27 +187,6 @@
void *vcpu_stats(struct vm *vm, int vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr);
-struct vmspace *vm_get_vmspace(struct vm *vm);
-int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
-int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);
-struct vatpic *vm_atpic(struct vm *vm);
-struct vatpit *vm_atpit(struct vm *vm);
-struct vpmtmr *vm_pmtmr(struct vm *vm);
-struct vrtc *vm_rtc(struct vm *vm);
-
-/*
- * Inject exception 'vector' into the guest vcpu. This function returns 0 on
- * success and non-zero on failure.
- *
- * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
- * this function directly because they enforce the trap-like or fault-like
- * behavior of an exception.
- *
- * This function should only be called in the context of the thread that is
- * executing this vcpu.
- */
-int vm_inject_exception(struct vm *vm, int vcpuid, int vector, int err_valid,
- uint32_t errcode, int restart_instruction);
/*
* This function is called after a VM-exit that occurred during exception or
@@ -300,30 +224,6 @@
void *cookie;
};
-/*
- * Set up 'copyinfo[]' to copy to/from guest linear address space starting
- * at 'gla' and 'len' bytes long. The 'prot' should be set to PROT_READ for
- * a copyin or PROT_WRITE for a copyout.
- *
- * retval is_fault Intepretation
- * 0 0 Success
- * 0 1 An exception was injected into the guest
- * EFAULT N/A Unrecoverable error
- *
- * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
- * the return value is 0. The 'copyinfo[]' resources should be freed by calling
- * 'vm_copy_teardown()' after the copy is done.
- */
-int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
- uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
- int num_copyinfo, int *is_fault);
-void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
- int num_copyinfo);
-void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
- void *kaddr, size_t len);
-void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
- struct vm_copyinfo *copyinfo, size_t len);
-
int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
#endif /* KERNEL */
@@ -337,108 +237,8 @@
VM_CAP_MTRAP_EXIT,
VM_CAP_PAUSE_EXIT,
VM_CAP_UNRESTRICTED_GUEST,
- VM_CAP_ENABLE_INVPCID,
VM_CAP_MAX
};
-
-enum vm_intr_trigger {
- EDGE_TRIGGER,
- LEVEL_TRIGGER
-};
-
-/*
- * The 'access' field has the format specified in Table 21-2 of the Intel
- * Architecture Manual vol 3b.
- *
- * XXX The contents of the 'access' field are architecturally defined except
- * bit 16 - Segment Unusable.
- */
-struct seg_desc {
- uint64_t base;
- uint32_t limit;
- uint32_t access;
-};
-#define SEG_DESC_TYPE(access) ((access) & 0x001f)
-#define SEG_DESC_DPL(access) (((access) >> 5) & 0x3)
-#define SEG_DESC_PRESENT(access) (((access) & 0x0080) ? 1 : 0)
-#define SEG_DESC_DEF32(access) (((access) & 0x4000) ? 1 : 0)
-#define SEG_DESC_GRANULARITY(access) (((access) & 0x8000) ? 1 : 0)
-#define SEG_DESC_UNUSABLE(access) (((access) & 0x10000) ? 1 : 0)
-
-enum vm_cpu_mode {
- CPU_MODE_REAL,
- CPU_MODE_PROTECTED,
- CPU_MODE_COMPATIBILITY, /* IA-32E mode (CS.L = 0) */
- CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */
-};
-
-enum vm_paging_mode {
- PAGING_MODE_FLAT,
- PAGING_MODE_32,
- PAGING_MODE_PAE,
- PAGING_MODE_64,
-};
-
-struct vm_guest_paging {
- uint64_t cr3;
- int cpl;
- enum vm_cpu_mode cpu_mode;
- enum vm_paging_mode paging_mode;
-};
-
-/*
- * The data structures 'vie' and 'vie_op' are meant to be opaque to the
- * consumers of instruction decoding. The only reason why their contents
- * need to be exposed is because they are part of the 'vm_exit' structure.
- */
-struct vie_op {
- uint8_t op_byte; /* actual opcode byte */
- uint8_t op_type; /* type of operation (e.g. MOV) */
- uint16_t op_flags;
-};
-
-#define VIE_INST_SIZE 15
-struct vie {
- uint8_t inst[VIE_INST_SIZE]; /* instruction bytes */
- uint8_t num_valid; /* size of the instruction */
- uint8_t num_processed;
-
- uint8_t addrsize:4, opsize:4; /* address and operand sizes */
- uint8_t rex_w:1, /* REX prefix */
- rex_r:1,
- rex_x:1,
- rex_b:1,
- rex_present:1,
- repz_present:1, /* REP/REPE/REPZ prefix */
- repnz_present:1, /* REPNE/REPNZ prefix */
- opsize_override:1, /* Operand size override */
- addrsize_override:1, /* Address size override */
- segment_override:1; /* Segment override */
-
- uint8_t mod:2, /* ModRM byte */
- reg:4,
- rm:4;
-
- uint8_t ss:2, /* SIB byte */
- index:4,
- base:4;
-
- uint8_t disp_bytes;
- uint8_t imm_bytes;
-
- uint8_t scale;
- int base_register; /* VM_REG_GUEST_xyz */
- int index_register; /* VM_REG_GUEST_xyz */
- int segment_register; /* VM_REG_GUEST_xyz */
-
- int64_t displacement; /* optional addr displacement */
- int64_t immediate; /* optional immediate operand */
-
- uint8_t decoded; /* set to 1 if successfully decoded */
-
- struct vie_op op; /* opcode description */
-};
-
enum vm_exitcode {
VM_EXITCODE_INOUT,
VM_EXITCODE_VMX,
@@ -449,41 +249,10 @@
VM_EXITCODE_MTRAP,
VM_EXITCODE_PAUSE,
VM_EXITCODE_PAGING,
- VM_EXITCODE_INST_EMUL,
VM_EXITCODE_SPINUP_AP,
- VM_EXITCODE_DEPRECATED1, /* used to be SPINDOWN_CPU */
- VM_EXITCODE_RENDEZVOUS,
- VM_EXITCODE_IOAPIC_EOI,
- VM_EXITCODE_SUSPENDED,
- VM_EXITCODE_INOUT_STR,
- VM_EXITCODE_TASK_SWITCH,
- VM_EXITCODE_MONITOR,
- VM_EXITCODE_MWAIT,
- VM_EXITCODE_SVM,
VM_EXITCODE_MAX
};
-struct vm_inout {
- uint16_t bytes:3; /* 1 or 2 or 4 */
- uint16_t in:1;
- uint16_t string:1;
- uint16_t rep:1;
- uint16_t port;
- uint32_t eax; /* valid for out */
-};
-
-struct vm_inout_str {
- struct vm_inout inout; /* must be the first element */
- struct vm_guest_paging paging;
- uint64_t rflags;
- uint64_t cr0;
- uint64_t index;
- uint64_t count; /* rep=1 (%rcx), rep=0 (1) */
- int addrsize;
- enum vm_reg_name seg_name;
- struct seg_desc seg_desc;
-};
-
enum task_switch_reason {
TSR_CALL,
TSR_IRET,
@@ -497,16 +266,13 @@
uint32_t errcode;
int errcode_valid; /* push 'errcode' on the new stack */
enum task_switch_reason reason;
- struct vm_guest_paging paging;
};
struct vm_exit {
enum vm_exitcode exitcode;
int inst_length; /* 0 means unknown */
- uint64_t rip;
+ uint64_t pc;
union {
- struct vm_inout inout;
- struct vm_inout_str inout_str;
struct {
uint64_t gpa;
int fault_type;
@@ -514,10 +280,6 @@
struct {
uint64_t gpa;
uint64_t gla;
- uint64_t cs_base;
- int cs_d; /* CS.D */
- struct vm_guest_paging paging;
- struct vie vie;
} inst_emul;
/*
* VMX specific payload. Used when there is no "better"
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm_dev.h
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm_dev.h Wed Jul 8 12:39:47 2015 (r288086)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/include/vmm_dev.h Wed Jul 8 12:42:04 2015 (r288087)
@@ -35,7 +35,7 @@
#endif
struct vm_memory_segment {
- vm_paddr_t gpa; /* in */
+ uint64_t gpa; /* in */
size_t len;
int wired;
};
@@ -46,12 +46,6 @@
uint64_t regval;
};
-struct vm_seg_desc { /* data or code segment */
- int cpuid;
- int regnum; /* enum vm_reg_name */
- struct seg_desc desc;
-};
-
struct vm_run {
int cpuid;
uint64_t pc;
@@ -67,30 +61,6 @@
int restart_instruction;
};
-struct vm_lapic_msi {
- uint64_t msg;
- uint64_t addr;
-};
-
-struct vm_lapic_irq {
- int cpuid;
- int vector;
-};
-
-struct vm_ioapic_irq {
- int irq;
-};
-
-struct vm_isa_irq {
- int atpic_irq;
- int ioapic_irq;
-};
-
-struct vm_isa_irq_trigger {
- int atpic_irq;
- enum vm_intr_trigger trigger;
-};
-
struct vm_capability {
int cpuid;
enum vm_cap_type captype;
@@ -98,46 +68,6 @@
int allcpus;
};
-struct vm_pptdev {
- int bus;
- int slot;
- int func;
-};
-
-struct vm_pptdev_mmio {
- int bus;
- int slot;
- int func;
- vm_paddr_t gpa;
- vm_paddr_t hpa;
- size_t len;
-};
-
-struct vm_pptdev_msi {
- int vcpu;
- int bus;
- int slot;
- int func;
- int numvec; /* 0 means disabled */
- uint64_t msg;
- uint64_t addr;
-};
-
-struct vm_pptdev_msix {
- int vcpu;
- int bus;
- int slot;
- int func;
- int idx;
- uint64_t msg;
- uint32_t vector_control;
- uint64_t addr;
-};
-
-struct vm_nmi {
- int cpuid;
-};
-
#define MAX_VM_STATS 64
struct vm_stats {
int cpuid; /* in */
@@ -145,26 +75,11 @@
struct timeval tv;
uint64_t statbuf[MAX_VM_STATS];
};
-
struct vm_stat_desc {
int index; /* in */
char desc[128]; /* out */
};
-struct vm_x2apic {
- int cpuid;
- enum x2apic_state state;
-};
-
-struct vm_gpa_pte {
- uint64_t gpa; /* in */
- uint64_t pte[4]; /* out */
- int ptenum;
-};
-
-struct vm_hpet_cap {
- uint32_t capabilities; /* lower 32 bits of HPET capabilities */
-};
struct vm_suspend {
enum vm_suspend_how how;
@@ -174,7 +89,6 @@
int vcpuid; /* inputs */
int prot; /* PROT_READ or PROT_WRITE */
uint64_t gla;
- struct vm_guest_paging paging;
int fault; /* outputs */
uint64_t gpa;
};
@@ -186,21 +100,6 @@
#define VM_ACTIVE_CPUS 0
#define VM_SUSPENDED_CPUS 1
-struct vm_intinfo {
- int vcpuid;
- uint64_t info1;
- uint64_t info2;
-};
-
-struct vm_rtc_time {
- time_t secs;
-};
-
-struct vm_rtc_data {
- int offset;
- uint8_t value;
-};
-
enum {
/* general routines */
IOCNUM_ABIVERS = 0,
@@ -219,54 +118,14 @@
/* register/state accessors */
IOCNUM_SET_REGISTER = 20,
IOCNUM_GET_REGISTER = 21,
- IOCNUM_SET_SEGMENT_DESCRIPTOR = 22,
- IOCNUM_GET_SEGMENT_DESCRIPTOR = 23,
-
- /* interrupt injection */
- IOCNUM_GET_INTINFO = 28,
- IOCNUM_SET_INTINFO = 29,
- IOCNUM_INJECT_EXCEPTION = 30,
- IOCNUM_LAPIC_IRQ = 31,
- IOCNUM_INJECT_NMI = 32,
- IOCNUM_IOAPIC_ASSERT_IRQ = 33,
- IOCNUM_IOAPIC_DEASSERT_IRQ = 34,
- IOCNUM_IOAPIC_PULSE_IRQ = 35,
- IOCNUM_LAPIC_MSI = 36,
- IOCNUM_LAPIC_LOCAL_IRQ = 37,
- IOCNUM_IOAPIC_PINCOUNT = 38,
- IOCNUM_RESTART_INSTRUCTION = 39,
-
- /* PCI pass-thru */
- IOCNUM_BIND_PPTDEV = 40,
- IOCNUM_UNBIND_PPTDEV = 41,
- IOCNUM_MAP_PPTDEV_MMIO = 42,
- IOCNUM_PPTDEV_MSI = 43,
- IOCNUM_PPTDEV_MSIX = 44,
/* statistics */
IOCNUM_VM_STATS = 50,
IOCNUM_VM_STAT_DESC = 51,
- /* kernel device state */
- IOCNUM_SET_X2APIC_STATE = 60,
- IOCNUM_GET_X2APIC_STATE = 61,
- IOCNUM_GET_HPET_CAPABILITIES = 62,
-
- /* legacy interrupt injection */
- IOCNUM_ISA_ASSERT_IRQ = 80,
- IOCNUM_ISA_DEASSERT_IRQ = 81,
- IOCNUM_ISA_PULSE_IRQ = 82,
- IOCNUM_ISA_SET_IRQ_TRIGGER = 83,
-
/* vm_cpuset */
IOCNUM_ACTIVATE_CPU = 90,
IOCNUM_GET_CPUSET = 91,
-
- /* RTC */
- IOCNUM_RTC_READ = 100,
- IOCNUM_RTC_WRITE = 101,
- IOCNUM_RTC_SETTIME = 102,
- IOCNUM_RTC_GETTIME = 103,
};
#define VM_RUN \
@@ -283,80 +142,18 @@
_IOW('v', IOCNUM_SET_REGISTER, struct vm_register)
#define VM_GET_REGISTER \
_IOWR('v', IOCNUM_GET_REGISTER, struct vm_register)
-#define VM_SET_SEGMENT_DESCRIPTOR \
- _IOW('v', IOCNUM_SET_SEGMENT_DESCRIPTOR, struct vm_seg_desc)
-#define VM_GET_SEGMENT_DESCRIPTOR \
- _IOWR('v', IOCNUM_GET_SEGMENT_DESCRIPTOR, struct vm_seg_desc)
-#define VM_INJECT_EXCEPTION \
- _IOW('v', IOCNUM_INJECT_EXCEPTION, struct vm_exception)
-#define VM_LAPIC_IRQ \
- _IOW('v', IOCNUM_LAPIC_IRQ, struct vm_lapic_irq)
-#define VM_LAPIC_LOCAL_IRQ \
- _IOW('v', IOCNUM_LAPIC_LOCAL_IRQ, struct vm_lapic_irq)
-#define VM_LAPIC_MSI \
- _IOW('v', IOCNUM_LAPIC_MSI, struct vm_lapic_msi)
-#define VM_IOAPIC_ASSERT_IRQ \
- _IOW('v', IOCNUM_IOAPIC_ASSERT_IRQ, struct vm_ioapic_irq)
-#define VM_IOAPIC_DEASSERT_IRQ \
- _IOW('v', IOCNUM_IOAPIC_DEASSERT_IRQ, struct vm_ioapic_irq)
-#define VM_IOAPIC_PULSE_IRQ \
- _IOW('v', IOCNUM_IOAPIC_PULSE_IRQ, struct vm_ioapic_irq)
-#define VM_IOAPIC_PINCOUNT \
- _IOR('v', IOCNUM_IOAPIC_PINCOUNT, int)
-#define VM_ISA_ASSERT_IRQ \
- _IOW('v', IOCNUM_ISA_ASSERT_IRQ, struct vm_isa_irq)
-#define VM_ISA_DEASSERT_IRQ \
- _IOW('v', IOCNUM_ISA_DEASSERT_IRQ, struct vm_isa_irq)
-#define VM_ISA_PULSE_IRQ \
- _IOW('v', IOCNUM_ISA_PULSE_IRQ, struct vm_isa_irq)
-#define VM_ISA_SET_IRQ_TRIGGER \
- _IOW('v', IOCNUM_ISA_SET_IRQ_TRIGGER, struct vm_isa_irq_trigger)
#define VM_SET_CAPABILITY \
_IOW('v', IOCNUM_SET_CAPABILITY, struct vm_capability)
#define VM_GET_CAPABILITY \
_IOWR('v', IOCNUM_GET_CAPABILITY, struct vm_capability)
-#define VM_BIND_PPTDEV \
- _IOW('v', IOCNUM_BIND_PPTDEV, struct vm_pptdev)
-#define VM_UNBIND_PPTDEV \
- _IOW('v', IOCNUM_UNBIND_PPTDEV, struct vm_pptdev)
-#define VM_MAP_PPTDEV_MMIO \
- _IOW('v', IOCNUM_MAP_PPTDEV_MMIO, struct vm_pptdev_mmio)
-#define VM_PPTDEV_MSI \
- _IOW('v', IOCNUM_PPTDEV_MSI, struct vm_pptdev_msi)
-#define VM_PPTDEV_MSIX \
- _IOW('v', IOCNUM_PPTDEV_MSIX, struct vm_pptdev_msix)
-#define VM_INJECT_NMI \
- _IOW('v', IOCNUM_INJECT_NMI, struct vm_nmi)
#define VM_STATS \
_IOWR('v', IOCNUM_VM_STATS, struct vm_stats)
#define VM_STAT_DESC \
_IOWR('v', IOCNUM_VM_STAT_DESC, struct vm_stat_desc)
-#define VM_SET_X2APIC_STATE \
- _IOW('v', IOCNUM_SET_X2APIC_STATE, struct vm_x2apic)
-#define VM_GET_X2APIC_STATE \
- _IOWR('v', IOCNUM_GET_X2APIC_STATE, struct vm_x2apic)
-#define VM_GET_HPET_CAPABILITIES \
- _IOR('v', IOCNUM_GET_HPET_CAPABILITIES, struct vm_hpet_cap)
-#define VM_GET_GPA_PMAP \
- _IOWR('v', IOCNUM_GET_GPA_PMAP, struct vm_gpa_pte)
#define VM_GLA2GPA \
_IOWR('v', IOCNUM_GLA2GPA, struct vm_gla2gpa)
#define VM_ACTIVATE_CPU \
_IOW('v', IOCNUM_ACTIVATE_CPU, struct vm_activate_cpu)
#define VM_GET_CPUS \
_IOW('v', IOCNUM_GET_CPUSET, struct vm_cpuset)
-#define VM_SET_INTINFO \
- _IOW('v', IOCNUM_SET_INTINFO, struct vm_intinfo)
-#define VM_GET_INTINFO \
- _IOWR('v', IOCNUM_GET_INTINFO, struct vm_intinfo)
-#define VM_RTC_WRITE \
- _IOW('v', IOCNUM_RTC_WRITE, struct vm_rtc_data)
-#define VM_RTC_READ \
- _IOWR('v', IOCNUM_RTC_READ, struct vm_rtc_data)
-#define VM_RTC_SETTIME \
- _IOW('v', IOCNUM_RTC_SETTIME, struct vm_rtc_time)
-#define VM_RTC_GETTIME \
- _IOR('v', IOCNUM_RTC_GETTIME, struct vm_rtc_time)
-#define VM_RESTART_INSTRUCTION \
- _IOW('v', IOCNUM_RESTART_INSTRUCTION, int)
#endif
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/arm.c
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/arm.c Wed Jul 8 12:39:47 2015 (r288086)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/arm.c Wed Jul 8 12:42:04 2015 (r288087)
@@ -31,6 +31,7 @@
extern char hyp_vector[];
extern char hyp_code_start[];
extern char hypervisor_stub_vect[];
+extern char hypmode_enabled[];
lpae_pd_entry_t *hyp_l1pd;
char *stack;
@@ -68,14 +69,20 @@
arm_init(int ipinum)
{
char *stack_top;
- lpae_vm_paddr_t phys_hyp_l1pd;
+ lpae_vm_paddr_t phys_hyp_l1pd, phys_check;
+
+ if (hypmode_enabled[0]) {
+ printf("arm_init: processor didn't boot in HYP-mode (no support)\n");
+ return (ENXIO);
+ }
mtx_init(&vmid_generation_mtx, "vmid_generation_mtx", NULL, MTX_DEF);
stack = malloc(PAGE_SIZE, M_HYP, M_WAITOK | M_ZERO);
stack_top = stack + PAGE_SIZE;
- hyp_l1pd = malloc(2 * LPAE_L1_ENTRIES * sizeof(lpae_pd_entry_t), M_HYP, M_WAITOK | M_ZERO);
+ hyp_l1pd = malloc(2 * LPAE_L1_ENTRIES * sizeof(lpae_pd_entry_t),
+ M_HYP, M_WAITOK | M_ZERO);
lpae_vmmmap_set(NULL,
(lpae_vm_vaddr_t)stack,
@@ -83,7 +90,8 @@
PAGE_SIZE,
VM_PROT_READ | VM_PROT_WRITE);
- printf("%s hyp_code_start: %p, phys_hyp_code_start: %p\n", __func__, (void*) hyp_code_start, (void*)vtophys(hyp_code_start));
+ printf("%s hyp_code_start: %p, phys_hyp_code_start: %p\n", __func__,
+ (void*) hyp_code_start, (void*)vtophys(hyp_code_start));
/*
* Create two mappings:
@@ -101,7 +109,6 @@
(lpae_vm_paddr_t)vtophys(hyp_code_start),
PAGE_SIZE,
VM_PROT_READ | VM_PROT_WRITE);
-
/*
* Flush all caches to be sure the tables are in physical memory
*/
@@ -110,6 +117,13 @@
dump_lpae_mapping(NULL);
+ phys_check = lpae_vmmmap_get(NULL, (lpae_vm_vaddr_t)hyp_code_start);
+ if (phys_check != (lpae_vm_vaddr_t)vtophys(hyp_code_start)) {
+ printf ("%s lpae_vmmmap_get returned %p instead of %p\n",
+ __func__,
+ (void*) phys_check,
+ (void*) vtophys(hyp_code_start));
+ }
/*
* Install the temporary vector from which
* will do the initialization part of VMM
@@ -240,16 +254,82 @@
free(hyp, M_HYP);
}
+static uint32_t *
+hypctx_regptr(struct hypctx *hypctx, int reg)
+{
+
+ switch (reg) {
+ case VM_REG_GUEST_R0:
+ return (&hypctx->regs.r[0]);
+ case VM_REG_GUEST_R1:
+ return (&hypctx->regs.r[1]);
+ case VM_REG_GUEST_R2:
+ return (&hypctx->regs.r[2]);
+ case VM_REG_GUEST_R3:
+ return (&hypctx->regs.r[3]);
+ case VM_REG_GUEST_R4:
+ return (&hypctx->regs.r[4]);
+ case VM_REG_GUEST_R5:
+ return (&hypctx->regs.r[5]);
+ case VM_REG_GUEST_R6:
+ return (&hypctx->regs.r[6]);
+ case VM_REG_GUEST_R7:
+ return (&hypctx->regs.r[7]);
+ case VM_REG_GUEST_R8:
+ return (&hypctx->regs.r[8]);
+ case VM_REG_GUEST_R9:
+ return (&hypctx->regs.r[9]);
+ case VM_REG_GUEST_R10:
+ return (&hypctx->regs.r[10]);
+ case VM_REG_GUEST_R11:
+ return (&hypctx->regs.r[11]);
+ case VM_REG_GUEST_R12:
+ return (&hypctx->regs.r[12]);
+ case VM_REG_GUEST_SP:
+ return (&hypctx->regs.r_sp);
+ case VM_REG_GUEST_LR:
+ return (&hypctx->regs.r_lr);
+ case VM_REG_GUEST_PC:
+ return (&hypctx->regs.r_pc);
+ case VM_REG_GUEST_CPSR:
+ return (&hypctx->regs.r_cpsr);
+ default:
+ break;
+ }
+ return (NULL);
+}
static int
-arm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
+arm_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
{
- return (EINVAL);
+ uint32_t *regp;
+ int running, hostcpu;
+ struct hyp *hyp = arg;
+
+ running = vcpu_is_running(hyp->vm, vcpu, &hostcpu);
+ if (running && hostcpu != curcpu)
+ panic("arm_getreg: %s%d is running", vm_name(hyp->vm), vcpu);
+
+ if ((regp = hypctx_regptr(&hyp->ctx[vcpu], reg)) != NULL) {
+ *retval = *regp;
+ return (0);
+ } else
+ return (EINVAL);
}
static int
-arm_setreg(void *arg, int vcpu, int ident, uint64_t val)
+arm_setreg(void *arg, int vcpu, int reg, uint64_t val)
{
- return (EINVAL);
+ uint32_t *regp;
+ struct hyp *hyp = arg;
+ int running, hostcpu;
+
+ running = vcpu_is_running(hyp->vm, vcpu, &hostcpu);
+ if (running && hostcpu != curcpu)
+ panic("hyp_setreg: %s%d is running", vm_name(hyp->vm), vcpu);
+
+ if ((regp = hypctx_regptr(&hyp->ctx[vcpu], reg)) != NULL) {
+ *regp = val;
+ return (0);
+ } else
+ return (EINVAL);
}
struct vmm_ops vmm_ops_arm = {
@@ -260,15 +340,9 @@
arm_vmrun,
arm_vmcleanup,
lpae_vmmmap_set,
- NULL,
+ lpae_vmmmap_get,
arm_getreg,
arm_setreg,
- NULL, /* vmi_get_desc_t */
- NULL, /* vmi_set_desc_t */
NULL, /* vmi_get_cap_t */
- NULL, /* vmi_set_cap_t */
- NULL, /* vmi_vmspace_alloc */
- NULL, /* vmi_vmspace_free */
- NULL, /* vmi_vlapic_init,*/
- NULL /*vmi_vlapic_cleanup */
+ NULL /* vmi_set_cap_t */
};
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.c
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.c Wed Jul 8 12:39:47 2015 (r288086)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.c Wed Jul 8 12:42:04 2015 (r288087)
@@ -178,6 +178,7 @@
}
}
}
+
int lpae_vmmmap_set(void *arg,
uint64_t virt_start,
uint64_t phys_start,
@@ -200,6 +201,47 @@
return (0);
}
+uint64_t lpae_vmmmap_get(void *arg, uint64_t ipa)
+{
+ struct hyp *vm_hyp;
+ int l1_index, l2_index, l3_index;
+ lpae_pd_entry_t *l1pd, *l1pd_shadow, *l2pd, *l2pd_shadow, *l3pd;
+
+ vm_hyp = arg;
+
+ if (arg)
+ l1pd = &vm_hyp->l1pd[0];
+ else
+ l1pd = &hyp_l1pd[0];
+
+ l1pd_shadow = &l1pd[LPAE_L1_ENTRIES];
+
+ /* Check if there is a connection to a 2nd level PT */
+ l1_index = (ipa >> LPAE_L1_SHIFT) & LPAE_L1_INDEX_MASK;
+ if ((l1pd[l1_index] & LPAE_TYPE_LINK) == LPAE_TYPE_LINK) {
+
+ /* Grab the virtual address of the 2nd level PT */
+ l2pd = (lpae_pd_entry_t *) (l1pd_shadow[l1_index]);
+ l2pd_shadow = &l2pd[LPAE_L2_ENTRIES];
+
+ /* Check if there is a connection to a 3rd level PT */
+ l2_index = (ipa >> LPAE_L2_SHIFT) & LPAE_L2_INDEX_MASK;
+ if ((l2pd[l2_index] & LPAE_TYPE_LINK) == LPAE_TYPE_LINK) {
+
+ l3pd = (lpae_pd_entry_t *) (l2pd_shadow[l2_index]);
+
+ l3_index = (ipa >> LPAE_L3_SHIFT) & LPAE_L3_INDEX_MASK;
+ return (l3pd[l3_index] & LPAE_L3_B_ADDR_MASK);
+ } else {
+ return (l2pd[l2_index] & LPAE_L2_B_ADDR_MASK);
+ }
+ } else {
+ return (l1pd[l1_index] & LPAE_L1_B_ADDR_MASK);
+ }
+
+ return ((uint64_t)-1);
+}
+
void lpae_vmcleanup(void *arg)
{
int i, j;
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.h
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.h Wed Jul 8 12:39:47 2015 (r288086)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.h Wed Jul 8 12:42:04 2015 (r288087)
@@ -12,8 +12,11 @@
uint64_t phys_start,
size_t len,
int prot);
-
+uint64_t lpae_vmmmap_get(void *arg,
+ uint64_t ipa);
void lpae_vmcleanup(void *arg);
+
+/* Debug only */
void dump_lpae_mapping(void *arg);
#define LPAE_NLEVELS 3
@@ -42,6 +45,7 @@
#define LPAE_L3_SHIFT 12
#define LPAE_L3_SIZE (1 << 12)
#define LPAE_L3_INDEX_MASK 0x1FF
+#define LPAE_L3_B_ADDR_MASK ((uint64_t)0xFFFFFFF000)/* phys address of Phys Block */
#define LPAE_TYPE_LINK 0x03
#define LPAE_L12_TYPE_BLOCK 0x01
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/vmm.c
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/vmm.c Wed Jul 8 12:39:47 2015 (r288086)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/vmm.c Wed Jul 8 12:42:04 2015 (r288087)
@@ -14,6 +14,8 @@
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/systm.h>
+#include <sys/cpuset.h>
+
#include <vm/vm.h>
#include <vm/vm_object.h>
@@ -34,6 +36,7 @@
#include "vmm_stat.h"
#include "vmm_mem.h"
+#include "mmu.h"
struct vcpu {
int flags;
@@ -53,7 +56,7 @@
#define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
struct mem_seg {
- vm_paddr_t gpa;
+ uint64_t gpa;
size_t len;
boolean_t wired;
vm_object_t object;
@@ -62,7 +65,6 @@
struct vm {
void *cookie; /* processor-specific data */
- void *iommu; /* iommu-specific data */
struct vcpu vcpu[VM_MAXCPU];
int num_mem_segs;
struct vm_memory_segment mem_segs[VM_MAX_MEMORY_SEGMENTS];
@@ -87,20 +89,15 @@
#define VMRUN(vmi, vcpu, rip, pmap, rptr, sptr) \
(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, rptr, sptr) : ENXIO)
#define VMCLEANUP(vmi) (ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
-#define VMMMAP_SET(vmi, gpa, hpa, len, attr, prot, spm) \
+#define VMMMAP_SET(vmi, gpa, hpa, len, prot) \
(ops != NULL ? \
- (*ops->vmmmap_set)(vmi, gpa, hpa, len, attr, prot, spm) : \
- ENXIO)
+ (*ops->vmmapset)(vmi, gpa, hpa, len, prot) : ENXIO)
#define VMMMAP_GET(vmi, gpa) \
- (ops != NULL ? (*ops->vmmmap_get)(vmi, gpa) : ENXIO)
+ (ops != NULL ? (*ops->vmmapget)(vmi, gpa) : ENXIO)
#define VMGETREG(vmi, vcpu, num, retval) \
(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETREG(vmi, vcpu, num, val) \
(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
-#define VMGETDESC(vmi, vcpu, num, desc) \
- (ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
-#define VMSETDESC(vmi, vcpu, num, desc) \
- (ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMGETCAP(vmi, vcpu, num, retval) \
(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETCAP(vmi, vcpu, num, val) \
@@ -174,7 +171,7 @@
break;
case MOD_UNLOAD:
error = vmmdev_cleanup();
- if (error == 0) {
+ if (error == 0 && vmm_initialized) {
error = VMM_CLEANUP();
if (error)
vmm_initialized = 0;
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
More information about the svn-soc-all
mailing list