git: 2b4fe856f44d - main - bhyve: Remove unused vm and vcpu arguments from vm_copy routines.
Date: Fri, 18 Nov 2022 18:26:37 UTC
The branch main has been updated by jhb:
URL: https://cgit.FreeBSD.org/src/commit/?id=2b4fe856f44ded02f3450bac1782bb49b60b7dd5
commit 2b4fe856f44ded02f3450bac1782bb49b60b7dd5
Author: John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2022-11-18 18:01:44 +0000
Commit: John Baldwin <jhb@FreeBSD.org>
CommitDate: 2022-11-18 18:25:36 +0000
bhyve: Remove unused vm and vcpu arguments from vm_copy routines.
The arguments identifying the VM and vCPU are only needed for
vm_copy_setup.
Reviewed by: corvink, markj
Differential Revision: https://reviews.freebsd.org/D37158
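For out-of-tree libvmmapi consumers, the practical effect is that only vm_copy_setup() still takes the vmctx and vcpu; vm_copyin(), vm_copyout(), and vm_copy_teardown() now operate purely on the iovec array describing the mapped guest range. Below is a minimal sketch of the updated userland call sequence; the helper name, variable names, and header set are illustrative rather than taken from the commit, and real callers (e.g. bhyve) handle the fault case by injecting an exception rather than returning EFAULT.

	#include <sys/param.h>	/* nitems() */
	#include <sys/uio.h>	/* struct iovec */
	#include <sys/mman.h>	/* PROT_READ */
	#include <errno.h>
	#include <machine/vmm.h>
	#include <vmmapi.h>

	/* Illustrative helper: read a 64-bit value from a guest linear address. */
	static int
	read_guest_u64(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
	    uint64_t gla, uint64_t *valp)
	{
		struct iovec iov[2];
		int error, fault;

		/* Translating and mapping the guest range still needs the VM and vCPU. */
		error = vm_copy_setup(ctx, vcpu, paging, gla, sizeof(*valp),
		    PROT_READ, iov, nitems(iov), &fault);
		if (error)
			return (error);
		if (fault)
			return (EFAULT);	/* sketch only; bhyve would inject a fault */

		/* The copy and teardown only need the iovec set up above. */
		vm_copyin(iov, valp, sizeof(*valp));
		vm_copy_teardown(iov, nitems(iov));
		return (0);
	}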
---
lib/libvmmapi/vmmapi.c | 14 ++++++++------
lib/libvmmapi/vmmapi.h | 11 ++++-------
sys/amd64/include/vmm.h | 9 +++------
sys/amd64/vmm/vmm.c | 11 ++++-------
sys/amd64/vmm/vmm_instruction_emul.c | 18 +++++++++---------
usr.sbin/bhyve/inout.c | 4 ++--
usr.sbin/bhyve/task_switch.c | 14 +++++++-------
7 files changed, 37 insertions(+), 44 deletions(-)
diff --git a/lib/libvmmapi/vmmapi.c b/lib/libvmmapi/vmmapi.c
index 454d7ee21b36..a65b77300b3a 100644
--- a/lib/libvmmapi/vmmapi.c
+++ b/lib/libvmmapi/vmmapi.c
@@ -1468,14 +1468,17 @@ vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
}
void
-vm_copy_teardown(struct vmctx *ctx __unused, int vcpu __unused,
- struct iovec *iov __unused, int iovcnt __unused)
+vm_copy_teardown(struct iovec *iov __unused, int iovcnt __unused)
{
+ /*
+ * Intentionally empty. This is used by the instruction
+ * emulation code shared with the kernel. The in-kernel
+ * version of this is non-empty.
+ */
}
void
-vm_copyin(struct vmctx *ctx __unused, int vcpu __unused, struct iovec *iov,
- void *vp, size_t len)
+vm_copyin(struct iovec *iov, void *vp, size_t len)
{
const char *src;
char *dst;
@@ -1495,8 +1498,7 @@ vm_copyin(struct vmctx *ctx __unused, int vcpu __unused, struct iovec *iov,
}
void
-vm_copyout(struct vmctx *ctx __unused, int vcpu __unused, const void *vp,
- struct iovec *iov, size_t len)
+vm_copyout(const void *vp, struct iovec *iov, size_t len)
{
const char *src;
char *dst;
diff --git a/lib/libvmmapi/vmmapi.h b/lib/libvmmapi/vmmapi.h
index b26f12f7c60e..82c3dcdf3118 100644
--- a/lib/libvmmapi/vmmapi.h
+++ b/lib/libvmmapi/vmmapi.h
@@ -43,7 +43,7 @@
* API version for out-of-tree consumers like grub-bhyve for making compile
* time decisions.
*/
-#define VMMAPI_VERSION 0103 /* 2 digit major followed by 2 digit minor */
+#define VMMAPI_VERSION 0104 /* 2 digit major followed by 2 digit minor */
struct iovec;
struct vmctx;
@@ -219,12 +219,9 @@ int vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities);
int vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *pg,
uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
int *fault);
-void vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *guest_iov,
- void *host_dst, size_t len);
-void vm_copyout(struct vmctx *ctx, int vcpu, const void *host_src,
- struct iovec *guest_iov, size_t len);
-void vm_copy_teardown(struct vmctx *ctx, int vcpu, struct iovec *iov,
- int iovcnt);
+void vm_copyin(struct iovec *guest_iov, void *host_dst, size_t len);
+void vm_copyout(const void *host_src, struct iovec *guest_iov, size_t len);
+void vm_copy_teardown(struct iovec *iov, int iovcnt);
/* RTC */
int vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value);
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 7896a4997f53..a223eb17afe9 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -461,12 +461,9 @@ struct vm_copyinfo {
int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
int num_copyinfo, int *is_fault);
-void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
- int num_copyinfo);
-void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
- void *kaddr, size_t len);
-void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
- struct vm_copyinfo *copyinfo, size_t len);
+void vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo);
+void vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len);
+void vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len);
int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
int vcpu_trap_wbinvd(struct vm *vm, int vcpuid);
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 2a4ff4799cc0..bffba382b667 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -2723,8 +2723,7 @@ vm_segment_name(int seg)
}
void
-vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
- int num_copyinfo)
+vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo)
{
int idx;
@@ -2773,7 +2772,7 @@ vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
}
if (idx != nused) {
- vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
+ vm_copy_teardown(copyinfo, num_copyinfo);
return (EFAULT);
} else {
*fault = 0;
@@ -2782,8 +2781,7 @@ vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
}
void
-vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
- size_t len)
+vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len)
{
char *dst;
int idx;
@@ -2799,8 +2797,7 @@ vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
}
void
-vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
- struct vm_copyinfo *copyinfo, size_t len)
+vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len)
{
const char *src;
int idx;
diff --git a/sys/amd64/vmm/vmm_instruction_emul.c b/sys/amd64/vmm/vmm_instruction_emul.c
index 3b48eee2d8ed..0b774e72b0c3 100644
--- a/sys/amd64/vmm/vmm_instruction_emul.c
+++ b/sys/amd64/vmm/vmm_instruction_emul.c
@@ -835,8 +835,8 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
/*
* case (2): read from system memory and write to mmio.
*/
- vm_copyin(vm, vcpuid, copyinfo, &val, opsize);
- vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
+ vm_copyin(copyinfo, &val, opsize);
+ vm_copy_teardown(copyinfo, nitems(copyinfo));
error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
if (error)
goto done;
@@ -871,8 +871,8 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
if (error)
goto done;
- vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
- vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
+ vm_copyout(&val, copyinfo, opsize);
+ vm_copy_teardown(copyinfo, nitems(copyinfo));
} else {
/*
* Case (4): read from and write to mmio.
@@ -1599,13 +1599,13 @@ emulate_stack_op(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
if (pushop) {
error = memread(vm, vcpuid, mmio_gpa, &val, size, arg);
if (error == 0)
- vm_copyout(vm, vcpuid, &val, copyinfo, size);
+ vm_copyout(&val, copyinfo, size);
} else {
- vm_copyin(vm, vcpuid, copyinfo, &val, size);
+ vm_copyin(copyinfo, &val, size);
error = memwrite(vm, vcpuid, mmio_gpa, val, size, arg);
rsp += size;
}
- vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
+ vm_copy_teardown(copyinfo, nitems(copyinfo));
if (error == 0) {
error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSP, rsp,
@@ -2300,8 +2300,8 @@ vmm_fetch_instruction(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
if (error || *faultptr)
return (error);
- vm_copyin(vm, vcpuid, copyinfo, vie->inst, inst_length);
- vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
+ vm_copyin(copyinfo, vie->inst, inst_length);
+ vm_copy_teardown(copyinfo, nitems(copyinfo));
vie->num_valid = inst_length;
return (0);
}
diff --git a/usr.sbin/bhyve/inout.c b/usr.sbin/bhyve/inout.c
index 02b38c9b7a99..bdbdd0d1c223 100644
--- a/usr.sbin/bhyve/inout.c
+++ b/usr.sbin/bhyve/inout.c
@@ -184,14 +184,14 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
val = 0;
if (!in)
- vm_copyin(ctx, vcpu, iov, &val, bytes);
+ vm_copyin(iov, &val, bytes);
retval = handler(ctx, vcpu, in, port, bytes, &val, arg);
if (retval != 0)
break;
if (in)
- vm_copyout(ctx, vcpu, &val, iov, bytes);
+ vm_copyout(&val, iov, bytes);
/* Update index */
if (vis->rflags & PSL_D)
diff --git a/usr.sbin/bhyve/task_switch.c b/usr.sbin/bhyve/task_switch.c
index 78dfb8190e48..0dfb536f09f8 100644
--- a/usr.sbin/bhyve/task_switch.c
+++ b/usr.sbin/bhyve/task_switch.c
@@ -225,9 +225,9 @@ desc_table_rw(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
return (error);
if (doread)
- vm_copyin(ctx, vcpu, iov, desc, sizeof(*desc));
+ vm_copyin(iov, desc, sizeof(*desc));
else
- vm_copyout(ctx, vcpu, desc, iov, sizeof(*desc));
+ vm_copyout(desc, iov, sizeof(*desc));
return (0);
}
@@ -464,7 +464,7 @@ tss32_save(struct vmctx *ctx, int vcpu, struct vm_task_switch *task_switch,
tss->tss_eip = eip;
/* Copy updated old TSS into guest memory */
- vm_copyout(ctx, vcpu, tss, iov, sizeof(struct tss32));
+ vm_copyout(tss, iov, sizeof(struct tss32));
}
static void
@@ -560,7 +560,7 @@ tss32_restore(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
* the previous link field.
*/
if (nested)
- vm_copyout(ctx, vcpu, tss, iov, sizeof(*tss));
+ vm_copyout(tss, iov, sizeof(*tss));
/* Validate segment descriptors */
error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_LDTR, &seg_desc,
@@ -685,7 +685,7 @@ push_errcode(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
if (error || *faultptr)
return (error);
- vm_copyout(ctx, vcpu, &errcode, iov, bytes);
+ vm_copyout(&errcode, iov, bytes);
SETREG(ctx, vcpu, VM_REG_GUEST_RSP, esp);
return (0);
}
@@ -798,7 +798,7 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
error = vm_copy_setup(ctx, vcpu, &sup_paging, nt.base, minlimit + 1,
PROT_READ | PROT_WRITE, nt_iov, nitems(nt_iov), &fault);
CHKERR(error, fault);
- vm_copyin(ctx, vcpu, nt_iov, &newtss, minlimit + 1);
+ vm_copyin(nt_iov, &newtss, minlimit + 1);
/* Get the old TSS selector from the guest's task register */
ot_sel = GETREG(ctx, vcpu, VM_REG_GUEST_TR);
@@ -830,7 +830,7 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
error = vm_copy_setup(ctx, vcpu, &sup_paging, ot_base, minlimit + 1,
PROT_READ | PROT_WRITE, ot_iov, nitems(ot_iov), &fault);
CHKERR(error, fault);
- vm_copyin(ctx, vcpu, ot_iov, &oldtss, minlimit + 1);
+ vm_copyin(ot_iov, &oldtss, minlimit + 1);
/*
* Clear the busy bit in the old TSS descriptor if the task switch