git: 671a00491d7a - main - vm_iommu_map()/unmap(): stop transiently wiring already wired pages
Date: Fri, 22 Dec 2023 17:34:33 UTC
The branch main has been updated by kib:
URL: https://cgit.FreeBSD.org/src/commit/?id=671a00491d7ac9d6663cdc597ff8c13024eda00d
commit 671a00491d7ac9d6663cdc597ff8c13024eda00d
Author: Konstantin Belousov <kib@FreeBSD.org>
AuthorDate: 2023-12-21 00:12:37 +0000
Commit: Konstantin Belousov <kib@FreeBSD.org>
CommitDate: 2023-12-22 17:34:27 +0000
vm_iommu_map()/unmap(): stop transiently wiring already wired pages
Namely, switch from vm_fault_quick_hold() to the pmap_extract() KPI to
translate gpa to hpa. Assert that the looked-up hpa belongs to a wired
page, as it should for a VM configured for pass-through (this is a
restriction that could, in theory, be removed on newer DMARs). (A
standalone sketch of this lookup pattern follows the diff below.)
Noted by: alc
Reviewed by: alc, jhb, markj
Sponsored by: The FreeBSD Foundation
MFC after: 1 week
Differential revision: https://reviews.freebsd.org/D43140
---
sys/amd64/vmm/vmm.c | 39 +++++++++++++++++++++++++--------------
1 file changed, 25 insertions(+), 14 deletions(-)
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 32ae9f9050a7..f399f876717d 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1041,9 +1041,10 @@ vm_iommu_map(struct vm *vm)
 {
 	vm_paddr_t gpa, hpa;
 	struct mem_map *mm;
-	void *vp, *cookie;
 	int i;
 
+	sx_assert(&vm->mem_segs_lock, SX_LOCKED);
+
 	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
 		mm = &vm->mem_maps[i];
 		if (!sysmem_mapping(vm, mm))
@@ -1057,13 +1058,24 @@ vm_iommu_map(struct vm *vm)
 		mm->flags |= VM_MEMMAP_F_IOMMU;
 
 		for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
-			vp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE,
-			    VM_PROT_WRITE, &cookie);
-			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
-			    vm_name(vm), gpa));
-			vm_gpa_release(cookie);
+			hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa);
+
+			/*
+			 * All mappings in the vmm vmspace must be
+			 * present since they are managed by vmm in this way.
+			 * Because we are in pass-through mode, the
+			 * mappings must also be wired.  This implies
+			 * that all pages must be mapped and wired,
+			 * allowing the use of pmap_extract() and avoiding
+			 * the need for vm_gpa_hold_global().
+			 *
+			 * This could change if/when we start
+			 * supporting page faults on IOMMU maps.
+			 */
+			KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(hpa)),
+			    ("vm_iommu_map: vm %p gpa %jx hpa %jx not wired",
+			    vm, (uintmax_t)gpa, (uintmax_t)hpa));
-			hpa = DMAP_TO_PHYS((uintptr_t)vp);
 
 			iommu_create_mapping(vm->iommu, gpa, hpa, PAGE_SIZE);
 		}
 	}
@@ -1076,9 +1088,10 @@ vm_iommu_unmap(struct vm *vm)
 {
 	vm_paddr_t gpa;
 	struct mem_map *mm;
-	void *vp, *cookie;
 	int i;
 
+	sx_assert(&vm->mem_segs_lock, SX_LOCKED);
+
 	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
 		mm = &vm->mem_maps[i];
 		if (!sysmem_mapping(vm, mm))
@@ -1092,12 +1105,10 @@ vm_iommu_unmap(struct vm *vm)
 		    mm->gpa, mm->len, mm->flags));
 
 		for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
-			vp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE,
-			    VM_PROT_WRITE, &cookie);
-			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
-			    vm_name(vm), gpa));
-			vm_gpa_release(cookie);
-
+			KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(pmap_extract(
+			    vmspace_pmap(vm->vmspace), gpa))),
+			    ("vm_iommu_unmap: vm %p gpa %jx not wired",
+			    vm, (uintmax_t)gpa));
 			iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE);
 		}
 	}
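
For illustration, below is a minimal standalone sketch of the lookup
pattern the commit adopts: the guest physical address doubles as the
virtual address in the vmm vmspace, so pmap_extract() can translate it
directly, and the backing page is asserted to be already wired instead
of being transiently wired and released. The helper name
translate_gpa_wired() and the explicit hpa != 0 check are illustrative
assumptions, not vmm.c identifiers; the committed change open-codes
this logic inside vm_iommu_map() and vm_iommu_unmap().

/*
 * Sketch only (assumes INVARIANTS for the KASSERTs to fire);
 * translate_gpa_wired() is a hypothetical helper.
 */
#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

static vm_paddr_t
translate_gpa_wired(pmap_t pmap, vm_paddr_t gpa)
{
	vm_paddr_t hpa;

	/*
	 * pmap_extract() walks the page tables and returns the
	 * physical address backing the mapping, or 0 if there is
	 * no mapping.  No transient wiring is needed, unlike the
	 * removed vm_gpa_hold_global()/vm_gpa_release() round trip.
	 */
	hpa = pmap_extract(pmap, gpa);
	KASSERT(hpa != 0, ("gpa %#jx is not mapped", (uintmax_t)gpa));

	/*
	 * For a pass-through VM all sysmem pages are wired, so the
	 * page backing the translation must already be wired.
	 */
	KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(hpa)),
	    ("gpa %#jx hpa %#jx not wired", (uintmax_t)gpa, (uintmax_t)hpa));

	return (hpa);
}

A caller would pass vmspace_pmap(vm->vmspace) as the pmap, exactly as
the committed code does when it invokes pmap_extract() in the loops
above.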