git: b57be759d079 - main - vm_fault: Fix some nits in vm_fault_copy_entry()
Date: Mon, 11 Jul 2022 19:58:58 UTC
The branch main has been updated by markj:
URL: https://cgit.FreeBSD.org/src/commit/?id=b57be759d079d40aa6ec75207faadce5ae1484a2
commit b57be759d079d40aa6ec75207faadce5ae1484a2
Author: Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2022-07-11 19:27:54 +0000
Commit: Mark Johnston <markj@FreeBSD.org>
CommitDate: 2022-07-11 19:58:42 +0000
vm_fault: Fix some nits in vm_fault_copy_entry()
- Correct the description (vm_fault_copy_entry() does not create a
shadow object).
- Move some initialization and assertions out of the scope of the object
locks, when doing so makes sense.
- Merge a pair of conditional blocks.
- Use __unused when appropriate.
No functional change intended.
Reviewed by: alc
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
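[Editor's note: the bullet about moving initialization and assertions out of the scope of the object locks follows a general pattern, illustrated by the small pthread-based sketch below. This is an analogy only, not the kernel code: the names struct counter and counter_add are invented, and the point is simply that checks which read no shared state can run before the lock is taken, so the critical section covers only the shared update.]

/*
 * Sketch of "move initialization and assertions out of the lock scope":
 * the parameter check reads only caller-private state, so it runs before
 * the mutex is acquired, and the lock is held only for the shared update.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct counter {
	pthread_mutex_t	lock;
	long		value;
};

static void
counter_add(struct counter *c, long delta)
{
	assert(delta >= 0);		/* caller-private check, unlocked */

	pthread_mutex_lock(&c->lock);
	c->value += delta;		/* only shared state under the lock */
	pthread_mutex_unlock(&c->lock);
}

int
main(void)
{
	struct counter c = { .value = 0 };

	pthread_mutex_init(&c.lock, NULL);
	counter_add(&c, 5);
	printf("%ld\n", c.value);
	pthread_mutex_destroy(&c.lock);
	return (0);
}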
---
sys/vm/vm_fault.c | 51 +++++++++++++++++++++++----------------------------
1 file changed, 23 insertions(+), 28 deletions(-)
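[Editor's note: the "__unused" bullet refers to replacing the historical "#ifdef lint" trick (visible in the first hunk below, where the old code incremented src_map purely to silence lint) with the __unused annotation, which in the kernel comes from <sys/cdefs.h>. The following is a minimal standalone sketch of the two idioms, with invented function names and a local fallback definition of __unused so it builds outside FreeBSD.]

/*
 * Contrast of the old lint idiom and the __unused parameter annotation.
 * On FreeBSD, __unused is already provided by <sys/cdefs.h>; the fallback
 * below is only for other GCC/Clang environments.
 */
#include <stdio.h>

#ifndef __unused
#define	__unused	__attribute__((__unused__))
#endif

/* Old style: "use" the parameter so lint(1) does not complain about it. */
static void
old_style(int used, int unused_param)
{
#ifdef lint
	unused_param++;		/* silence "argument unused" from lint */
#endif
	printf("old style: %d\n", used);
}

/* New style: annotate the parameter; the compiler suppresses the warning. */
static void
new_style(int used, int unused_param __unused)
{
	printf("new style: %d\n", used);
}

int
main(void)
{
	old_style(1, 2);
	new_style(3, 4);
	return (0);
}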
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index ba6f2ebfc730..c379301f866f 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1951,10 +1951,10 @@ error:
* Routine:
* vm_fault_copy_entry
* Function:
- * Create new shadow object backing dst_entry with private copy of
- * all underlying pages. When src_entry is equal to dst_entry,
- * function implements COW for wired-down map entry. Otherwise,
- * it forks wired entry into dst_map.
+ * Create new object backing dst_entry with private copy of all
+ * underlying pages. When src_entry is equal to dst_entry, function
+ * implements COW for wired-down map entry. Otherwise, it forks
+ * wired entry into dst_map.
*
* In/out conditions:
* The source and destination maps must be locked for write.
@@ -1962,7 +1962,7 @@ error:
* entry corresponding to a main map entry that is wired down).
*/
void
-vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
+vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map __unused,
vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
vm_ooffset_t *fork_charge)
{
@@ -1972,14 +1972,25 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
vm_offset_t vaddr;
vm_page_t dst_m;
vm_page_t src_m;
- boolean_t upgrade;
-
-#ifdef lint
- src_map++;
-#endif /* lint */
+ bool upgrade;
upgrade = src_entry == dst_entry;
+ KASSERT(upgrade || dst_entry->object.vm_object == NULL,
+ ("vm_fault_copy_entry: vm_object not NULL"));
+
+ /*
+ * If not an upgrade, then enter the mappings in the pmap as
+ * read and/or execute accesses. Otherwise, enter them as
+ * write accesses.
+ *
+ * A writeable large page mapping is only created if all of
+ * the constituent small page mappings are modified. Marking
+ * PTEs as modified on inception allows promotion to happen
+ * without taking potentially large number of soft faults.
+ */
access = prot = dst_entry->protection;
+ if (!upgrade)
+ access &= ~VM_PROT_WRITE;
src_object = src_entry->object.vm_object;
src_pindex = OFF_TO_IDX(src_entry->offset);
@@ -2001,16 +2012,13 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
#endif
dst_object->domain = src_object->domain;
dst_object->charge = dst_entry->end - dst_entry->start;
- }
- VM_OBJECT_WLOCK(dst_object);
- KASSERT(upgrade || dst_entry->object.vm_object == NULL,
- ("vm_fault_copy_entry: vm_object not NULL"));
- if (src_object != dst_object) {
dst_entry->object.vm_object = dst_object;
dst_entry->offset = 0;
dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
}
+
+ VM_OBJECT_WLOCK(dst_object);
if (fork_charge != NULL) {
KASSERT(dst_entry->cred == NULL,
("vm_fault_copy_entry: leaked swp charge"));
@@ -2026,19 +2034,6 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
dst_entry->cred = NULL;
}
- /*
- * If not an upgrade, then enter the mappings in the pmap as
- * read and/or execute accesses. Otherwise, enter them as
- * write accesses.
- *
- * A writeable large page mapping is only created if all of
- * the constituent small page mappings are modified. Marking
- * PTEs as modified on inception allows promotion to happen
- * without taking potentially large number of soft faults.
- */
- if (!upgrade)
- access &= ~VM_PROT_WRITE;
-
/*
* Loop through all of the virtual pages within the entry's
* range, copying each page from the source object to the