svn commit: r333091 - head/sys/vm
Konstantin Belousov
kib at FreeBSD.org
Sun Apr 29 12:43:09 UTC 2018
Author: kib
Date: Sun Apr 29 12:43:08 2018
New Revision: 333091
URL: https://svnweb.freebsd.org/changeset/base/333091
Log:
Eliminate some vm object relocks in vm fault.
For the vm_fault_prefault() call from vm_fault_soft_fast(), extend the
scope of the object rlock to avoid re-taking it inside
vm_fault_prefault(). This causes pmap_enter_quick() to sometimes be
called with the shadow object lock held as well as the page lock, but
this looks innocent.
Noted and measured by: mjg
Reviewed by: alc, markj (as part of the larger patch)
Tested by: pho (as part of the larger patch)
Sponsored by: The FreeBSD Foundation
MFC after: 1 week
Differential revision: https://reviews.freebsd.org/D15122
Modified:
head/sys/vm/vm_fault.c
Modified: head/sys/vm/vm_fault.c
==============================================================================
--- head/sys/vm/vm_fault.c Sun Apr 29 11:46:20 2018 (r333090)
+++ head/sys/vm/vm_fault.c Sun Apr 29 12:43:08 2018 (r333091)
@@ -132,7 +132,7 @@ struct faultstate {
static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
- int backward, int forward);
+ int backward, int forward, bool obj_locked);
static inline void
release_page(struct faultstate *fs)
@@ -320,9 +320,9 @@ vm_fault_soft_fast(struct faultstate *fs, vm_offset_t
return (rv);
vm_fault_fill_hold(m_hold, m);
vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, false);
- VM_OBJECT_RUNLOCK(fs->first_object);
if (psind == 0 && !wired)
- vm_fault_prefault(fs, vaddr, PFBAK, PFFOR);
+ vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
+ VM_OBJECT_RUNLOCK(fs->first_object);
vm_map_lookup_done(fs->map, fs->entry);
curthread->td_ru.ru_minflt++;
return (KERN_SUCCESS);
@@ -1262,7 +1262,7 @@ readrest:
wired == 0)
vm_fault_prefault(&fs, vaddr,
faultcount > 0 ? behind : PFBAK,
- faultcount > 0 ? ahead : PFFOR);
+ faultcount > 0 ? ahead : PFFOR, false);
VM_OBJECT_WLOCK(fs.object);
vm_page_lock(fs.m);
@@ -1395,7 +1395,7 @@ vm_fault_dontneed(const struct faultstate *fs, vm_offs
*/
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
- int backward, int forward)
+ int backward, int forward, bool obj_locked)
{
pmap_t pmap;
vm_map_entry_t entry;
@@ -1441,7 +1441,8 @@ vm_fault_prefault(const struct faultstate *fs, vm_offs
pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
lobject = entry->object.vm_object;
- VM_OBJECT_RLOCK(lobject);
+ if (!obj_locked)
+ VM_OBJECT_RLOCK(lobject);
while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
lobject->type == OBJT_DEFAULT &&
(backing_object = lobject->backing_object) != NULL) {
@@ -1449,17 +1450,20 @@ vm_fault_prefault(const struct faultstate *fs, vm_offs
0, ("vm_fault_prefault: unaligned object offset"));
pindex += lobject->backing_object_offset >> PAGE_SHIFT;
VM_OBJECT_RLOCK(backing_object);
- VM_OBJECT_RUNLOCK(lobject);
+ if (!obj_locked || lobject != entry->object.vm_object)
+ VM_OBJECT_RUNLOCK(lobject);
lobject = backing_object;
}
if (m == NULL) {
- VM_OBJECT_RUNLOCK(lobject);
+ if (!obj_locked || lobject != entry->object.vm_object)
+ VM_OBJECT_RUNLOCK(lobject);
break;
}
if (m->valid == VM_PAGE_BITS_ALL &&
(m->flags & PG_FICTITIOUS) == 0)
pmap_enter_quick(pmap, addr, m, entry->protection);
- VM_OBJECT_RUNLOCK(lobject);
+ if (!obj_locked || lobject != entry->object.vm_object)
+ VM_OBJECT_RUNLOCK(lobject);
}
}
More information about the svn-src-all
mailing list