svn commit: r327701 - stable/11/sys/vm
Mark Johnston
markj at FreeBSD.org
Mon Jan 8 16:36:35 UTC 2018
Author: markj
Date: Mon Jan 8 16:36:33 2018
New Revision: 327701
URL: https://svnweb.freebsd.org/changeset/base/327701
Log:
MFC r322547:
Add vm_page_alloc_after().
Modified:
stable/11/sys/vm/vm_kern.c
stable/11/sys/vm/vm_page.c
stable/11/sys/vm/vm_page.h
Directory Properties:
stable/11/ (props changed)
Modified: stable/11/sys/vm/vm_kern.c
==============================================================================
--- stable/11/sys/vm/vm_kern.c Mon Jan 8 15:56:40 2018 (r327700)
+++ stable/11/sys/vm/vm_kern.c Mon Jan 8 16:36:33 2018 (r327701)
@@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
+#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
@@ -329,7 +330,7 @@ int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
vm_offset_t offset, i;
- vm_page_t m;
+ vm_page_t m, mpred;
int pflags;
KASSERT(object == kmem_object || object == kernel_object,
@@ -338,10 +339,13 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_siz
offset = addr - VM_MIN_KERNEL_ADDRESS;
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
- VM_OBJECT_WLOCK(object);
- for (i = 0; i < size; i += PAGE_SIZE) {
+ i = 0;
retry:
- m = vm_page_alloc(object, atop(offset + i), pflags);
+ VM_OBJECT_WLOCK(object);
+ mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
+ for (; i < size; i += PAGE_SIZE, mpred = m) {
+ m = vm_page_alloc_after(object, atop(offset + i), pflags,
+ mpred);
/*
* Ran out of space, free everything up and return. Don't need
@@ -352,7 +356,6 @@ retry:
VM_OBJECT_WUNLOCK(object);
if ((flags & M_NOWAIT) == 0) {
VM_WAIT;
- VM_OBJECT_WLOCK(object);
goto retry;
}
kmem_unback(object, addr, i);
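
For context, a minimal sketch of the allocation pattern kmem_back() adopts above:
the predecessor page is looked up once, then carried through the loop so each
vm_page_alloc_after() call can insert its page without another radix lookup. The
helper name and the error handling are illustrative only, not part of the commit,
and the sketch assumes the target range is not already resident, as in kmem_back().

static int
alloc_run(vm_object_t object, vm_pindex_t pindex, int npages, int pflags)
{
        vm_page_t m, mpred;
        int i;

        VM_OBJECT_WLOCK(object);
        /* One radix lookup for the whole run. */
        mpred = vm_radix_lookup_le(&object->rtree, pindex);
        for (i = 0; i < npages; i++, mpred = m) {
                m = vm_page_alloc_after(object, pindex + i, pflags, mpred);
                if (m == NULL) {
                        /* A real caller would unwind pages 0..i-1 here. */
                        VM_OBJECT_WUNLOCK(object);
                        return (ENOMEM);
                }
        }
        VM_OBJECT_WUNLOCK(object);
        return (0);
}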
Modified: stable/11/sys/vm/vm_page.c
==============================================================================
--- stable/11/sys/vm/vm_page.c Mon Jan 8 15:56:40 2018 (r327700)
+++ stable/11/sys/vm/vm_page.c Mon Jan 8 16:36:33 2018 (r327701)
@@ -1583,15 +1583,32 @@ vm_page_rename(vm_page_t m, vm_object_t new_object, vm
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
- vm_page_t m, mpred;
+
+ return (vm_page_alloc_after(object, pindex, req, object != NULL ?
+ vm_radix_lookup_le(&object->rtree, pindex) : NULL));
+}
+
+/*
+ * Allocate a page in the specified object with the given page index. To
+ * optimize insertion of the page into the object, the caller must also specify
+ * the resident page in the object with largest index smaller than the given
+ * page index, or NULL if no such page exists.
+ */
+vm_page_t
+vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
+ vm_page_t mpred)
+{
+ vm_page_t m;
int flags, req_class;
- mpred = NULL; /* XXX: pacify gcc */
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
(object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
- ("vm_page_alloc: inconsistent object(%p)/req(%x)", object, req));
+ ("inconsistent object(%p)/req(%x)", object, req));
+ KASSERT(mpred == NULL || mpred->pindex < pindex,
+ ("mpred %p doesn't precede pindex 0x%jx", mpred,
+ (uintmax_t)pindex));
if (object != NULL)
VM_OBJECT_ASSERT_WLOCKED(object);
@@ -1606,12 +1623,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex,
if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
req_class = VM_ALLOC_SYSTEM;
- if (object != NULL) {
- mpred = vm_radix_lookup_le(&object->rtree, pindex);
- KASSERT(mpred == NULL || mpred->pindex != pindex,
- ("vm_page_alloc: pindex already allocated"));
- }
-
/*
* Allocate a page if the number of free pages exceeds the minimum
* for the request class.
@@ -1658,7 +1669,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex,
/*
* At this point we had better have found a good page.
*/
- KASSERT(m != NULL, ("vm_page_alloc: missing page"));
+ KASSERT(m != NULL, ("missing page"));
vm_phys_freecnt_adj(m, -1);
if ((m->flags & PG_ZERO) != 0)
vm_page_zero_count--;
@@ -3261,7 +3272,7 @@ int
vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
vm_page_t *ma, int count)
{
- vm_page_t m;
+ vm_page_t m, mpred;
int i;
bool sleep;
@@ -3278,7 +3289,12 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pin
return (0);
i = 0;
retrylookup:
- m = vm_page_lookup(object, pindex + i);
+ m = vm_radix_lookup_le(&object->rtree, pindex + i);
+ if (m == NULL || m->pindex != pindex + i) {
+ mpred = m;
+ m = NULL;
+ } else
+ mpred = TAILQ_PREV(m, pglist, listq);
for (; i < count; i++) {
if (m != NULL) {
sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
@@ -3310,8 +3326,9 @@ retrylookup:
if ((allocflags & VM_ALLOC_SBUSY) != 0)
vm_page_sbusy(m);
} else {
- m = vm_page_alloc(object, pindex + i, (allocflags &
- ~VM_ALLOC_IGN_SBUSY) | VM_ALLOC_COUNT(count - i));
+ m = vm_page_alloc_after(object, pindex + i,
+ (allocflags & ~VM_ALLOC_IGN_SBUSY) |
+ VM_ALLOC_COUNT(count - i), mpred);
if (m == NULL) {
if ((allocflags & VM_ALLOC_NOWAIT) != 0)
break;
@@ -3326,7 +3343,7 @@ retrylookup:
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
}
- ma[i] = m;
+ ma[i] = mpred = m;
m = vm_page_next(m);
}
return (i);
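
The vm_page_grab_pages() change above also covers the case where the page at the
requested index may already be resident: a single vm_radix_lookup_le() either finds
that page (whose list predecessor then serves as mpred) or yields the predecessor
directly. A sketch of that dispatch as a standalone helper, with a hypothetical
name, purely for illustration:

static vm_page_t
lookup_or_mpred(vm_object_t object, vm_pindex_t pindex, vm_page_t *mpredp)
{
        vm_page_t m;

        VM_OBJECT_ASSERT_WLOCKED(object);
        m = vm_radix_lookup_le(&object->rtree, pindex);
        if (m != NULL && m->pindex == pindex) {
                /* Resident: mpred is the previous page on the object's list. */
                *mpredp = TAILQ_PREV(m, pglist, listq);
                return (m);
        }
        /* Not resident: the lookup result (possibly NULL) is the predecessor. */
        *mpredp = m;
        return (NULL);
}

Either way, the returned predecessor can be passed straight to
vm_page_alloc_after() if an allocation turns out to be needed.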
Modified: stable/11/sys/vm/vm_page.h
==============================================================================
--- stable/11/sys/vm/vm_page.h Mon Jan 8 15:56:40 2018 (r327700)
+++ stable/11/sys/vm/vm_page.h Mon Jan 8 16:36:33 2018 (r327701)
@@ -462,7 +462,8 @@ void vm_page_free_zero(vm_page_t m);
void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
-vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
+vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
+vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
vm_paddr_t boundary, vm_memattr_t memattr);