svn commit: r238245 - in user/attilio/vmc-playground/sys:
cddl/compat/opensolaris/sys
cddl/contrib/opensolaris/uts/common/fs/zfs fs/tmpfs kern vm
Attilio Rao
attilio at FreeBSD.org
Sun Jul 8 14:01:26 UTC 2012
Author: attilio
Date: Sun Jul 8 14:01:25 2012
New Revision: 238245
URL: http://svn.freebsd.org/changeset/base/238245
Log:
- Split the cached and resident pages tree into 2 distinct ones.
This makes the RED/BLACK support go away and greatly simplifies the
vmradix functions used here. This happens because, with patricia trie
support, the trie will be small enough that keeping 2 different tries
will be efficient too.
- Reduce differences with head, in places like the backing scan, where the
optimizations used shuffled the code around a little bit.
Tested by: flo, Andrea Barberio
Modified:
user/attilio/vmc-playground/sys/cddl/compat/opensolaris/sys/vnode.h
user/attilio/vmc-playground/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
user/attilio/vmc-playground/sys/fs/tmpfs/tmpfs_vnops.c
user/attilio/vmc-playground/sys/kern/uipc_shm.c
user/attilio/vmc-playground/sys/vm/vm_mmap.c
user/attilio/vmc-playground/sys/vm/vm_object.c
user/attilio/vmc-playground/sys/vm/vm_object.h
user/attilio/vmc-playground/sys/vm/vm_page.c
user/attilio/vmc-playground/sys/vm/vm_page.h
user/attilio/vmc-playground/sys/vm/vm_radix.c
user/attilio/vmc-playground/sys/vm/vm_radix.h
user/attilio/vmc-playground/sys/vm/vm_reserv.c
user/attilio/vmc-playground/sys/vm/vnode_pager.c
Modified: user/attilio/vmc-playground/sys/cddl/compat/opensolaris/sys/vnode.h
==============================================================================
--- user/attilio/vmc-playground/sys/cddl/compat/opensolaris/sys/vnode.h Sun Jul 8 12:39:02 2012 (r238244)
+++ user/attilio/vmc-playground/sys/cddl/compat/opensolaris/sys/vnode.h Sun Jul 8 14:01:25 2012 (r238245)
@@ -75,7 +75,7 @@ vn_is_readonly(vnode_t *vp)
#define vn_mountedvfs(vp) ((vp)->v_mountedhere)
#define vn_has_cached_data(vp) \
((vp)->v_object != NULL && \
- (vp)->v_object->cached_page_count > 0)
+ !vm_object_cache_is_empty((vp)->v_object))
#define vn_exists(vp) do { } while (0)
#define vn_invalid(vp) do { } while (0)
#define vn_renamepath(tdvp, svp, tnm, lentnm) do { } while (0)
Modified: user/attilio/vmc-playground/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
==============================================================================
--- user/attilio/vmc-playground/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c Sun Jul 8 12:39:02 2012 (r238244)
+++ user/attilio/vmc-playground/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c Sun Jul 8 14:01:25 2012 (r238245)
@@ -330,32 +330,29 @@ page_lookup(vnode_t *vp, int64_t start,
VM_OBJECT_LOCK_ASSERT(obj, MA_OWNED);
for (;;) {
- pp = vm_radix_lookup(&obj->rtree, OFF_TO_IDX(start),
- VM_RADIX_ANY);
- if (pp != NULL) {
- if (vm_page_is_valid(pp, (vm_offset_t)off, nbytes)) {
- if ((pp->oflags & VPO_BUSY) != 0) {
- /*
- * Reference the page before unlocking
- * and sleeping so that the page
- * daemon is less likely to reclaim it.
- */
- vm_page_reference(pp);
- vm_page_sleep(pp, "zfsmwb");
- continue;
- }
- vm_page_busy(pp);
- vm_page_undirty(pp);
- } else {
- if (obj->cached_page_count != 0 &&
- (pp->flags & PG_CACHED) != 0) {
- mtx_lock(&vm_page_queue_free_mtx);
- if (pp->object == obj)
- vm_page_cache_free(pp);
- mtx_unlock(&vm_page_queue_free_mtx);
- }
- pp = NULL;
+ if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
+ vm_page_is_valid(pp, (vm_offset_t)off, nbytes)) {
+ if ((pp->oflags & VPO_BUSY) != 0) {
+ /*
+ * Reference the page before unlocking and
+ * sleeping so that the page daemon is less
+ * likely to reclaim it.
+ */
+ vm_page_reference(pp);
+ vm_page_sleep(pp, "zfsmwb");
+ continue;
+ }
+ vm_page_busy(pp);
+ vm_page_undirty(pp);
+ } else {
+ pp = vm_page_is_cached(obj, OFF_TO_IDX(start));
+ if (pp != NULL) {
+ mtx_lock(&vm_page_queue_free_mtx);
+ if (pp->object == obj)
+ vm_page_cache_free(pp);
+ mtx_unlock(&vm_page_queue_free_mtx);
}
+ pp = NULL;
}
break;
}
Modified: user/attilio/vmc-playground/sys/fs/tmpfs/tmpfs_vnops.c
==============================================================================
--- user/attilio/vmc-playground/sys/fs/tmpfs/tmpfs_vnops.c Sun Jul 8 12:39:02 2012 (r238244)
+++ user/attilio/vmc-playground/sys/fs/tmpfs/tmpfs_vnops.c Sun Jul 8 14:01:25 2012 (r238245)
@@ -514,7 +514,7 @@ tmpfs_mappedread(vm_object_t vobj, vm_ob
goto nocache;
VM_OBJECT_LOCK(vobj);
- if (vobj->resident_page_count == 0 && vobj->cached_page_count == 0) {
+ if (vobj->resident_page_count == 0 && vm_object_cache_is_empty(vobj)) {
VM_OBJECT_UNLOCK(vobj);
goto nocache;
}
@@ -647,41 +647,38 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_o
}
VM_OBJECT_LOCK(vobj);
- if (vobj->resident_page_count == 0 && vobj->cached_page_count == 0) {
+ if (vobj->resident_page_count == 0 && vm_object_cache_is_empty(vobj)) {
VM_OBJECT_UNLOCK(vobj);
vpg = NULL;
goto nocache;
}
lookupvpg:
- vpg = vm_radix_lookup(&vobj->rtree, idx, VM_RADIX_ANY);
- if (vpg != NULL) {
- if (vm_page_is_valid(vpg, offset, tlen)) {
- if ((vpg->oflags & VPO_BUSY) != 0) {
- /*
- * Reference the page before unlocking and
- * sleeping so that the page daemon is less
- * likely to reclaim it.
- */
- vm_page_reference(vpg);
- vm_page_sleep(vpg, "tmfsmw");
- goto lookupvpg;
- }
- vm_page_busy(vpg);
- vm_page_undirty(vpg);
- VM_OBJECT_UNLOCK(vobj);
- error = uiomove_fromphys(&vpg, offset, tlen, uio);
- } else {
- if (vpg->flags & PG_CACHED) {
- mtx_lock(&vm_page_queue_free_mtx);
- if (vpg->object == vobj)
- vm_page_cache_free(vpg);
- mtx_unlock(&vm_page_queue_free_mtx);
- }
- VM_OBJECT_UNLOCK(vobj);
- vpg = NULL;
+ if (((vpg = vm_radix_lookup(&vobj->rtree, idx)) != NULL) &&
+ vm_page_is_valid(vpg, offset, tlen)) {
+ if ((vpg->oflags & VPO_BUSY) != 0) {
+ /*
+ * Reference the page before unlocking and sleeping so
+ * that the page daemon is less likely to reclaim it.
+ */
+ vm_page_reference(vpg);
+ vm_page_sleep(vpg, "tmfsmw");
+ goto lookupvpg;
}
- } else
+ vm_page_busy(vpg);
+ vm_page_undirty(vpg);
VM_OBJECT_UNLOCK(vobj);
+ error = uiomove_fromphys(&vpg, offset, tlen, uio);
+ } else {
+ vpg = vm_page_is_cached(vobj, idx);
+ if (vpg != NULL) {
+ mtx_lock(&vm_page_queue_free_mtx);
+ if (vpg->object == vobj)
+ vm_page_cache_free(vpg);
+ mtx_unlock(&vm_page_queue_free_mtx);
+ }
+ VM_OBJECT_UNLOCK(vobj);
+ vpg = NULL;
+ }
nocache:
VM_OBJECT_LOCK(tobj);
tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
Modified: user/attilio/vmc-playground/sys/kern/uipc_shm.c
==============================================================================
--- user/attilio/vmc-playground/sys/kern/uipc_shm.c Sun Jul 8 12:39:02 2012 (r238244)
+++ user/attilio/vmc-playground/sys/kern/uipc_shm.c Sun Jul 8 14:01:25 2012 (r238245)
@@ -278,8 +278,7 @@ shm_dotruncate(struct shmfd *shmfd, off_
if (base != 0) {
idx = OFF_TO_IDX(length);
retry:
- m = vm_radix_lookup(&object->rtree, idx,
- VM_RADIX_BLACK);
+ m = vm_radix_lookup(&object->rtree, idx);
if (m != NULL) {
if ((m->oflags & VPO_BUSY) != 0 ||
m->busy != 0) {
Modified: user/attilio/vmc-playground/sys/vm/vm_mmap.c
==============================================================================
--- user/attilio/vmc-playground/sys/vm/vm_mmap.c Sun Jul 8 12:39:02 2012 (r238244)
+++ user/attilio/vmc-playground/sys/vm/vm_mmap.c Sun Jul 8 14:01:25 2012 (r238245)
@@ -81,7 +81,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
-#include <vm/vm_radix.h>
#include <vm/vnode_pager.h>
#ifdef HWPMC_HOOKS
@@ -888,15 +887,10 @@ RestartScan:
object->type == OBJT_VNODE) {
pindex = OFF_TO_IDX(current->offset +
(addr - current->start));
- m = vm_radix_lookup(&object->rtree,
- pindex, VM_RADIX_ANY);
-
- /* Lock just for consistency. */
- mtx_lock(&vm_page_queue_free_mtx);
- if (m != NULL &&
- (m->flags & PG_CACHED) != 0)
+ m = vm_page_lookup(object, pindex);
+ if (m == NULL &&
+ vm_page_is_cached(object, pindex))
mincoreinfo = MINCORE_INCORE;
- mtx_unlock(&vm_page_queue_free_mtx);
if (m != NULL && m->valid == 0)
m = NULL;
if (m != NULL)
Modified: user/attilio/vmc-playground/sys/vm/vm_object.c
==============================================================================
--- user/attilio/vmc-playground/sys/vm/vm_object.c Sun Jul 8 12:39:02 2012 (r238244)
+++ user/attilio/vmc-playground/sys/vm/vm_object.c Sun Jul 8 14:01:25 2012 (r238245)
@@ -164,9 +164,6 @@ vm_object_zdtor(void *mem, int size, voi
vm_object_t object;
object = (vm_object_t)mem;
- KASSERT(object->resident_page_count == 0,
- ("object %p resident_page_count = %d",
- object, object->resident_page_count));
KASSERT(TAILQ_EMPTY(&object->memq),
("object %p has resident pages",
object));
@@ -175,12 +172,15 @@ vm_object_zdtor(void *mem, int size, voi
("object %p has reservations",
object));
#endif
- KASSERT(object->cached_page_count == 0,
+ KASSERT(vm_object_cache_is_empty(object),
("object %p has cached pages",
object));
KASSERT(object->paging_in_progress == 0,
("object %p paging_in_progress = %d",
object, object->paging_in_progress));
+ KASSERT(object->resident_page_count == 0,
+ ("object %p resident_page_count = %d",
+ object, object->resident_page_count));
KASSERT(object->shadow_count == 0,
("object %p shadow_count = %d",
object, object->shadow_count));
@@ -211,7 +211,6 @@ _vm_object_allocate(objtype_t type, vm_p
LIST_INIT(&object->shadow_head);
object->rtree.rt_root = 0;
- object->cache.rt_root = 0;
object->type = type;
object->size = size;
object->generation = 1;
@@ -229,6 +228,7 @@ _vm_object_allocate(objtype_t type, vm_p
#if VM_NRESERVLEVEL > 0
LIST_INIT(&object->rvq);
#endif
+ object->cache.rt_root = 0;
mtx_lock(&vm_object_list_mtx);
TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
@@ -680,6 +680,8 @@ vm_object_terminate(vm_object_t object)
vm_page_t pa[VM_RADIX_STACK];
vm_page_t p;
vm_pindex_t start;
+ struct vnode *vp;
+ u_int exhausted;
int n, i;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
@@ -726,32 +728,15 @@ vm_object_terminate(vm_object_t object)
* the object, the page and object are reset to any empty state.
*/
start = 0;
- while ((n = vm_radix_lookupn(&object->rtree, start, 0, VM_RADIX_ANY,
- (void **)pa, VM_RADIX_STACK, &start)) != 0) {
+ exhausted = 0;
+ while (exhausted == 0 && (n = vm_radix_lookupn(&object->rtree, start,
+ 0, (void **)pa, VM_RADIX_STACK, &start, &exhausted)) != 0) {
for (i = 0; i < n; i++) {
p = pa[i];
- /*
- * Another thread may allocate this cached page from
- * the queue before we acquire the page queue free
- * mtx.
- */
- if (p->flags & PG_CACHED) {
- mtx_lock(&vm_page_queue_free_mtx);
- if (p->object == object) {
- p->object = NULL;
- p->valid = 0;
- /* Clear PG_CACHED and set PG_FREE. */
- p->flags ^= PG_CACHED | PG_FREE;
- cnt.v_cache_count--;
- cnt.v_free_count++;
- }
- mtx_unlock(&vm_page_queue_free_mtx);
- continue;
- } else if (p->object != object)
- continue;
KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0,
("vm_object_terminate: freeing busy page %p", p));
vm_page_lock(p);
+
/*
* Optimize the page's removal from the object by
* resetting its "object" field. Specifically, if
@@ -774,7 +759,40 @@ vm_object_terminate(vm_object_t object)
break;
}
vm_radix_reclaim_allnodes(&object->rtree);
- vm_radix_reclaim_allnodes(&object->cache);
+ vp = NULL;
+ if (!vm_object_cache_is_empty(object)) {
+ mtx_lock(&vm_page_queue_free_mtx);
+ start = 0;
+ exhausted = 0;
+ while (exhausted == 0 && (n = vm_radix_lookupn(&object->cache,
+ start, 0, (void **)pa, VM_RADIX_STACK, &start,
+ &exhausted)) != 0) {
+ for (i = 0; i < n; i++) {
+ p = pa[i];
+ MPASS(p->object == object);
+ p->object = NULL;
+ p->valid = 0;
+
+ /* Clear PG_CACHED and set PG_FREE. */
+ p->flags ^= PG_CACHED | PG_FREE;
+ cnt.v_cache_count--;
+ cnt.v_free_count++;
+
+ /*
+ * At least one cached page was removed and
+ * in the end all the cached pages will be
+ * reclaimed. If the object is a vnode,
+ * drop a reference to it.
+ */
+ if (object->type == OBJT_VNODE)
+ vp = object->handle;
+ }
+ if (n < VM_RADIX_STACK)
+ break;
+ }
+ vm_radix_reclaim_allnodes(&object->cache);
+ mtx_unlock(&vm_page_queue_free_mtx);
+ }
/*
* If the object contained any pages, then reset it to an empty state.
* None of the object's fields, including "resident_page_count", were
@@ -786,11 +804,8 @@ vm_object_terminate(vm_object_t object)
if (object->type == OBJT_VNODE)
vdrop(object->handle);
}
- if (object->cached_page_count != 0) {
- object->cached_page_count = 0;
- if (object->type == OBJT_VNODE)
- vdrop(object->handle);
- }
+ if (vp)
+ vdrop(vp);
#if VM_NRESERVLEVEL > 0
if (__predict_false(!LIST_EMPTY(&object->rvq)))
@@ -1317,6 +1332,7 @@ vm_object_split(vm_map_entry_t entry)
vm_object_t orig_object, new_object, source;
vm_pindex_t idx, offidxstart, start;
vm_size_t size;
+ u_int exhausted;
int i, n;
orig_object = entry->object.vm_object;
@@ -1372,21 +1388,14 @@ vm_object_split(vm_map_entry_t entry)
}
start = offidxstart;
retry:
- while ((n = vm_radix_lookupn(&orig_object->rtree, start,
- offidxstart + size, VM_RADIX_ANY, (void **)ma, VM_RADIX_STACK,
- &start)) != 0) {
+ exhausted = 0;
+ while (exhausted == 0 && (n = vm_radix_lookupn(&orig_object->rtree,
+ start, offidxstart + size, (void **)ma, VM_RADIX_STACK, &start,
+ &exhausted)) != 0) {
for (i = 0; i < n; i++) {
m = ma[i];
idx = m->pindex - offidxstart;
- if (m->flags & PG_CACHED) {
- mtx_lock(&vm_page_queue_free_mtx);
- if (m->object == orig_object)
- vm_page_cache_rename(m, new_object,
- idx);
- mtx_unlock(&vm_page_queue_free_mtx);
- continue;
- } else if (m->object != orig_object)
- continue;
+
/*
* We must wait for pending I/O to complete before
* we can rename the page.
@@ -1438,6 +1447,29 @@ retry:
* and new_object's locks are released and reacquired.
*/
swap_pager_copy(orig_object, new_object, offidxstart, 0);
+
+ /*
+ * Transfer any cached pages from orig_object to new_object.
+ */
+ if (!vm_object_cache_is_empty(orig_object)) {
+ start = offidxstart;
+ exhausted = 0;
+ mtx_lock(&vm_page_queue_free_mtx);
+ while (exhausted == 0 &&
+ (n = vm_radix_lookupn(&orig_object->cache, start,
+ offidxstart + size, (void **)ma, VM_RADIX_STACK,
+ &start, &exhausted)) != 0) {
+ for (i = 0; i < n; i++) {
+ m = ma[i];
+ idx = m->pindex - offidxstart;
+ vm_page_cache_rename(m, new_object,
+ idx);
+ }
+ if (n < VM_RADIX_STACK)
+ break;
+ }
+ mtx_unlock(&vm_page_queue_free_mtx);
+ }
}
VM_OBJECT_UNLOCK(orig_object);
TAILQ_FOREACH(m, &new_object->memq, listq)
@@ -1461,7 +1493,8 @@ vm_object_backing_scan(vm_object_t objec
vm_object_t backing_object;
vm_pindex_t backing_offset_index, new_pindex;
vm_pindex_t start;
- int color, i, n;
+ u_int exhausted;
+ int i, n;
int r = 1;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
@@ -1490,37 +1523,25 @@ vm_object_backing_scan(vm_object_t objec
if (op & OBSC_COLLAPSE_WAIT) {
vm_object_set_flag(backing_object, OBJ_DEAD);
}
- color = VM_RADIX_BLACK;
- if (op & OBSC_COLLAPSE_WAIT)
- color |= VM_RADIX_RED;
/*
* Our scan
*/
restart:
start = 0;
i = n = VM_RADIX_STACK;
+ exhausted = 0;
for (;;) {
if (i == n) {
if (n < VM_RADIX_STACK)
break;
- if ((n = vm_radix_lookupn(&backing_object->rtree,
- start, 0, color, (void **)pa, VM_RADIX_STACK,
- &start)) == 0)
+ if (exhausted != 0 ||
+ (n = vm_radix_lookupn(&backing_object->rtree,
+ start, 0, (void **)pa, VM_RADIX_STACK,
+ &start, &exhausted)) == 0)
break;
i = 0;
}
p = pa[i++];
- /*
- * Free cached pages. XXX Why? Emulating old behavior here.
- */
- if (p->flags & PG_CACHED) {
- mtx_lock(&vm_page_queue_free_mtx);
- if (p->object == backing_object)
- vm_page_cache_free(p);
- mtx_unlock(&vm_page_queue_free_mtx);
- continue;
- } else if (p->object != backing_object)
- continue;
new_pindex = p->pindex - backing_offset_index;
if (op & OBSC_TEST_ALL_SHADOWED) {
@@ -1716,6 +1737,11 @@ vm_object_qcollapse(vm_object_t object)
void
vm_object_collapse(vm_object_t object)
{
+ vm_page_t pa[VM_RADIX_STACK];
+ vm_pindex_t start;
+ u_int exhausted;
+ int i, n;
+
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
while (TRUE) {
@@ -1792,7 +1818,30 @@ vm_object_collapse(vm_object_t object)
backing_object,
object,
OFF_TO_IDX(object->backing_object_offset), TRUE);
+
+ if (!vm_object_cache_is_empty(backing_object)) {
+
+ /*
+ * Free any cached pages from
+ * backing_object.
+ */
+ start = 0;
+ exhausted = 0;
+ mtx_lock(&vm_page_queue_free_mtx);
+ while (exhausted == 0 && (n =
+ vm_radix_lookupn(&backing_object->cache,
+ start, 0, (void **)pa,
+ VM_RADIX_STACK, &start,
+ &exhausted)) != 0) {
+ for (i = 0; i < n; i++)
+ vm_page_cache_free(pa[i]);
+ if (n < VM_RADIX_STACK)
+ break;
+ }
+ mtx_unlock(&vm_page_queue_free_mtx);
+ }
}
+
/*
* Object now shadows whatever backing_object did.
* Note that the reference to
@@ -1914,7 +1963,9 @@ vm_object_page_remove(vm_object_t object
{
struct vnode *vp;
vm_page_t pa[VM_RADIX_STACK];
+ vm_pindex_t cstart;
vm_page_t p;
+ u_int exhausted;
int i, n;
int wirings;
@@ -1922,32 +1973,19 @@ vm_object_page_remove(vm_object_t object
KASSERT((object->type != OBJT_DEVICE && object->type != OBJT_PHYS) ||
(options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
("vm_object_page_remove: illegal options for object %p", object));
- if (object->resident_page_count == 0 && object->cached_page_count == 0)
+ if (object->resident_page_count == 0 &&
+ vm_object_cache_is_empty(object))
return;
vp = NULL;
vm_object_pip_add(object, 1);
+ cstart = start;
restart:
- while ((n = vm_radix_lookupn(&object->rtree, start, end, VM_RADIX_ANY,
- (void **)pa, VM_RADIX_STACK, &start)) != 0) {
+ exhausted = 0;
+ while (exhausted == 0 && (n = vm_radix_lookupn(&object->rtree, start,
+ end, (void **)pa, VM_RADIX_STACK, &start, &exhausted)) != 0) {
for (i = 0; i < n; i++) {
p = pa[i];
- /*
- * Another thread may allocate this cached page from
- * the queue before we acquire the page queue free
- * mtx.
- */
- if (p->flags & PG_CACHED) {
- mtx_lock(&vm_page_queue_free_mtx);
- if (p->object == object) {
- vm_page_cache_free(p);
- if (object->type == OBJT_VNODE &&
- object->cached_page_count == 0)
- vp = object->handle;
- }
- mtx_unlock(&vm_page_queue_free_mtx);
- continue;
- } else if (p->object != object)
- continue;
+
/*
* If the page is wired for any reason besides
* the existence of managed, wired mappings, then
@@ -1978,7 +2016,7 @@ restart:
continue;
}
if (vm_page_sleep_if_busy(p, TRUE, "vmopar")) {
- start = 0;
+ start = cstart;
goto restart;
}
KASSERT((p->flags & PG_FICTITIOUS) == 0,
@@ -2005,6 +2043,25 @@ restart:
break;
}
vm_object_pip_wakeup(object);
+ if (!vm_object_cache_is_empty(object)) {
+ start = cstart;
+ exhausted = 0;
+ mtx_lock(&vm_page_queue_free_mtx);
+ while (exhausted == 0 && (n = vm_radix_lookupn(&object->cache,
+ start, end, (void **)pa, VM_RADIX_STACK, &start,
+ &exhausted)) != 0) {
+ for (i = 0; i < n; i++) {
+ p = pa[i];
+ vm_page_cache_free(p);
+ if (vm_object_cache_is_empty(object) &&
+ object->type == OBJT_VNODE)
+ vp = object->handle;
+ }
+ if (n < VM_RADIX_STACK)
+ break;
+ }
+ mtx_unlock(&vm_page_queue_free_mtx);
+ }
if (vp)
vdrop(vp);
}
@@ -2240,6 +2297,13 @@ vm_object_set_writeable_dirty(vm_object_
vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
}
+int
+vm_object_cache_is_empty(vm_object_t object)
+{
+
+ return (__predict_true(object->cache.rt_root == 0));
+}
+
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>
Modified: user/attilio/vmc-playground/sys/vm/vm_object.h
==============================================================================
--- user/attilio/vmc-playground/sys/vm/vm_object.h Sun Jul 8 12:39:02 2012 (r238244)
+++ user/attilio/vmc-playground/sys/vm/vm_object.h Sun Jul 8 14:01:25 2012 (r238245)
@@ -101,7 +101,6 @@ struct vm_object {
u_short pg_color; /* (c) color of first page in obj */
u_int paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
int resident_page_count; /* number of resident pages */
- int cached_page_count; /* number of cached pages */
struct vm_object *backing_object; /* object that I'm a shadow of */
vm_ooffset_t backing_object_offset;/* Offset in backing object */
TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */
@@ -222,6 +221,7 @@ vm_object_t vm_object_allocate (objtype_
void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t);
boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
boolean_t);
+int vm_object_cache_is_empty (vm_object_t);
void vm_object_collapse (vm_object_t);
void vm_object_deallocate (vm_object_t);
void vm_object_destroy (vm_object_t);
Modified: user/attilio/vmc-playground/sys/vm/vm_page.c
==============================================================================
--- user/attilio/vmc-playground/sys/vm/vm_page.c Sun Jul 8 12:39:02 2012 (r238244)
+++ user/attilio/vmc-playground/sys/vm/vm_page.c Sun Jul 8 14:01:25 2012 (r238245)
@@ -298,7 +298,7 @@ vm_page_startup(vm_offset_t vaddr)
mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
MTX_RECURSE);
mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
- MTX_DEF);
+ MTX_DEF | MTX_RECURSE);
/* Setup page locks. */
for (i = 0; i < PA_LOCK_COUNT; i++)
@@ -835,8 +835,7 @@ vm_page_insert(vm_page_t m, vm_object_t
if (object->resident_page_count == 0) {
TAILQ_INSERT_TAIL(&object->memq, m, listq);
} else {
- neighbor = vm_radix_lookup_ge(&object->rtree, pindex,
- VM_RADIX_BLACK);
+ neighbor = vm_radix_lookup_ge(&object->rtree, pindex);
if (neighbor != NULL) {
KASSERT(pindex < neighbor->pindex,
("vm_page_insert: offset %ju not minor than %ju",
@@ -893,7 +892,7 @@ vm_page_remove(vm_page_t m)
vm_page_flash(m);
}
- vm_radix_remove(&object->rtree, m->pindex, VM_RADIX_BLACK);
+ vm_radix_remove(&object->rtree, m->pindex);
TAILQ_REMOVE(&object->memq, m, listq);
/*
@@ -925,7 +924,7 @@ vm_page_lookup(vm_object_t object, vm_pi
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
- return vm_radix_lookup(&object->rtree, pindex, VM_RADIX_BLACK);
+ return vm_radix_lookup(&object->rtree, pindex);
}
/*
@@ -943,8 +942,7 @@ vm_page_find_least(vm_object_t object, v
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
if (object->resident_page_count)
- return vm_radix_lookup_ge(&object->rtree, pindex,
- VM_RADIX_BLACK);
+ return (vm_radix_lookup_ge(&object->rtree, pindex));
return (NULL);
}
@@ -1026,8 +1024,8 @@ vm_page_cache_lookup(vm_object_t object,
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
- if (object->cached_page_count != 0)
- return vm_radix_lookup(&object->rtree, pindex, VM_RADIX_RED);
+ if (!vm_object_cache_is_empty(object))
+ return (vm_radix_lookup(&object->cache, pindex));
return (NULL);
}
@@ -1044,8 +1042,7 @@ vm_page_cache_remove(vm_page_t m)
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
KASSERT((m->flags & PG_CACHED) != 0,
("vm_page_cache_remove: page %p is not cached", m));
- vm_radix_remove(&m->object->rtree, m->pindex, VM_RADIX_RED);
- m->object->cached_page_count--;
+ vm_radix_remove(&m->object->cache, m->pindex);
m->object = NULL;
cnt.v_cache_count--;
}
@@ -1071,8 +1068,7 @@ vm_page_cache_free(vm_page_t m)
* Replicate vm_page_cache_remove with a version that can collapse
* internal nodes since the object lock is held.
*/
- vm_radix_remove(&object->rtree, m->pindex, VM_RADIX_ANY);
- object->cached_page_count--;
+ vm_radix_remove(&object->cache, m->pindex);
m->object = NULL;
m->valid = 0;
/* Clear PG_CACHED and set PG_FREE. */
@@ -1094,23 +1090,36 @@ vm_page_cache_rename(vm_page_t m, vm_obj
VM_OBJECT_LOCK_ASSERT(orig_object, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
- /*
- * If the insert fails we simply free the cached page.
- */
- if (vm_radix_insert(&new_object->rtree, idx, m) != 0) {
- vm_page_cache_free(m);
- return;
- }
- vm_radix_color(&new_object->rtree, idx, VM_RADIX_RED);
- /*
- * We use any color here though we know it's red so that tree
- * compaction will still work.
- */
- vm_radix_remove(&orig_object->rtree, m->pindex, VM_RADIX_ANY);
+ vm_radix_remove(&orig_object->cache, m->pindex);
+ if (vm_radix_insert(&new_object->cache, idx, m) != 0)
+ panic("vm_page_cache_rename: failed vm_radix_insert");
m->object = new_object;
m->pindex = idx;
- new_object->cached_page_count++;
- orig_object->cached_page_count--;
+}
+
+/*
+ * Returns a pointer to the cached page associated with the given object
+ * and offset, NULL otherwise.
+ *
+ * The object must be locked.
+ */
+vm_page_t
+vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
+{
+ vm_page_t m;
+
+ /*
+ * Insertion into an object's collection of cached pages requires the
+ * object to be locked. Therefore, if the object is locked and the
+ * object's collection is empty, there is no need to acquire the free
+ * page queues lock in order to prove that the specified page doesn't
+ * exist.
+ */
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ mtx_lock(&vm_page_queue_free_mtx);
+ m = vm_page_cache_lookup(object, pindex);
+ mtx_unlock(&vm_page_queue_free_mtx);
+ return (m);
}
/*
@@ -1245,7 +1254,7 @@ vm_page_alloc(vm_object_t object, vm_pin
m_object = m->object;
vm_page_cache_remove(m);
if (m_object->type == OBJT_VNODE &&
- m_object->cached_page_count == 0)
+ vm_object_cache_is_empty(m_object))
vp = m_object->handle;
} else {
KASSERT(VM_PAGE_IS_FREE(m),
@@ -1505,7 +1514,7 @@ vm_page_alloc_init(vm_page_t m)
m_object = m->object;
vm_page_cache_remove(m);
if (m_object->type == OBJT_VNODE &&
- m_object->cached_page_count == 0)
+ vm_object_cache_is_empty(m_object))
drop = m_object->handle;
} else {
KASSERT(VM_PAGE_IS_FREE(m),
@@ -2085,7 +2094,7 @@ void
vm_page_cache(vm_page_t m)
{
vm_object_t object;
- int old_cached;
+ int old_empty_cache;
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
@@ -2116,7 +2125,7 @@ vm_page_cache(vm_page_t m)
*/
vm_pageq_remove(m);
- vm_radix_color(&object->rtree, m->pindex, VM_RADIX_RED);
+ vm_radix_remove(&object->rtree, m->pindex);
TAILQ_REMOVE(&object->memq, m, listq);
object->resident_page_count--;
@@ -2133,9 +2142,10 @@ vm_page_cache(vm_page_t m)
m->flags &= ~PG_ZERO;
mtx_lock(&vm_page_queue_free_mtx);
m->flags |= PG_CACHED;
- old_cached = object->cached_page_count;
- object->cached_page_count++;
+ old_empty_cache = vm_object_cache_is_empty(object);
cnt.v_cache_count++;
+ if (vm_radix_insert(&object->cache, m->pindex, m) != 0)
+ panic("vm_page_cache: vm_radix_insert failed");
#if VM_NRESERVLEVEL > 0
if (!vm_reserv_free_page(m)) {
#else
@@ -2153,9 +2163,10 @@ vm_page_cache(vm_page_t m)
* the object's only resident page.
*/
if (object->type == OBJT_VNODE) {
- if (old_cached == 0 && object->resident_page_count != 0)
+ if (old_empty_cache != 0 && object->resident_page_count != 0)
vhold(object->handle);
- else if (old_cached != 0 && object->resident_page_count == 0)
+ else if (old_empty_cache == 0 &&
+ object->resident_page_count == 0)
vdrop(object->handle);
}
}
Modified: user/attilio/vmc-playground/sys/vm/vm_page.h
==============================================================================
--- user/attilio/vmc-playground/sys/vm/vm_page.h Sun Jul 8 12:39:02 2012 (r238244)
+++ user/attilio/vmc-playground/sys/vm/vm_page.h Sun Jul 8 14:01:25 2012 (r238245)
@@ -379,6 +379,7 @@ vm_page_t vm_page_find_least(vm_object_t
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
+vm_page_t vm_page_is_cached(vm_object_t object, vm_pindex_t pindex);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
Modified: user/attilio/vmc-playground/sys/vm/vm_radix.c
==============================================================================
--- user/attilio/vmc-playground/sys/vm/vm_radix.c Sun Jul 8 12:39:02 2012 (r238244)
+++ user/attilio/vmc-playground/sys/vm/vm_radix.c Sun Jul 8 14:01:25 2012 (r238245)
@@ -293,14 +293,12 @@ vm_radix_setroot(struct vm_radix *rtree,
}
static inline void *
-vm_radix_match(void *child, int color)
+vm_radix_match(void *child)
{
uintptr_t c;
c = (uintptr_t)child;
- if ((c & color) == 0)
- return (NULL);
return ((void *)(c & ~VM_RADIX_FLAGS));
}
@@ -429,9 +427,8 @@ vm_radix_insert(struct vm_radix *rtree,
KASSERT(rnode->rn_child[slot] == NULL,
("vm_radix_insert: Duplicate value %p at index: %lu\n",
rnode->rn_child[slot], (u_long)index));
- val = (void *)((uintptr_t)val | VM_RADIX_BLACK);
rnode->rn_child[slot] = val;
- atomic_add_32(&rnode->rn_count, 1);
+ rnode->rn_count++;
CTR5(KTR_VM,
"insert: tree %p, " KFRMT64(index) ", level %d, slot %d",
rtree, KSPLT64L(index), KSPLT64H(index), level, slot);
@@ -446,7 +443,7 @@ vm_radix_insert(struct vm_radix *rtree,
* NULL is returned.
*/
void *
-vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index, int color)
+vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{
struct vm_radix_node *rnode;
int slot;
@@ -465,7 +462,7 @@ vm_radix_lookup(struct vm_radix *rtree,
CTR2(KTR_VM, "lookup: rnode %p, child %p", rnode,
rnode->rn_child[slot]);
if (level == 0)
- return vm_radix_match(rnode->rn_child[slot], color);
+ return vm_radix_match(rnode->rn_child[slot]);
rnode = rnode->rn_child[slot];
level--;
}
@@ -475,40 +472,6 @@ vm_radix_lookup(struct vm_radix *rtree,
return NULL;
}
-void *
-vm_radix_color(struct vm_radix *rtree, vm_pindex_t index, int color)
-{
- struct vm_radix_node *rnode;
- uintptr_t child;
- int slot;
- int level;
-
- level = vm_radix_height(rtree, &rnode);
- if (index > VM_RADIX_MAX(level))
- return NULL;
- level--;
- while (rnode) {
- slot = vm_radix_slot(index, level);
- CTR6(KTR_VM,
- "color: tree %p, " KFRMT64(index) ", level %d, slot %d, rnode %p",
- rtree, KSPLT64L(index), KSPLT64H(index), level, slot,
- rnode);
- CTR2(KTR_VM, "color: rnode %p, child %p", rnode,
- rnode->rn_child[slot]);
- if (level == 0)
- break;
- rnode = rnode->rn_child[slot];
- level--;
- }
- if (rnode == NULL || rnode->rn_child[slot] == NULL)
- return (NULL);
- child = (uintptr_t)rnode->rn_child[slot];
- child &= ~VM_RADIX_FLAGS;
- rnode->rn_child[slot] = (void *)(child | color);
-
- return (void *)child;
-}
-
/*
* Find the first leaf with a valid node between *startp and end. Return
* the index of the first valid item in the leaf in *startp.
@@ -598,7 +561,7 @@ out:
*/
int
vm_radix_lookupn(struct vm_radix *rtree, vm_pindex_t start,
- vm_pindex_t end, int color, void **out, int cnt, vm_pindex_t *next)
+ vm_pindex_t end, void **out, int cnt, vm_pindex_t *next, u_int *exhausted)
{
struct vm_radix_node *rnode;
void *val;
@@ -608,6 +571,8 @@ vm_radix_lookupn(struct vm_radix *rtree,
CTR5(KTR_VM, "lookupn: tree %p, " KFRMT64(start) ", " KFRMT64(end),
rtree, KSPLT64L(start), KSPLT64H(start), KSPLT64L(end),
KSPLT64H(end));
+ if (end == 0)
+ *exhausted = 0;
if (rtree->rt_root == 0)
return (0);
outidx = 0;
@@ -616,7 +581,7 @@ vm_radix_lookupn(struct vm_radix *rtree,
for (; slot < VM_RADIX_COUNT; slot++, start++) {
if (end != 0 && start >= end)
goto out;
- val = vm_radix_match(rnode->rn_child[slot], color);
+ val = vm_radix_match(rnode->rn_child[slot]);
if (val == NULL) {
/*
@@ -632,6 +597,8 @@ vm_radix_lookupn(struct vm_radix *rtree,
*/
if ((VM_RADIX_MAXVAL - start) == 0) {
start++;
+ if (end == 0)
+ *exhausted = 1;
goto out;
}
continue;
@@ -640,10 +607,11 @@ vm_radix_lookupn(struct vm_radix *rtree,
"lookupn: tree %p " KFRMT64(index) " slot %d found child %p",
rtree, KSPLT64L(start), KSPLT64H(start), slot, val);
out[outidx] = val;
- if (++outidx == cnt)
- goto out;
- if ((VM_RADIX_MAXVAL - start) == 0) {
+ if (++outidx == cnt ||
+ (VM_RADIX_MAXVAL - start) == 0) {
start++;
+ if ((VM_RADIX_MAXVAL - start) == 0 && end == 0)
+ *exhausted = 1;
goto out;
}
}
@@ -656,38 +624,11 @@ out:
return (outidx);
}
-#if 0
-void
-vm_radix_foreach(struct vm_radix *rtree, vm_pindex_t start, vm_pindex_t end,
- int color, void (*iter)(void *))
-{
- struct vm_radix_node *rnode;
- void *val;
- int slot;
-
- if (rtree->rt_root == 0)
- return;
- while ((rnode = vm_radix_leaf(rtree, &start, end)) != NULL) {
- slot = vm_radix_slot(start, 0);
- for (; slot < VM_RADIX_COUNT; slot++, start++) {
- if (end != 0 && start >= end)
- return;
- val = vm_radix_match(rnode->rn_child[slot], color);
- if (val)
- iter(val);
- }
- if (end != 0 && start >= end)
- return;
- }
-}
-#endif
-
-
/*
* Look up any entry at a position less than or equal to index.
*/
void *
-vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index, int color)
+vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
struct vm_radix_node *rnode;
struct vm_radix_node *child;
@@ -751,7 +692,7 @@ restart:
}
if (rnode) {
for (; slot >= 0; slot--, index--) {
- val = vm_radix_match(rnode->rn_child[slot], color);
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
More information about the svn-src-user
mailing list