svn commit: r328761 - user/jeff/numa/sys/vm
Jeff Roberson
jeff at FreeBSD.org
Thu Feb 1 23:49:19 UTC 2018
Author: jeff
Date: Thu Feb 1 23:49:17 2018
New Revision: 328761
URL: https://svnweb.freebsd.org/changeset/base/328761
Log:
Implement a prototype of pageout free page batching.
Modified:
user/jeff/numa/sys/vm/vm_object.c
user/jeff/numa/sys/vm/vm_object.h
user/jeff/numa/sys/vm/vm_pageout.c
user/jeff/numa/sys/vm/vnode_pager.c
Modified: user/jeff/numa/sys/vm/vm_object.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_object.c Thu Feb 1 23:48:47 2018 (r328760)
+++ user/jeff/numa/sys/vm/vm_object.c Thu Feb 1 23:49:17 2018 (r328761)
@@ -282,6 +282,7 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size,
object->handle = NULL;
object->backing_object = NULL;
object->backing_object_offset = (vm_ooffset_t) 0;
+ object->iosize = PAGE_SIZE;
#if VM_NRESERVLEVEL > 0
LIST_INIT(&object->rvq);
#endif
Modified: user/jeff/numa/sys/vm/vm_object.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_object.h Thu Feb 1 23:48:47 2018 (r328760)
+++ user/jeff/numa/sys/vm/vm_object.h Thu Feb 1 23:49:17 2018 (r328761)
@@ -111,6 +111,7 @@ struct vm_object {
objtype_t type; /* type of pager */
u_short flags; /* see below */
u_short pg_color; /* (c) color of first page in obj */
+ u_int iosize; /* (c) Natural I/O size in bytes. */
u_int paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
int resident_page_count; /* number of resident pages */
struct vm_object *backing_object; /* object that I'm a shadow of */
Modified: user/jeff/numa/sys/vm/vm_pageout.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_pageout.c Thu Feb 1 23:48:47 2018 (r328760)
+++ user/jeff/numa/sys/vm/vm_pageout.c Thu Feb 1 23:49:17 2018 (r328761)
@@ -1092,6 +1092,83 @@ dolaundry:
}
}
+/*
+ * Free the inactive page 'm' and, when the object advertises a natural
+ * I/O size larger than one page, opportunistically free the other
+ * eligible pages belonging to the same iosize-aligned block so that a
+ * whole filesystem block is retired at once.
+ *
+ * Entered with the object write-locked and m's page lock held; both
+ * are dropped before returning.  Returns the number of pages freed,
+ * which the caller subtracts from its page shortage.
+ */
+static int
+vm_pageout_free_pages(vm_object_t object, vm_page_t m)
+{
+ vm_page_t p, pp;
+ struct mtx *mtx;
+ struct pglist pgl;
+ vm_pindex_t start;
+ int pcount, count;
+
+ /* Pages per natural I/O block; clamp to at least one. */
+ pcount = MAX(object->iosize / PAGE_SIZE, 1);
+ if (pcount == 1) {
+ /* No batching possible: free the single page and bail. */
+ vm_page_free(m);
+ vm_page_unlock(m);
+ VM_OBJECT_WUNLOCK(object);
+ count = 1;
+ goto out;
+ }
+ TAILQ_INIT(&pgl);
+ count = 0;
+
+ /* Find the first page in the block. */
+ start = m->pindex - (m->pindex % pcount);
+ for (p = m; p->pindex > start && (pp = vm_page_prev(p)) != NULL;
+ p = pp);
+
+ /* Free the original page so we don't validate it twice. */
+ if (p == m)
+ p = vm_page_next(m);
+ if (vm_page_free_prep(m, false)) {
+ m->flags &= ~PG_ZERO;
+ TAILQ_INSERT_TAIL(&pgl, m, listq);
+ count++;
+ }
+
+ /* Iterate through the block range and free compatible pages. */
+ mtx = vm_page_lockptr(m);
+ for ( ; p != NULL && p->pindex < start + pcount; p = pp) {
+ /* Fetch the successor before 'p' can be unlinked by a free. */
+ pp = TAILQ_NEXT(p, listq);
+ /* Switch page locks when 'p' hashes to a different mutex. */
+ if (mtx != vm_page_lockptr(p)) {
+ mtx_unlock(mtx);
+ mtx = vm_page_lockptr(p);
+ mtx_lock(mtx);
+ }
+ /*
+ * Skip pages that are held, busied, or no longer inactive.
+ * NOTE(review): unlike the main scan path there is no
+ * wire_count check here -- confirm wired pages cannot be
+ * encountered on PQ_INACTIVE at this point.
+ */
+ if (p->hold_count || vm_page_busied(p) ||
+ p->queue != PQ_INACTIVE)
+ continue;
+ /* Invalid pages carry no data and can be freed outright. */
+ if (p->valid == 0)
+ goto free_page;
+ /* Leave recently referenced pages resident. */
+ if ((p->aflags & PGA_REFERENCED) != 0)
+ continue;
+ if (object->ref_count != 0) {
+ /* Re-check hardware reference bits while still mapped. */
+ if (pmap_ts_referenced(p)) {
+ vm_page_aflag_set(p, PGA_REFERENCED);
+ continue;
+ }
+ vm_page_test_dirty(p);
+ /* Unmap clean pages so they cannot be dirtied later. */
+ if (p->dirty == 0)
+ pmap_remove_all(p);
+ }
+ /* Dirty pages are skipped; they are not freeable here. */
+ if (p->dirty)
+ continue;
+free_page:
+ /* Queue the page for a deferred batched physical free. */
+ if (vm_page_free_prep(p, false)) {
+ p->flags &= ~PG_ZERO;
+ TAILQ_INSERT_TAIL(&pgl, p, listq);
+ count++;
+ }
+ }
+ mtx_unlock(mtx);
+ VM_OBJECT_WUNLOCK(object);
+ /* Return the batch to the physical allocator outside all locks. */
+ vm_page_free_phys_pglist(&pgl);
+out:
+ /* Account every page freed in the v_dfree statistic. */
+ VM_CNT_ADD(v_dfree, count);
+
+ return (count);
+}
+
/*
* vm_pageout_scan does the dirty work for the pageout daemon.
*
@@ -1310,14 +1387,14 @@ unlock_page:
*/
if (m->dirty == 0) {
free_page:
- vm_page_free(m);
- VM_CNT_INC(v_dfree);
- --page_shortage;
+ page_shortage -= vm_pageout_free_pages(object, m);
+ goto lock_queue;
} else if ((object->flags & OBJ_DEAD) == 0)
vm_page_launder(m);
drop_page:
vm_page_unlock(m);
VM_OBJECT_WUNLOCK(object);
+lock_queue:
if (!queue_locked) {
vm_pagequeue_lock(pq);
queue_locked = TRUE;
Modified: user/jeff/numa/sys/vm/vnode_pager.c
==============================================================================
--- user/jeff/numa/sys/vm/vnode_pager.c Thu Feb 1 23:48:47 2018 (r328760)
+++ user/jeff/numa/sys/vm/vnode_pager.c Thu Feb 1 23:49:17 2018 (r328761)
@@ -241,6 +241,7 @@ retry:
object->un_pager.vnp.vnp_size = size;
object->un_pager.vnp.writemappings = 0;
+ object->iosize = vp->v_mount->mnt_stat.f_iosize;
object->handle = handle;
VI_LOCK(vp);
@@ -760,7 +761,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page
object = vp->v_object;
foff = IDX_TO_OFF(m[0]->pindex);
- bsize = vp->v_mount->mnt_stat.f_iosize;
+ bsize = object->iosize;
pagesperblock = bsize / PAGE_SIZE;
KASSERT(foff < object->un_pager.vnp.vnp_size,
More information about the svn-src-user
mailing list