git: acb4cb33d358 - main - vm_pageout: simplify pageout_cluster
Date: Sun, 04 Aug 2024 20:33:56 UTC
The branch main has been updated by dougm:
URL: https://cgit.FreeBSD.org/src/commit/?id=acb4cb33d35838e3e86412202cd63d9021b21ce2
commit acb4cb33d35838e3e86412202cd63d9021b21ce2
Author: Doug Moore <dougm@FreeBSD.org>
AuthorDate: 2024-08-04 20:32:15 +0000
Commit: Doug Moore <dougm@FreeBSD.org>
CommitDate: 2024-08-04 20:32:15 +0000
vm_pageout: simplify pageout_cluster
Rewrite vm_pageout_cluster to eliminate redundant variables and
duplicated code.
Remove the tests on pindex that check for object boundary conditions,
since vm_page_next() and vm_page_prev() return NULL at the object
boundaries.

Fix an alignment error that could happen when pindex is aligned and is
the first of vm_pageout_page_count flushable pages, and the page at
pindex-1 is also flushable.
Reviewed by: kib
Differential Revision: https://reviews.freebsd.org/D46217
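
For readers following the new scan, the following is a minimal userland
sketch of the same clustering arithmetic, not the committed kernel code:
PAGEOUT_PAGE_COUNT, NOBJ, dirty[] and flushable() are hypothetical
stand-ins for vm_pageout_page_count, the object bounds and
vm_pageout_flushable(). It shows how mc[] fills from the middle, how the
alignment boundary halts the first backward pass, and how the backward
scan resumes once the forward scan hits an end.

/*
 * Userland sketch of the clustering arithmetic described above; not the
 * committed kernel code.  PAGEOUT_PAGE_COUNT, NOBJ, dirty[] and flushable()
 * are hypothetical stand-ins for vm_pageout_page_count, the object size and
 * vm_pageout_flushable().
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGEOUT_PAGE_COUNT	8	/* stand-in for vm_pageout_page_count */
#define NOBJ			32	/* stand-in object size, in pages */

static bool dirty[NOBJ];		/* which pindexes count as flushable */

/* Stand-in for vm_pageout_flushable(): in bounds and dirty. */
static bool
flushable(long pindex)
{
	return (pindex >= 0 && pindex < NOBJ && dirty[pindex]);
}

/*
 * Mirror the rewritten scan: fill mc[] from the middle, scan backward until
 * an alignment boundary, then forward, and resume backward if the forward
 * scan ends early.  Returns the cluster size; *first gets its lowest pindex.
 */
static int
cluster(long pindex, long *first)
{
	long mc[2 * PAGEOUT_PAGE_COUNT - 1], p;
	int alignment, num_ends, page_base, pageout_count;

	alignment = (int)(pindex % PAGEOUT_PAGE_COUNT);
	num_ends = 0;
	page_base = (int)(sizeof(mc) / sizeof(mc[0])) / 2;
	pageout_count = 1;
	mc[page_base] = pindex;
more:
	p = mc[page_base];
	while (pageout_count < PAGEOUT_PAGE_COUNT) {
		/* Hold at an alignment boundary until the forward end is known. */
		if (alignment == pageout_count - 1 && num_ends == 0)
			break;
		if (!flushable(--p)) {
			num_ends++;
			break;
		}
		mc[--page_base] = p;
		pageout_count++;
	}
	p = mc[page_base + pageout_count - 1];
	while (num_ends != 2 && pageout_count < PAGEOUT_PAGE_COUNT) {
		if (!flushable(++p)) {
			if (num_ends++ == 0)
				goto more;	/* resume the reverse scan */
			break;
		}
		mc[page_base + pageout_count] = p;
		pageout_count++;
	}
	*first = mc[page_base];
	return (pageout_count);
}

int
main(void)
{
	long first;
	int n;

	/* Pindexes 6..17 are flushable; start at the aligned pindex 8. */
	for (long i = 6; i <= 17; i++)
		dirty[i] = true;
	n = cluster(8, &first);
	printf("clustered %d pages starting at pindex %ld\n", n, first);
	return (0);
}

With the flushable range 6..17 and the aligned start pindex 8, the
backward pass stops immediately and the forward pass fills the cluster
to [8, 15], which is the aligned result the old code could miss when the
page at pindex-1 was also flushable.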
---
sys/vm/vm_pageout.c | 115 +++++++++++++++++++++-------------------------------
1 file changed, 46 insertions(+), 69 deletions(-)
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 742e0afbc690..0d054da34866 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -339,6 +339,25 @@ vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
 	return (false);
 }
 
+/*
+ * We can cluster only if the page is not clean, busy, or held, and the page is
+ * in the laundry queue.
+ */
+static bool
+vm_pageout_flushable(vm_page_t m)
+{
+	if (vm_page_tryxbusy(m) == 0)
+		return (false);
+	if (!vm_page_wired(m)) {
+		vm_page_test_dirty(m);
+		if (m->dirty != 0 && vm_page_in_laundry(m) &&
+		    vm_page_try_remove_write(m))
+			return (true);
+	}
+	vm_page_xunbusy(m);
+	return (false);
+}
+
 /*
  * Scan for pages at adjacent offsets within the given page's object that are
  * eligible for laundering, form a cluster of these pages and the given page,
@@ -348,26 +367,21 @@ static int
 vm_pageout_cluster(vm_page_t m)
 {
 	vm_object_t object;
-	vm_page_t mc[2 * vm_pageout_page_count - 1], p, pb, ps;
-	vm_pindex_t pindex;
-	int ib, is, page_base, pageout_count;
+	vm_page_t mc[2 * vm_pageout_page_count - 1];
+	int alignment, num_ends, page_base, pageout_count;
 
 	object = m->object;
 	VM_OBJECT_ASSERT_WLOCKED(object);
-	pindex = m->pindex;
 
 	vm_page_assert_xbusied(m);
 
-	pageout_count = 1;
+	alignment = m->pindex % vm_pageout_page_count;
+	num_ends = 0;
 	page_base = nitems(mc) / 2;
-	mc[page_base] = pb = ps = m;
-	ib = 1;
-	is = 1;
+	pageout_count = 1;
+	mc[page_base] = m;
 
 	/*
-	 * We can cluster only if the page is not clean, busy, or held, and
-	 * the page is in the laundry queue.
-	 *
 	 * During heavy mmap/modification loads the pageout
 	 * daemon can really fragment the underlying file
 	 * due to flushing pages out of order and not trying to
@@ -377,74 +391,37 @@ vm_pageout_cluster(vm_page_t m)
 	 * forward scan if room remains.
 	 */
 more:
-	while (ib != 0 && pageout_count < vm_pageout_page_count) {
-		if (ib > pindex) {
-			ib = 0;
-			break;
-		}
-		if ((p = vm_page_prev(pb)) == NULL ||
-		    vm_page_tryxbusy(p) == 0) {
-			ib = 0;
-			break;
-		}
-		if (vm_page_wired(p)) {
-			ib = 0;
-			vm_page_xunbusy(p);
-			break;
-		}
-		vm_page_test_dirty(p);
-		if (p->dirty == 0) {
-			ib = 0;
-			vm_page_xunbusy(p);
-			break;
-		}
-		if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
-			vm_page_xunbusy(p);
-			ib = 0;
-			break;
-		}
-		mc[--page_base] = pb = p;
-		++pageout_count;
-		++ib;
-
+	m = mc[page_base];
+	while (pageout_count < vm_pageout_page_count) {
 		/*
-		 * We are at an alignment boundary.  Stop here, and switch
-		 * directions.  Do not clear ib.
+		 * If we are at an alignment boundary, and haven't reached the
+		 * last flushable page forward, stop here, and switch
+		 * directions.
		 */
-		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
-			break;
-	}
-	while (pageout_count < vm_pageout_page_count &&
-	    pindex + is < object->size) {
-		if ((p = vm_page_next(ps)) == NULL ||
-		    vm_page_tryxbusy(p) == 0)
+		if (alignment == pageout_count - 1 && num_ends == 0)
 			break;
-		if (vm_page_wired(p)) {
-			vm_page_xunbusy(p);
-			break;
-		}
-		vm_page_test_dirty(p);
-		if (p->dirty == 0) {
-			vm_page_xunbusy(p);
+
+		m = vm_page_prev(m);
+		if (m == NULL || !vm_pageout_flushable(m)) {
+			num_ends++;
 			break;
 		}
-		if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
-			vm_page_xunbusy(p);
+		mc[--page_base] = m;
+		++pageout_count;
+	}
+	m = mc[page_base + pageout_count - 1];
+	while (num_ends != 2 && pageout_count < vm_pageout_page_count) {
+		m = vm_page_next(m);
+		if (m == NULL || !vm_pageout_flushable(m)) {
+			if (num_ends++ == 0)
+				/* Resume the reverse scan. */
+				goto more;
 			break;
 		}
-		mc[page_base + pageout_count] = ps = p;
+		mc[page_base + pageout_count] = m;
 		++pageout_count;
-		++is;
 	}
 
-	/*
-	 * If we exhausted our forward scan, continue with the reverse scan
-	 * when possible, even past an alignment boundary.  This catches
-	 * boundary conditions.
-	 */
-	if (ib != 0 && pageout_count < vm_pageout_page_count)
-		goto more;
-
 	return (vm_pageout_flush(&mc[page_base], pageout_count,
 	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
 }
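
As a worked example of the new stop condition (a sketch assuming
vm_pageout_page_count == 8 and that every page on the way down is
flushable): for pindex 13, alignment = 13 % 8 = 5, so the backward scan
holds once pageout_count reaches 6, i.e. with pindexes 13 down to 8
collected, and the forward scan may then extend the cluster toward 15.
The snippet below only prints that arithmetic; N is a hypothetical
stand-in for vm_pageout_page_count.

/* Illustration only, not kernel code; N stands in for vm_pageout_page_count. */
#include <stdio.h>

#define N	8

int
main(void)
{
	long pindex[] = { 8, 13 };

	for (int i = 0; i < 2; i++) {
		int alignment = (int)(pindex[i] % N);

		/*
		 * The backward scan holds at pageout_count - 1 == alignment,
		 * so (if every page on the way is flushable) its lowest
		 * pindex at that point is aligned to N.
		 */
		printf("pindex %ld: hold backward scan at pindex %ld "
		    "(pageout_count %d)\n",
		    pindex[i], pindex[i] - alignment, alignment + 1);
	}
	return (0);
}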