git: bb1dc6cf9c36 - main - vm_page: define partial page invalidate
Date: Sat, 22 Feb 2025 01:23:51 UTC
The branch main has been updated by dougm:
URL: https://cgit.FreeBSD.org/src/commit/?id=bb1dc6cf9c3671c82318e22825d86d54c8d672cb
commit bb1dc6cf9c3671c82318e22825d86d54c8d672cb
Author: Doug Moore <dougm@FreeBSD.org>
AuthorDate: 2025-02-22 01:22:47 +0000
Commit: Doug Moore <dougm@FreeBSD.org>
CommitDate: 2025-02-22 01:22:47 +0000
vm_page: define partial page invalidate
Two different functions in different files do the same thing: fill a
partial page with zeroes. Add that functionality to vm_page.c and
remove it elsewhere to avoid code duplication.
Reviewed by: markj, kib
Differential Revision: https://reviews.freebsd.org/D49096
---
sys/fs/tmpfs/tmpfs_subr.c | 47 ++++---------------------------------------
sys/kern/uipc_shm.c | 47 ++++---------------------------------------
sys/vm/vm_page.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++
sys/vm/vm_page.h | 2 ++
4 files changed, 61 insertions(+), 86 deletions(-)
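
For context before the diff: the new helper zeroes bytes [base, end) of the
page at index pindex, paging the page in first if it is held only by the
pager, and returns 0 on success or EIO on a pagein failure. A minimal caller
sketch follows; zero_truncated_tail() and the truncation scenario are
hypothetical, while the vm_page_grab_zero_partial() signature and its locking
contract are taken from the patch below.

/*
 * Hypothetical caller (not part of this commit): zero the stale tail of
 * the page that straddles `newsize' after an object shrinks.  The object
 * write lock must be held on entry; the helper may drop and re-acquire
 * it while paging the page in.
 */
static int
zero_truncated_tail(vm_object_t object, vm_ooffset_t newsize)
{
        int base;

        VM_OBJECT_ASSERT_WLOCKED(object);
        base = newsize & PAGE_MASK;
        if (base == 0)
                return (0);     /* page-aligned size; no partial page */
        /* Zero bytes [base, PAGE_SIZE) of the page containing newsize. */
        return (vm_page_grab_zero_partial(object, OFF_TO_IDX(newsize),
            base, PAGE_SIZE));
}
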
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 41d1f27caf13..37be0b762579 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -493,50 +493,11 @@ static int
tmpfs_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
int end, boolean_t ignerr)
{
- vm_page_t m;
- int rv, error;
-
- VM_OBJECT_ASSERT_WLOCKED(object);
- KASSERT(base >= 0, ("%s: base %d", __func__, base));
- KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
- end));
- error = 0;
-
-retry:
- m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
- if (m != NULL) {
- MPASS(vm_page_all_valid(m));
- } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
- m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL |
- VM_ALLOC_WAITFAIL);
- if (m == NULL)
- goto retry;
- vm_object_pip_add(object, 1);
- VM_OBJECT_WUNLOCK(object);
- rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
- VM_OBJECT_WLOCK(object);
- vm_object_pip_wakeup(object);
- if (rv == VM_PAGER_OK) {
- /*
- * Since the page was not resident, and therefore not
- * recently accessed, immediately enqueue it for
- * asynchronous laundering. The current operation is
- * not regarded as an access.
- */
- vm_page_launder(m);
- } else {
- vm_page_free(m);
- m = NULL;
- if (!ignerr)
- error = EIO;
- }
- }
- if (m != NULL) {
- pmap_zero_page_area(m, base, end - base);
- vm_page_set_dirty(m);
- vm_page_xunbusy(m);
- }
+ int error;
+ error = vm_page_grab_zero_partial(object, idx, base, end);
+ if (ignerr)
+ error = 0;
return (error);
}
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 026611a59593..b4016e9dd6bf 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -697,51 +697,12 @@ static int
shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
int end)
{
- vm_page_t m;
- int rv;
+ int error;
- VM_OBJECT_ASSERT_WLOCKED(object);
- KASSERT(base >= 0, ("%s: base %d", __func__, base));
- KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
- end));
-
-retry:
- m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
- if (m != NULL) {
- MPASS(vm_page_all_valid(m));
- } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
- m = vm_page_alloc(object, idx,
- VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
- if (m == NULL)
- goto retry;
- vm_object_pip_add(object, 1);
+ error = vm_page_grab_zero_partial(object, idx, base, end);
+ if (error == EIO)
VM_OBJECT_WUNLOCK(object);
- rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
- VM_OBJECT_WLOCK(object);
- vm_object_pip_wakeup(object);
- if (rv == VM_PAGER_OK) {
- /*
- * Since the page was not resident, and therefore not
- * recently accessed, immediately enqueue it for
- * asynchronous laundering. The current operation is
- * not regarded as an access.
- */
- vm_page_launder(m);
- } else {
- vm_page_free(m);
- VM_OBJECT_WUNLOCK(object);
- return (EIO);
- }
- }
- if (m != NULL) {
- pmap_zero_page_area(m, base, end - base);
- KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
- __func__, m));
- vm_page_set_dirty(m);
- vm_page_xunbusy(m);
- }
-
- return (0);
+ return (error);
}
static int
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index c105aafca40f..e4c2aadf5d56 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -5086,6 +5086,57 @@ out:
return (VM_PAGER_OK);
}
+/*
+ * Fill a partial page with zeroes. The object write lock is held on entry and
+ * exit, but may be temporarily released.
+ */
+int
+vm_page_grab_zero_partial(vm_object_t object, vm_pindex_t pindex, int base,
+ int end)
+{
+ vm_page_t m;
+ int rv;
+
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ KASSERT(base >= 0, ("%s: base %d", __func__, base));
+ KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
+ end));
+
+retry:
+ m = vm_page_grab(object, pindex, VM_ALLOC_NOCREAT);
+ if (m != NULL) {
+ MPASS(vm_page_all_valid(m));
+ } else if (vm_pager_has_page(object, pindex, NULL, NULL)) {
+ m = vm_page_alloc(object, pindex,
+ VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
+ if (m == NULL)
+ goto retry;
+ vm_object_pip_add(object, 1);
+ VM_OBJECT_WUNLOCK(object);
+ rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
+ VM_OBJECT_WLOCK(object);
+ vm_object_pip_wakeup(object);
+ if (rv != VM_PAGER_OK) {
+ vm_page_free(m);
+ return (EIO);
+ }
+
+ /*
+ * Since the page was not resident, and therefore not recently
+ * accessed, immediately enqueue it for asynchronous laundering.
+ * The current operation is not regarded as an access.
+ */
+ vm_page_launder(m);
+ } else
+ return (0);
+
+ pmap_zero_page_area(m, base, end - base);
+ KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid", __func__, m));
+ vm_page_set_dirty(m);
+ vm_page_xunbusy(m);
+ return (0);
+}
+
/*
* Locklessly grab a valid page. If the page is not valid or not yet
* allocated this will fall back to the object lock method.
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 744688bf789b..5a166d9ba44c 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -627,6 +627,8 @@ vm_page_t vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
vm_memattr_t memattr);
void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
+int vm_page_grab_zero_partial(vm_object_t object, vm_pindex_t pindex, int base,
+ int end);
vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
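
A closing note on the locking comment in the new vm_page.c code: the object
write lock can be dropped safely around the synchronous pager call because
the object's paging-in-progress count pins it against termination in the
meantime. The pattern, condensed from the new function into a generic sketch
(pagein_one_locked() is a hypothetical name, not part of the commit):

/*
 * Page in one busied page of `object', dropping the object write lock
 * around the (possibly sleeping) pager call.  The paging-in-progress
 * reference keeps the object from being terminated while unlocked.
 * Returns a VM_PAGER_* status; on failure the caller is expected to
 * free the page, as vm_page_grab_zero_partial() does.
 */
static int
pagein_one_locked(vm_object_t object, vm_page_t *mp)
{
        int rv;

        VM_OBJECT_ASSERT_WLOCKED(object);
        vm_object_pip_add(object, 1);   /* pin the object while unlocked */
        VM_OBJECT_WUNLOCK(object);
        rv = vm_pager_get_pages(object, mp, 1, NULL, NULL);
        VM_OBJECT_WLOCK(object);
        vm_object_pip_wakeup(object);   /* drop the paging-in-progress hold */
        return (rv);
}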