git: b9fc7628dbb2 - main - vm_page_free_prep(): convert PG_ZERO zeroed page check to use sf_buf
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Fri, 21 Nov 2025 15:03:01 UTC
The branch main has been updated by kib:
URL: https://cgit.FreeBSD.org/src/commit/?id=b9fc7628dbb24b55cbb8791c83bd69f73cfadf23
commit b9fc7628dbb24b55cbb8791c83bd69f73cfadf23
Author: Konstantin Belousov <kib@FreeBSD.org>
AuthorDate: 2025-11-21 08:57:17 +0000
Commit: Konstantin Belousov <kib@FreeBSD.org>
CommitDate: 2025-11-21 15:02:21 +0000
vm_page_free_prep(): convert PG_ZERO zeroed page check to use sf_buf
Make the check MI by allocating the sf_buf in a non-blocking manner. For
DMAP arches, this should be a nop since sf_buf allocation trivially cannot
fail there. For non-DMAP arches, the checks are activated unless there is
serious sf_buf pressure, which typically should not occur.
The context for vm_page_free_prep() should be ready to block on some VM
mutexes, which should make it reasonable to block on the sf_buf list lock.
Move the code from the DIAGNOSTIC build to INVARIANTS, and control its
activation with the sysctl debug.vm_check_pg_zero.
Reviewed by: markj
Sponsored by: The FreeBSD Foundation
MFC after: 1 week
Differential revision: https://reviews.freebsd.org/D53850
---
sys/vm/vm_extern.h | 3 +++
sys/vm/vm_page.c | 32 +++++++++++++++++++++++++-------
2 files changed, 28 insertions(+), 7 deletions(-)
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 1fd6518cf4ed..d0e005088745 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -164,5 +164,8 @@ vm_addr_ok(vm_paddr_t pa, vm_paddr_t size, u_long alignment,
return (vm_addr_align_ok(pa, alignment) &&
vm_addr_bound_ok(pa, size, boundary));
}
+
+extern bool vm_check_pg_zero;
+
#endif /* _KERNEL */
#endif /* !_VM_EXTERN_H_ */
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 16878604fa11..b39d665f9e0f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -84,6 +84,7 @@
#include <sys/sleepqueue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
+#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
@@ -145,6 +146,13 @@ SYSCTL_ULONG(_vm_stats_page, OID_AUTO, nofreeq_size, CTLFLAG_RD,
&nofreeq_size, 0,
"Size of the nofree queue");
+#ifdef INVARIANTS
+bool vm_check_pg_zero = false;
+SYSCTL_BOOL(_debug, OID_AUTO, vm_check_pg_zero, CTLFLAG_RWTUN,
+ &vm_check_pg_zero, 0,
+ "verify content of freed zero-filled pages");
+#endif
+
/*
* bogus page -- for I/O to/from partially complete buffers,
* or for paging into sparsely invalid regions.
@@ -4050,14 +4058,24 @@ vm_page_free_prep(vm_page_t m)
*/
atomic_thread_fence_acq();
-#if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP)
- if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
- uint64_t *p;
+#ifdef INVARIANTS
+ if (vm_check_pg_zero && (m->flags & PG_ZERO) != 0) {
+ struct sf_buf *sf;
+ unsigned long *p;
int i;
- p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
- for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++)
- KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx",
- m, i, (uintmax_t)*p));
+
+ sched_pin();
+ sf = sf_buf_alloc(m, SFB_CPUPRIVATE | SFB_NOWAIT);
+ if (sf != NULL) {
+ p = (unsigned long *)sf_buf_kva(sf);
+ for (i = 0; i < PAGE_SIZE / sizeof(*p); i++, p++) {
+ KASSERT(*p == 0,
+ ("zerocheck failed page %p PG_ZERO %d %jx",
+ m, i, (uintmax_t)*p));
+ }
+ sf_buf_free(sf);
+ }
+ sched_unpin();
}
#endif
if ((m->oflags & VPO_UNMANAGED) == 0) {