git: 3f32a7e4eee5 - main - vm: Add a KVA arena for M_NEVERFREED allocations
Date: Tue, 30 Jul 2024 15:38:48 UTC
The branch main has been updated by bnovkov:
URL: https://cgit.FreeBSD.org/src/commit/?id=3f32a7e4eee53d5565a4076e69a41d1afd803e0c
commit 3f32a7e4eee53d5565a4076e69a41d1afd803e0c
Author: Bojan Novković <bnovkov@FreeBSD.org>
AuthorDate: 2024-07-16 14:14:30 +0000
Commit: Bojan Novković <bnovkov@FreeBSD.org>
CommitDate: 2024-07-30 15:38:24 +0000
vm: Add a KVA arena for M_NEVERFREED allocations
This patch adds a new KVA arena that keeps M_NEVERFREED allocations
separate from the rest of kernel memory. Segregating the KVA of pages
that are never freed should facilitate superpage promotion in the
kernel.
Differential Revision: https://reviews.freebsd.org/D45997
Reviewed by: alc, kib, markj
Tested by: alc
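[Editor's note: for illustration only, a minimal caller-side sketch, not part of
this commit. kmem_malloc(9) and the M_NEVERFREED flag exist in the tree (the
flag's handling is shown in the diff below); the buffer name, function, and
size parameter are hypothetical.]

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Hypothetical boot-time table that lives for the lifetime of the
 * kernel.  Passing M_NEVERFREED routes the KVA request to the
 * per-domain "kernel NOFREE arena" added by this commit, so that
 * permanent mappings do not share, and thus fragment, the same
 * superpage-sized KVA regions as freeable kernel memory.
 */
static void *example_table;	/* hypothetical */

static void
example_init(vm_size_t size)
{
	/* Never paired with kmem_free(); the mapping is permanent. */
	example_table = kmem_malloc(size, M_WAITOK | M_ZERO | M_NEVERFREED);
}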
---
sys/vm/vm_kern.c | 21 ++++++++++++++++-----
sys/vm/vm_pagequeue.h | 1 +
2 files changed, 17 insertions(+), 5 deletions(-)
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index a04044463fe2..fb7c80b767ed 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -473,10 +473,12 @@ kmem_malloc_domain(int domain, vm_size_t size, int flags)
vm_size_t asize;
int rv;
- if (__predict_true((flags & M_EXEC) == 0))
+ if (__predict_true((flags & (M_EXEC | M_NEVERFREED)) == 0))
arena = vm_dom[domain].vmd_kernel_arena;
- else
+ else if ((flags & M_EXEC) != 0)
arena = vm_dom[domain].vmd_kernel_rwx_arena;
+ else
+ arena = vm_dom[domain].vmd_kernel_nofree_arena;
asize = round_page(size);
if (vmem_alloc(arena, asize, flags | M_BESTFIT, &addr))
return (0);
@@ -882,20 +884,29 @@ kmem_init(vm_offset_t start, vm_offset_t end)
/*
* In architectures with superpages, maintain separate arenas
* for allocations with permissions that differ from the
- * "standard" read/write permissions used for kernel memory,
- * so as not to inhibit superpage promotion.
+ * "standard" read/write permissions used for kernel memory
+ * and pages that are never released, so as not to inhibit
+ * superpage promotion.
*
- * Use the base import quantum since this arena is rarely used.
+ * Use the base import quantum since these arenas are rarely
+ * used.
*/
#if VM_NRESERVLEVEL > 0
vm_dom[domain].vmd_kernel_rwx_arena = vmem_create(
"kernel rwx arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
+ vm_dom[domain].vmd_kernel_nofree_arena = vmem_create(
+ "kernel NOFREE arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
vmem_set_import(vm_dom[domain].vmd_kernel_rwx_arena,
kva_import_domain, (vmem_release_t *)vmem_xfree,
kernel_arena, KVA_QUANTUM);
+ vmem_set_import(vm_dom[domain].vmd_kernel_nofree_arena,
+ kva_import_domain, (vmem_release_t *)vmem_xfree,
+ kernel_arena, KVA_QUANTUM);
#else
vm_dom[domain].vmd_kernel_rwx_arena =
vm_dom[domain].vmd_kernel_arena;
+ vm_dom[domain].vmd_kernel_nofree_arena =
+ vm_dom[domain].vmd_kernel_arena;
#endif
}
diff --git a/sys/vm/vm_pagequeue.h b/sys/vm/vm_pagequeue.h
index 86863a0a6400..af1183e63e53 100644
--- a/sys/vm/vm_pagequeue.h
+++ b/sys/vm/vm_pagequeue.h
@@ -243,6 +243,7 @@ struct vm_domain {
} vmd_pgcache[VM_NFREEPOOL];
struct vmem *vmd_kernel_arena; /* (c) per-domain kva R/W arena. */
struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
+ struct vmem *vmd_kernel_nofree_arena; /* (c) per-domain kva NOFREE arena. */
u_int vmd_domain; /* (c) Domain number. */
u_int vmd_page_count; /* (c) Total page count. */
long vmd_segs; /* (c) bitmask of the segments */
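[Editor's note: a usage sketch, not part of the commit. Each domain gets its
own NOFREE arena, so a domain-targeted request still lands in the matching
per-domain arena. This assumes kmem_malloc_domainset(9) and DOMAINSET_PREF()
as found in the tree; the function and variable names are hypothetical.]

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Hypothetical per-domain allocation that is never released.
 * kmem_malloc_domainset() resolves the domain, and because
 * M_NEVERFREED is set the request is satisfied from
 * vm_dom[domain].vmd_kernel_nofree_arena rather than the regular
 * per-domain kernel arena.
 */
static void *
example_alloc_on_domain(int domain, vm_size_t size)
{
	return (kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
	    M_WAITOK | M_NEVERFREED));
}

[Note that on configurations without superpage reservations
(VM_NRESERVLEVEL == 0), the #else branch in kmem_init() aliases
vmd_kernel_nofree_arena to vmd_kernel_arena, so M_NEVERFREED does not
change KVA placement there.]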