git: cd2512eaab5c - main - vm: Add flags for unprotected allocations

From: Andrew Turner <andrew_at_FreeBSD.org>
Date: Tue, 12 May 2026 16:55:27 UTC
The branch main has been updated by andrew:

URL: https://cgit.FreeBSD.org/src/commit/?id=cd2512eaab5cd402a0177a8078b9234b215b39bd

commit cd2512eaab5cd402a0177a8078b9234b215b39bd
Author:     Sarah Walker <sarah.walker2@arm.com>
AuthorDate: 2026-05-12 12:16:00 +0000
Commit:     Andrew Turner <andrew@FreeBSD.org>
CommitDate: 2026-05-12 16:54:40 +0000

    vm: Add flags for unprotected allocations
    
    Unprotected allocations are intended to be accessible outside of the current
    VM on systems such as Arm CCA.
    
    Reviewed by:    markj
    Sponsored by:   Arm Ltd
    Differential Revision:  https://reviews.freebsd.org/D56518
---
 sys/sys/malloc.h |  1 +
 sys/vm/pmap.h    |  1 +
 sys/vm/vm_kern.c | 23 ++++++++++++++++++++---
 3 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h
index 9b281da4b4d4..68dffb395534 100644
--- a/sys/sys/malloc.h
+++ b/sys/sys/malloc.h
@@ -61,6 +61,7 @@
 #define	M_EXEC		0x4000		/* allocate executable space */
 #define	M_NEXTFIT	0x8000		/* only for vmem, follow cursor */
 #define	M_NEVERFREED 	0x10000		/* chunk will never get freed */
+#define	M_UNPROTECTED	0x20000		/* alloc out of unprotected memory */
 
 #define	M_VERSION	2024073001
 
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index e6dcd47d32f6..868bb787dc98 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -104,6 +104,7 @@ extern vm_offset_t kernel_vm_end;
 #define	PMAP_ENTER_NOSLEEP	0x00000100
 #define	PMAP_ENTER_WIRED	0x00000200
 #define	PMAP_ENTER_LARGEPAGE	0x00000400
+#define	PMAP_ENTER_UNPROTECTED	0x00000800
 #define	PMAP_ENTER_RESERVED	0xFF000000
 
 /*
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index fc5d0de424bd..41ba07373976 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -270,6 +270,7 @@ kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 	vm_size_t asize;
 	int pflags;
 	vm_prot_t prot;
+	u_int pmap_enter_flags;
 
 	object = kernel_object;
 	asize = round_page(size);
@@ -279,6 +280,11 @@ kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
+
+	pmap_enter_flags = prot | PMAP_ENTER_WIRED;
+	if ((flags & M_UNPROTECTED) != 0)
+		pmap_enter_flags |= PMAP_ENTER_UNPROTECTED;
+
 	VM_OBJECT_WLOCK(object);
 	for (i = 0; i < asize; i += PAGE_SIZE) {
 		m = kmem_alloc_contig_pages(object, atop(offset + i),
@@ -296,7 +302,7 @@ kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 			pmap_zero_page(m);
 		vm_page_valid(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
-		    prot | PMAP_ENTER_WIRED, 0);
+		    pmap_enter_flags, 0);
 	}
 	VM_OBJECT_WUNLOCK(object);
 	kmem_alloc_san(addr, size, asize, flags);
@@ -363,6 +369,7 @@ kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 	vm_size_t asize;
 	u_long npages;
 	int pflags;
+	u_int pmap_enter_flags;
 
 	object = kernel_object;
 	asize = round_page(size);
@@ -385,12 +392,17 @@ kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 	    vm_page_domain(m), domain));
 	end_m = m + npages;
 	tmp = addr;
+
+	pmap_enter_flags = VM_PROT_RW | PMAP_ENTER_WIRED;
+	if ((flags & M_UNPROTECTED) != 0)
+		pmap_enter_flags |= PMAP_ENTER_UNPROTECTED;
+
 	for (; m < end_m; m++) {
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		vm_page_valid(m);
 		pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
-		    VM_PROT_RW | PMAP_ENTER_WIRED, 0);
+		    pmap_enter_flags, 0);
 		tmp += PAGE_SIZE;
 	}
 	VM_OBJECT_WUNLOCK(object);
@@ -549,6 +561,7 @@ kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 	vm_page_t m;
 	vm_prot_t prot;
 	int pflags;
+	u_int pmap_enter_flags;
 
 	KASSERT(object == kernel_object,
 	    ("kmem_back_domain: only supports kernel object."));
@@ -560,6 +573,10 @@ kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 		pflags |= VM_ALLOC_WAITFAIL;
 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
 
+	pmap_enter_flags = prot | PMAP_ENTER_WIRED;
+	if ((flags & M_UNPROTECTED) != 0)
+		pmap_enter_flags |= PMAP_ENTER_UNPROTECTED;
+
 	i = 0;
 	vm_page_iter_init(&pages, object);
 	VM_OBJECT_WLOCK(object);
@@ -589,7 +606,7 @@ retry:
 		    ("kmem_malloc: page %p is managed", m));
 		vm_page_valid(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
-		    prot | PMAP_ENTER_WIRED, 0);
+		    pmap_enter_flags, 0);
 		if (__predict_false((prot & VM_PROT_EXECUTE) != 0))
 			m->oflags |= VPO_KMEM_EXEC;
 	}