git: d7670d965c3f - stable/13 - kern_malloc: fold free and zfree together into one __always_inline func
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Sat, 28 Sep 2024 21:52:17 UTC
The branch stable/13 has been updated by bz:
URL: https://cgit.FreeBSD.org/src/commit/?id=d7670d965c3fc3dcb36162a5054356524b3fd4ce
commit d7670d965c3fc3dcb36162a5054356524b3fd4ce
Author: Bjoern A. Zeeb <bz@FreeBSD.org>
AuthorDate: 2024-07-24 15:56:32 +0000
Commit: Bjoern A. Zeeb <bz@FreeBSD.org>
CommitDate: 2024-09-28 19:33:43 +0000
kern_malloc: fold free and zfree together into one __always_inline func
free() and zfree() are essentially the same copy and pasted code with
the extra explicit_bzero() (and formerly kasan) calls. Add a bool to enable
the extra functionality and make both functions wrappers around the common
code and let the compiler do the optimization based on the bool input
when inlining.
No functional changes intended.
Suggested by: kib (in D45812)
Sponsored by: The FreeBSD Foundation
Reviewed by: kib, markj
Differential Revision: https://reviews.freebsd.org/D46101
And the fix from Olivier Certner (olce):
kern_malloc: Restore working KASAN runtime after free() and zfree() folding
In the zfree() case, the explicit_bzero() calls zero all the allocation,
including the redzone which malloc() has marked as invalid. So calling
kasan_mark() before those is in fact necessary.
This fixes a crash at boot when 'ldconfig' is run and tries to get
random bytes through getrandom() (relevant part of the stack is
read_random_uio() -> zfree() -> explicit_bzero()) for kernels with KASAN
compiled in.
Approved by: markj (mentor)
Fixes: 4fab5f005482 ("kern_malloc: fold free and zfree together into one __always_inline func")
MFC with: 4fab5f005482
Sponsored by: The FreeBSD Foundation
(cherry picked from commit 4fab5f005482aa88bc0f7d7a0a5e81b436869112)
(cherry picked from commit 28391f188ca18b6251ba46040adf81946b0ccb03)
---
sys/kern/kern_malloc.c | 94 +++++++++++++++++---------------------------------
1 file changed, 31 insertions(+), 63 deletions(-)
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 6a36f982cc78..4669e072c966 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -914,15 +914,8 @@ free_dbg(void **addrp, struct malloc_type *mtp)
}
#endif
-/*
- * free:
- *
- * Free a block of memory allocated by malloc.
- *
- * This routine may not block.
- */
-void
-free(void *addr, struct malloc_type *mtp)
+static __always_inline void
+_free(void *addr, struct malloc_type *mtp, bool dozero)
{
uma_zone_t zone;
uma_slab_t slab;
@@ -938,8 +931,8 @@ free(void *addr, struct malloc_type *mtp)
vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
if (slab == NULL)
- panic("free: address %p(%p) has not been allocated",
- addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
+ panic("%s(%d): address %p(%p) has not been allocated", __func__,
+ dozero, addr, (void *)((uintptr_t)addr & (~UMA_SLAB_MASK)));
switch (GET_SLAB_COOKIE(slab)) {
case __predict_true(SLAB_COOKIE_SLAB_PTR):
@@ -947,79 +940,54 @@ free(void *addr, struct malloc_type *mtp)
#if defined(INVARIANTS) && !defined(KASAN)
free_save_type(addr, mtp, size);
#endif
+ if (dozero) {
+ kasan_mark(addr, size, size, 0);
+ explicit_bzero(addr, size);
+ }
uma_zfree_arg(zone, addr, slab);
break;
case SLAB_COOKIE_MALLOC_LARGE:
size = malloc_large_size(slab);
+ if (dozero) {
+ kasan_mark(addr, size, size, 0);
+ explicit_bzero(addr, size);
+ }
free_large(addr, size);
break;
case SLAB_COOKIE_CONTIG_MALLOC:
- size = contigmalloc_size(slab);
+ size = round_page(contigmalloc_size(slab));
+ if (dozero)
+ explicit_bzero(addr, size);
kmem_free((vm_offset_t)addr, size);
- size = round_page(size);
break;
default:
- panic("%s: addr %p slab %p with unknown cookie %d", __func__,
- addr, slab, GET_SLAB_COOKIE(slab));
+ panic("%s(%d): addr %p slab %p with unknown cookie %d",
+ __func__, dozero, addr, slab, GET_SLAB_COOKIE(slab));
/* NOTREACHED */
}
malloc_type_freed(mtp, size);
}
/*
- * zfree:
- *
- * Zero then free a block of memory allocated by malloc.
- *
+ * free:
+ * Free a block of memory allocated by malloc/contigmalloc.
* This routine may not block.
*/
void
-zfree(void *addr, struct malloc_type *mtp)
+free(void *addr, struct malloc_type *mtp)
{
- uma_zone_t zone;
- uma_slab_t slab;
- u_long size;
-
-#ifdef MALLOC_DEBUG
- if (free_dbg(&addr, mtp) != 0)
- return;
-#endif
- /* free(NULL, ...) does nothing */
- if (addr == NULL)
- return;
-
- vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
- if (slab == NULL)
- panic("free: address %p(%p) has not been allocated",
- addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
+ _free(addr, mtp, false);
+}
- switch (GET_SLAB_COOKIE(slab)) {
- case __predict_true(SLAB_COOKIE_SLAB_PTR):
- size = zone->uz_size;
-#if defined(INVARIANTS) && !defined(KASAN)
- free_save_type(addr, mtp, size);
-#endif
- kasan_mark(addr, size, size, 0);
- explicit_bzero(addr, size);
- uma_zfree_arg(zone, addr, slab);
- break;
- case SLAB_COOKIE_MALLOC_LARGE:
- size = malloc_large_size(slab);
- kasan_mark(addr, size, size, 0);
- explicit_bzero(addr, size);
- free_large(addr, size);
- break;
- case SLAB_COOKIE_CONTIG_MALLOC:
- size = round_page(contigmalloc_size(slab));
- explicit_bzero(addr, size);
- kmem_free((vm_offset_t)addr, size);
- break;
- default:
- panic("%s: addr %p slab %p with unknown cookie %d", __func__,
- addr, slab, GET_SLAB_COOKIE(slab));
- /* NOTREACHED */
- }
- malloc_type_freed(mtp, size);
+/*
+ * zfree:
+ * Zero then free a block of memory allocated by malloc/contigmalloc.
+ * This routine may not block.
+ */
+void
+zfree(void *addr, struct malloc_type *mtp)
+{
+ _free(addr, mtp, true);
}
/*