svn commit: r367186 - in head/sys: amd64/amd64 i386/i386 kern sys

Mateusz Guzik mjg at FreeBSD.org
Fri Oct 30 20:02:33 UTC 2020


Author: mjg
Date: Fri Oct 30 20:02:32 2020
New Revision: 367186
URL: https://svnweb.freebsd.org/changeset/base/367186

Log:
  malloc: delegate M_EXEC handling to dedicated routines
  
  It is almost never needed and adds an avoidable branch.
  
  While here do minor clean ups in preparation for larger changes.
  
  Reviewed by:	markj
  Differential Revision:	https://reviews.freebsd.org/D27019

Modified:
  head/sys/amd64/amd64/bpf_jit_machdep.c
  head/sys/i386/i386/bpf_jit_machdep.c
  head/sys/kern/kern_malloc.c
  head/sys/kern/link_elf.c
  head/sys/sys/malloc.h

Modified: head/sys/amd64/amd64/bpf_jit_machdep.c
==============================================================================
--- head/sys/amd64/amd64/bpf_jit_machdep.c	Fri Oct 30 19:53:16 2020	(r367185)
+++ head/sys/amd64/amd64/bpf_jit_machdep.c	Fri Oct 30 20:02:32 2020	(r367186)
@@ -602,7 +602,7 @@ bpf_jit_compile(struct bpf_insn *prog, u_int nins, siz
 
 		*size = stream.cur_ip;
 #ifdef _KERNEL
-		stream.ibuf = malloc(*size, M_BPFJIT, M_EXEC | M_NOWAIT);
+		stream.ibuf = malloc_exec(*size, M_BPFJIT, M_NOWAIT);
 		if (stream.ibuf == NULL)
 			break;
 #else

Modified: head/sys/i386/i386/bpf_jit_machdep.c
==============================================================================
--- head/sys/i386/i386/bpf_jit_machdep.c	Fri Oct 30 19:53:16 2020	(r367185)
+++ head/sys/i386/i386/bpf_jit_machdep.c	Fri Oct 30 20:02:32 2020	(r367186)
@@ -632,7 +632,7 @@ bpf_jit_compile(struct bpf_insn *prog, u_int nins, siz
 
 		*size = stream.cur_ip;
 #ifdef _KERNEL
-		stream.ibuf = malloc(*size, M_BPFJIT, M_EXEC | M_NOWAIT);
+		stream.ibuf = malloc_exec(*size, M_BPFJIT, M_NOWAIT);
 		if (stream.ibuf == NULL)
 			break;
 #else

Modified: head/sys/kern/kern_malloc.c
==============================================================================
--- head/sys/kern/kern_malloc.c	Fri Oct 30 19:53:16 2020	(r367185)
+++ head/sys/kern/kern_malloc.c	Fri Oct 30 20:02:32 2020	(r367186)
@@ -618,13 +618,14 @@ void *
 	unsigned long osize = size;
 #endif
 
+	MPASS((flags & M_EXEC) == 0);
 #ifdef MALLOC_DEBUG
 	va = NULL;
 	if (malloc_dbg(&va, &size, mtp, flags) != 0)
 		return (va);
 #endif
 
-	if (size <= kmem_zmax && (flags & M_EXEC) == 0) {
+	if (size <= kmem_zmax) {
 		if (size & KMEM_ZMASK)
 			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
 		indx = kmemsize[size >> KMEM_ZSHIFT];
@@ -640,10 +641,11 @@ void *
 		va = malloc_large(&size, DOMAINSET_RR(), flags);
 		malloc_type_allocated(mtp, va == NULL ? 0 : size);
 	}
-	if (flags & M_WAITOK)
-		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
-	else if (va == NULL)
+	if (__predict_false(va == NULL)) {
+		KASSERT((flags & M_WAITOK) == 0,
+		    ("malloc(M_WAITOK) returned NULL"));
 		t_malloc_fail = time_uptime;
+	}
 #ifdef DEBUG_REDZONE
 	if (va != NULL)
 		va = redzone_setup(va, osize);
@@ -682,40 +684,102 @@ malloc_domainset(size_t size, struct malloc_type *mtp,
     int flags)
 {
 	struct vm_domainset_iter di;
-	caddr_t ret;
+	caddr_t va;
 	int domain;
 	int indx;
 
 #if defined(DEBUG_REDZONE)
 	unsigned long osize = size;
 #endif
+	MPASS((flags & M_EXEC) == 0);
 #ifdef MALLOC_DEBUG
-	ret= NULL;
-	if (malloc_dbg(&ret, &size, mtp, flags) != 0)
-		return (ret);
+	va = NULL;
+	if (malloc_dbg(&va, &size, mtp, flags) != 0)
+		return (va);
 #endif
-	if (size <= kmem_zmax && (flags & M_EXEC) == 0) {
+	if (size <= kmem_zmax) {
 		vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
 		do {
-			ret = malloc_domain(&size, &indx, mtp, domain, flags);
-		} while (ret == NULL &&
+			va = malloc_domain(&size, &indx, mtp, domain, flags);
+		} while (va == NULL &&
 		    vm_domainset_iter_policy(&di, &domain) == 0);
-		malloc_type_zone_allocated(mtp, ret == NULL ? 0 : size, indx);
+		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
 	} else {
 		/* Policy is handled by kmem. */
-		ret = malloc_large(&size, ds, flags);
-		malloc_type_allocated(mtp, ret == NULL ? 0 : size);
+		va = malloc_large(&size, ds, flags);
+		malloc_type_allocated(mtp, va == NULL ? 0 : size);
 	}
+	if (__predict_false(va == NULL)) {
+		KASSERT((flags & M_WAITOK) == 0,
+		    ("malloc(M_WAITOK) returned NULL"));
+		t_malloc_fail = time_uptime;
+	}
+#ifdef DEBUG_REDZONE
+	if (va != NULL)
+		va = redzone_setup(va, osize);
+#endif
+	return (va);
+}
 
-	if (flags & M_WAITOK)
-		KASSERT(ret != NULL, ("malloc(M_WAITOK) returned NULL"));
-	else if (ret == NULL)
+/*
+ * Allocate an executable area.
+ */
+void *
+malloc_exec(size_t size, struct malloc_type *mtp, int flags)
+{
+	caddr_t va;
+#if defined(DEBUG_REDZONE)
+	unsigned long osize = size;
+#endif
+
+	flags |= M_EXEC;
+#ifdef MALLOC_DEBUG
+	va = NULL;
+	if (malloc_dbg(&va, &size, mtp, flags) != 0)
+		return (va);
+#endif
+	va = malloc_large(&size, DOMAINSET_RR(), flags);
+	malloc_type_allocated(mtp, va == NULL ? 0 : size);
+	if (__predict_false(va == NULL)) {
+		KASSERT((flags & M_WAITOK) == 0,
+		    ("malloc(M_WAITOK) returned NULL"));
 		t_malloc_fail = time_uptime;
+	}
 #ifdef DEBUG_REDZONE
-	if (ret != NULL)
-		ret = redzone_setup(ret, osize);
+	if (va != NULL)
+		va = redzone_setup(va, osize);
 #endif
-	return (ret);
+	return ((void *) va);
+}
+
+void *
+malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
+    int flags)
+{
+	caddr_t va;
+#if defined(DEBUG_REDZONE)
+	unsigned long osize = size;
+#endif
+
+	flags |= M_EXEC;
+#ifdef MALLOC_DEBUG
+	va = NULL;
+	if (malloc_dbg(&va, &size, mtp, flags) != 0)
+		return (va);
+#endif
+	/* Policy is handled by kmem. */
+	va = malloc_large(&size, ds, flags);
+	malloc_type_allocated(mtp, va == NULL ? 0 : size);
+	if (__predict_false(va == NULL)) {
+		KASSERT((flags & M_WAITOK) == 0,
+		    ("malloc(M_WAITOK) returned NULL"));
+		t_malloc_fail = time_uptime;
+	}
+#ifdef DEBUG_REDZONE
+	if (va != NULL)
+		va = redzone_setup(va, osize);
+#endif
+	return (va);
 }
 
 void *

Modified: head/sys/kern/link_elf.c
==============================================================================
--- head/sys/kern/link_elf.c	Fri Oct 30 19:53:16 2020	(r367185)
+++ head/sys/kern/link_elf.c	Fri Oct 30 20:02:32 2020	(r367186)
@@ -1129,7 +1129,7 @@ link_elf_load_file(linker_class_t cls, const char* fil
 		goto out;
 	}
 #else
-	mapbase = malloc(mapsize, M_LINKER, M_EXEC | M_WAITOK);
+	mapbase = malloc_exec(mapsize, M_LINKER, M_WAITOK);
 #endif
 	ef->address = mapbase;
 

Modified: head/sys/sys/malloc.h
==============================================================================
--- head/sys/sys/malloc.h	Fri Oct 30 19:53:16 2020	(r367185)
+++ head/sys/sys/malloc.h	Fri Oct 30 20:02:32 2020	(r367186)
@@ -239,6 +239,11 @@ void	*malloc_domainset(size_t size, struct malloc_type
 void	*mallocarray(size_t nmemb, size_t size, struct malloc_type *type,
 	    int flags) __malloc_like __result_use_check
 	    __alloc_size2(1, 2);
+void	*malloc_exec(size_t size, struct malloc_type *type, int flags) __malloc_like
+	    __result_use_check __alloc_size(1);
+void	*malloc_domainset_exec(size_t size, struct malloc_type *type,
+	    struct domainset *ds, int flags) __malloc_like __result_use_check
+	    __alloc_size(1);
 void	malloc_init(void *);
 int	malloc_last_fail(void);
 void	malloc_type_allocated(struct malloc_type *type, unsigned long size);


More information about the svn-src-head mailing list