svn commit: r365007 - stable/12/sys/vm
Mark Johnston
markj at FreeBSD.org
Mon Aug 31 19:59:07 UTC 2020
Author: markj
Date: Mon Aug 31 19:59:05 2020
New Revision: 365007
URL: https://svnweb.freebsd.org/changeset/base/365007
Log:
Revert r364987, r364988.
The removal of the kernel_map symbol breaks at least one out-of-tree module,
the nvidia binary driver. Revert the removal for now to maintain KBI
compatibility.
Reported by: np
Modified:
stable/12/sys/vm/vm_extern.h
stable/12/sys/vm/vm_init.c
stable/12/sys/vm/vm_kern.c
stable/12/sys/vm/vm_kern.h
stable/12/sys/vm/vm_map.c
stable/12/sys/vm/vm_map.h
stable/12/sys/vm/vm_page.c
Directory Properties:
stable/12/ (props changed)
Modified: stable/12/sys/vm/vm_extern.h
==============================================================================
--- stable/12/sys/vm/vm_extern.h Mon Aug 31 19:47:30 2020 (r365006)
+++ stable/12/sys/vm/vm_extern.h Mon Aug 31 19:59:05 2020 (r365007)
@@ -77,8 +77,8 @@ void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
/* Bootstrapping. */
void kmem_bootstrap_free(vm_offset_t, vm_size_t);
-void kmem_subinit(vm_map_t, vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
- bool);
+vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
+ boolean_t);
void kmem_init(vm_offset_t, vm_offset_t);
void kmem_init_zero_region(void);
void kmeminit(void);
Modified: stable/12/sys/vm/vm_init.c
==============================================================================
--- stable/12/sys/vm/vm_init.c Mon Aug 31 19:47:30 2020 (r365006)
+++ stable/12/sys/vm/vm_init.c Mon Aug 31 19:59:05 2020 (r365007)
@@ -273,8 +273,8 @@ again:
exec_map_entries = 2 * mp_ncpus + 4;
#endif
exec_map_entry_size = round_page(PATH_MAX + ARG_MAX);
- kmem_subinit(exec_map, kernel_map, &minaddr, &maxaddr,
- exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, false);
- kmem_subinit(pipe_map, kernel_map, &minaddr, &maxaddr, maxpipekva,
- false);
+ exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, FALSE);
+ pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva,
+ FALSE);
}
Modified: stable/12/sys/vm/vm_kern.c
==============================================================================
--- stable/12/sys/vm/vm_kern.c Mon Aug 31 19:47:30 2020 (r365006)
+++ stable/12/sys/vm/vm_kern.c Mon Aug 31 19:59:05 2020 (r365007)
@@ -97,9 +97,9 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_extern.h>
#include <vm/uma.h>
-struct vm_map kernel_map_store;
-struct vm_map exec_map_store;
-struct vm_map pipe_map_store;
+vm_map_t kernel_map;
+vm_map_t exec_map;
+vm_map_t pipe_map;
const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);
@@ -357,9 +357,9 @@ kmem_alloc_contig_domainset(struct domainset *ds, vm_s
}
/*
- * kmem_subinit:
+ * kmem_suballoc:
*
- * Initializes a map to manage a subrange
+ * Allocates a map to manage a subrange
* of the kernel virtual address space.
*
* Arguments are as follows:
@@ -369,11 +369,12 @@ kmem_alloc_contig_domainset(struct domainset *ds, vm_s
* size Size of range to find
* superpage_align Request that min is superpage aligned
*/
-void
-kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
- vm_size_t size, bool superpage_align)
+vm_map_t
+kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
+ vm_size_t size, boolean_t superpage_align)
{
int ret;
+ vm_map_t result;
size = round_page(size);
@@ -382,11 +383,14 @@ kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_
VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
MAP_ACC_NO_CHARGE);
if (ret != KERN_SUCCESS)
- panic("kmem_subinit: bad status return of %d", ret);
+ panic("kmem_suballoc: bad status return of %d", ret);
*max = *min + size;
- vm_map_init(map, vm_map_pmap(parent), *min, *max);
- if (vm_map_submap(parent, *min, *max, map) != KERN_SUCCESS)
- panic("kmem_subinit: unable to change range to submap");
+ result = vm_map_create(vm_map_pmap(parent), *min, *max);
+ if (result == NULL)
+ panic("kmem_suballoc: cannot create submap");
+ if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
+ panic("kmem_suballoc: unable to change range to submap");
+ return (result);
}
/*
@@ -742,13 +746,15 @@ kva_import_domain(void *arena, vmem_size_t size, int f
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
+ vm_map_t m;
int domain;
- vm_map_init(kernel_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
- kernel_map->system_map = 1;
- vm_map_lock(kernel_map);
+ m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
+ m->system_map = 1;
+ vm_map_lock(m);
/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
- (void) vm_map_insert(kernel_map, NULL, (vm_ooffset_t) 0,
+ kernel_map = m;
+ (void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
KERNBASE,
#else
@@ -756,7 +762,7 @@ kmem_init(vm_offset_t start, vm_offset_t end)
#endif
start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
/* ... and ending with the completion of the above `insert' */
- vm_map_unlock(kernel_map);
+ vm_map_unlock(m);
/*
* Initialize the kernel_arena. This can grow on demand.
Modified: stable/12/sys/vm/vm_kern.h
==============================================================================
--- stable/12/sys/vm/vm_kern.h Mon Aug 31 19:47:30 2020 (r365006)
+++ stable/12/sys/vm/vm_kern.h Mon Aug 31 19:59:05 2020 (r365007)
@@ -66,12 +66,9 @@
#define _VM_VM_KERN_H_
/* Kernel memory management definitions. */
-extern struct vm_map kernel_map_store;
-#define kernel_map (&kernel_map_store)
-extern struct vm_map exec_map_store;
-#define exec_map (&exec_map_store)
-extern struct vm_map pipe_map_store;
-#define pipe_map (&pipe_map_store)
+extern vm_map_t kernel_map;
+extern vm_map_t exec_map;
+extern vm_map_t pipe_map;
extern struct vmem *kernel_arena;
extern struct vmem *kmem_arena;
extern struct vmem *buffer_arena;
Modified: stable/12/sys/vm/vm_map.c
==============================================================================
--- stable/12/sys/vm/vm_map.c Mon Aug 31 19:47:30 2020 (r365006)
+++ stable/12/sys/vm/vm_map.c Mon Aug 31 19:59:05 2020 (r365007)
@@ -128,8 +128,10 @@ __FBSDID("$FreeBSD$");
static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
+static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
+static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
@@ -140,6 +142,7 @@ static int vm_map_growstack(vm_map_t map, vm_offset_t
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
#ifdef INVARIANTS
+static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
@@ -195,6 +198,14 @@ void
vm_map_startup(void)
{
mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
+ mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
+#ifdef INVARIANTS
+ vm_map_zdtor,
+#else
+ NULL,
+#endif
+ vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
+ uma_prealloc(mapzone, MAX_KMAP);
kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
@@ -213,16 +224,24 @@ static int
vmspace_zinit(void *mem, int size, int flags)
{
struct vmspace *vm;
- vm_map_t map;
vm = (struct vmspace *)mem;
- map = &vm->vm_map;
+ vm->vm_map.pmap = NULL;
+ (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
+ PMAP_LOCK_INIT(vmspace_pmap(vm));
+ return (0);
+}
+
+static int
+vm_map_zinit(void *mem, int size, int flags)
+{
+ vm_map_t map;
+
+ map = (vm_map_t)mem;
memset(map, 0, sizeof(*map));
- mtx_init(&map->system_mtx, "vm map (system)", NULL,
- MTX_DEF | MTX_DUPOK);
+ mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
sx_init(&map->lock, "vm map (user)");
- PMAP_LOCK_INIT(vmspace_pmap(vm));
return (0);
}
@@ -233,16 +252,29 @@ vmspace_zdtor(void *mem, int size, void *arg)
struct vmspace *vm;
vm = (struct vmspace *)mem;
- KASSERT(vm->vm_map.nentries == 0,
- ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries));
- KASSERT(vm->vm_map.size == 0,
- ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
+
+ vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
+static void
+vm_map_zdtor(void *mem, int size, void *arg)
+{
+ vm_map_t map;
+
+ map = (vm_map_t)mem;
+ KASSERT(map->nentries == 0,
+ ("map %p nentries == %d on free.",
+ map, map->nentries));
+ KASSERT(map->size == 0,
+ ("map %p size == %lu on free.",
+ map, (unsigned long)map->size));
+}
#endif /* INVARIANTS */
/*
* Allocate a vmspace structure, including a vm_map and pmap,
* and initialize those structures. The refcnt is set to 1.
+ *
+ * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
*/
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
@@ -846,6 +878,24 @@ vmspace_resident_count(struct vmspace *vmspace)
}
/*
+ * vm_map_create:
+ *
+ * Creates and returns a new empty VM map with
+ * the given physical map structure, and having
+ * the given lower and upper address bounds.
+ */
+vm_map_t
+vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
+{
+ vm_map_t result;
+
+ result = uma_zalloc(mapzone, M_WAITOK);
+ CTR1(KTR_VM, "vm_map_create: %p", result);
+ _vm_map_init(result, pmap, min, max);
+ return (result);
+}
+
+/*
* Initialize an existing vm_map structure
* such as that in the vmspace structure.
*/
@@ -872,9 +922,8 @@ vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min
{
_vm_map_init(map, pmap, min, max);
- mtx_init(&map->system_mtx, "vm map (system)", NULL,
- MTX_DEF | MTX_DUPOK);
- sx_init(&map->lock, "vm map (user)");
+ mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
+ sx_init(&map->lock, "user map");
}
/*
Modified: stable/12/sys/vm/vm_map.h
==============================================================================
--- stable/12/sys/vm/vm_map.h Mon Aug 31 19:47:30 2020 (r365006)
+++ stable/12/sys/vm/vm_map.h Mon Aug 31 19:59:05 2020 (r365007)
@@ -346,6 +346,10 @@ bool vm_map_range_valid_KBI(vm_map_t map, vm_offset_t
long vmspace_resident_count(struct vmspace *vmspace);
#endif /* _KERNEL */
+
+/* XXX: number of kernel maps to statically allocate */
+#define MAX_KMAP 10
+
/*
* Copy-on-write flags for vm_map operations
*/
@@ -409,6 +413,7 @@ long vmspace_resident_count(struct vmspace *vmspace);
#ifdef _KERNEL
boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t);
+vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t);
int vm_map_delete(vm_map_t, vm_offset_t, vm_offset_t);
int vm_map_find(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t,
vm_offset_t, int, vm_prot_t, vm_prot_t, int);
Modified: stable/12/sys/vm/vm_page.c
==============================================================================
--- stable/12/sys/vm/vm_page.c Mon Aug 31 19:47:30 2020 (r365006)
+++ stable/12/sys/vm/vm_page.c Mon Aug 31 19:59:05 2020 (r365007)
@@ -590,6 +590,9 @@ vm_page_startup(vm_offset_t vaddr)
#ifndef UMA_MD_SMALL_ALLOC
/* vmem_startup() calls uma_prealloc(). */
boot_pages += vmem_startup_count();
+ /* vm_map_startup() calls uma_prealloc(). */
+ boot_pages += howmany(MAX_KMAP,
+ UMA_SLAB_SPACE / sizeof(struct vm_map));
/*
* Before going fully functional kmem_init() does allocation
More information about the svn-src-all
mailing list