svn commit: r325992 - in user/jeff/numa/sys: kern vm
Jeff Roberson
jeff at FreeBSD.org
Sun Nov 19 03:18:31 UTC 2017
Author: jeff
Date: Sun Nov 19 03:18:29 2017
New Revision: 325992
URL: https://svnweb.freebsd.org/changeset/base/325992
Log:
Use a per-domain kernel vmem arena to ensure that reservations and
domain-specific allocations are honored for kernel memory.
Provide domain iterators for kmem along with domain-specific allocation
functions.
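
Every converted entry point below (kmem_alloc_attr(), kmem_alloc_contig(),
kmem_malloc(), kmem_back()) wraps its per-domain worker in the same iterator
loop, so the pattern is worth reading once in isolation. A minimal sketch,
with try_domain() standing in as a hypothetical placeholder for the
per-domain allocator:

	struct vm_domain_iterator vi;
	int domain, wait;

	vm_policy_iterator_init(&vi);
	wait = flags & M_WAITOK;	/* remember whether the caller may sleep */
	flags &= ~M_WAITOK;
	flags |= M_NOWAIT;		/* early passes must not block */
	while (vm_domain_iterator_run(&vi, &domain) == 0) {
		if (vm_domain_iterator_isdone(&vi) && wait) {
			/* Last candidate domain: allow sleeping again. */
			flags |= wait;
			flags &= ~M_NOWAIT;
		}
		addr = try_domain(domain, size, flags);	/* hypothetical worker */
		if (addr != 0)
			break;
	}
	vm_policy_iterator_finish(&vi);

This tries each domain the policy offers without sleeping, and re-enables
M_WAITOK only on the final candidate domain, so a sleeping allocation cannot
stall while another domain still has free pages.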
Modified:
user/jeff/numa/sys/kern/subr_vmem.c
user/jeff/numa/sys/vm/vm_extern.h
user/jeff/numa/sys/vm/vm_init.c
user/jeff/numa/sys/vm/vm_kern.c
user/jeff/numa/sys/vm/vm_page.h
Modified: user/jeff/numa/sys/kern/subr_vmem.c
==============================================================================
--- user/jeff/numa/sys/kern/subr_vmem.c Sun Nov 19 03:14:10 2017 (r325991)
+++ user/jeff/numa/sys/kern/subr_vmem.c Sun Nov 19 03:18:29 2017 (r325992)
@@ -184,6 +184,7 @@ static struct task vmem_periodic_wk;
static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
+static uma_zone_t vmem_zone;
/* ---- misc */
#define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
@@ -655,6 +656,9 @@ vmem_startup(void)
{
mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
+ vmem_zone = uma_zcreate("vmem",
+ sizeof(struct vmem), NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, UMA_ZONE_VM);
vmem_bt_zone = uma_zcreate("vmem btag",
sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_VM);
@@ -824,7 +828,7 @@ vmem_destroy1(vmem_t *vm)
VMEM_CONDVAR_DESTROY(vm);
VMEM_LOCK_DESTROY(vm);
- free(vm, M_VMEM);
+ uma_zfree(vmem_zone, vm);
}
static int
@@ -1056,7 +1060,7 @@ vmem_create(const char *name, vmem_addr_t base, vmem_s
vmem_t *vm;
- vm = malloc(sizeof(*vm), M_VMEM, flags & (M_WAITOK|M_NOWAIT));
+ vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
if (vm == NULL)
return (NULL);
if (vmem_init(vm, name, base, size, quantum, qcache_max,
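
The switch from malloc(9) to a dedicated UMA zone likely matters for
bootstrap ordering: the per-domain arenas are created from vm_mem_init()
(see the vm_init.c hunk below), and UMA_ZONE_VM marks the zone as one used
internally by the VM system, so allocating a struct vmem cannot recurse back
into kmem. A condensed before/after sketch of the changed call sites:

	/* Before: vmem structures came from malloc(9). */
	vm = malloc(sizeof(*vm), M_VMEM, flags & (M_WAITOK|M_NOWAIT));
	...
	free(vm, M_VMEM);

	/* After: a dedicated zone, created once in vmem_startup(). */
	vmem_zone = uma_zcreate("vmem", sizeof(struct vmem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
	...
	uma_zfree(vmem_zone, vm);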
Modified: user/jeff/numa/sys/vm/vm_extern.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_extern.h Sun Nov 19 03:14:10 2017 (r325991)
+++ user/jeff/numa/sys/vm/vm_extern.h Sun Nov 19 03:18:29 2017 (r325992)
@@ -54,14 +54,21 @@ void kmap_free_wakeup(vm_map_t, vm_offset_t, vm_size_t
/* These operate on virtual addresses backed by memory. */
vm_offset_t kmem_alloc_attr(struct vmem *, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
+vm_offset_t kmem_alloc_attr_domain(int domain, vm_size_t size, int flags,
+ vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
vm_offset_t kmem_alloc_contig(struct vmem *, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr);
+vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags,
+ vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
+ vm_memattr_t memattr);
vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
+vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
/* This provides memory for previously allocated address space. */
int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int);
+int kmem_back_domain(int, vm_object_t, vm_offset_t, vm_size_t, int);
void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
/* Bootstrapping. */
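
For callers, the new *_domain variants bypass the policy iterator and target
one domain directly, while the existing names keep their signatures and apply
the domain policy. A hypothetical use, pinning a scratch buffer to domain 0
with a policy-driven fallback:

	vm_offset_t buf;
	vm_size_t sz = 16 * PAGE_SIZE;

	buf = kmem_malloc_domain(0, sz, M_NOWAIT | M_ZERO);
	if (buf == 0)		/* domain 0 exhausted; let the policy pick */
		buf = kmem_malloc(kernel_arena, sz, M_WAITOK | M_ZERO);
	...
	kmem_free(kernel_arena, buf, sz);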
Modified: user/jeff/numa/sys/vm/vm_init.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_init.c Sun Nov 19 03:14:10 2017 (r325991)
+++ user/jeff/numa/sys/vm/vm_init.c Sun Nov 19 03:18:29 2017 (r325992)
@@ -79,16 +79,25 @@ __FBSDID("$FreeBSD$");
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmem.h>
+#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
+#include <vm/vm_phys.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
+
+#if VM_NRESERVLEVEL > 0
+#define KVA_QUANTUM (1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT))
+#else
+ /* On non-superpage architectures we want large import sizes. */
+#define KVA_QUANTUM (PAGE_SIZE * 1024)
+#endif
long physmem;
/*
@@ -128,6 +137,7 @@ static void
vm_mem_init(dummy)
void *dummy;
{
+ int domain;
/*
* Initializes resident memory structures. From here on, all physical
@@ -148,13 +158,15 @@ vm_mem_init(dummy)
* Initialize the kernel_arena. This can grow on demand.
*/
vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
- vmem_set_import(kernel_arena, kva_import, NULL, NULL,
-#if VM_NRESERVLEVEL > 0
- 1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT));
-#else
- /* On non-superpage architectures want large import sizes. */
- PAGE_SIZE * 1024);
-#endif
+ vmem_set_import(kernel_arena, kva_import, NULL, NULL, KVA_QUANTUM);
+
+ for (domain = 0; domain < vm_ndomains; domain++) {
+ vm_dom[domain].vmd_kernel_arena = vmem_create(
+ "kernel arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
+ vmem_set_import(vm_dom[domain].vmd_kernel_arena,
+ (vmem_import_t *)vmem_alloc, NULL, kernel_arena,
+ KVA_QUANTUM);
+ }
kmem_init_zero_region();
pmap_init();
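
Each per-domain arena is a child of kernel_arena: vmem_alloc() itself serves
as the import function (its signature matches vmem_import_t up to the void *
first argument), so a domain arena refills by taking KVA_QUANTUM-sized spans
of KVA from the shared arena, which in turn imports from the kernel map via
kva_import(). As a worked example of the quantum, assuming amd64's
VM_LEVEL_0_ORDER of 9 and PAGE_SHIFT of 12:

	/*
	 * With superpage reservations (VM_NRESERVLEVEL > 0):
	 *   KVA_QUANTUM = 1 << (9 + 12) = 2 MB, one reservation, so each
	 *   domain arena's KVA stays reservation-aligned.
	 * Without them:
	 *   KVA_QUANTUM = PAGE_SIZE * 1024 = 4 MB with 4 KB pages, simply
	 *   a large unit that limits round trips to kernel_arena.
	 */

Importing in large aligned chunks keeps each domain's KVA contiguous enough
for superpage promotion and keeps contention on the shared arena low.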
Modified: user/jeff/numa/sys/vm/vm_kern.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_kern.c Sun Nov 19 03:14:10 2017 (r325991)
+++ user/jeff/numa/sys/vm/vm_kern.c Sun Nov 19 03:18:29 2017 (r325992)
@@ -75,8 +75,10 @@ __FBSDID("$FreeBSD$");
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
+#include <sys/vmmeter.h>
#include <vm/vm.h>
+#include <vm/vm_domain.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
@@ -84,6 +86,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
+#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
@@ -159,17 +162,17 @@ kva_free(vm_offset_t addr, vm_size_t size)
* given flags, then the pages are zeroed before they are mapped.
*/
vm_offset_t
-kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
+kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, vm_memattr_t memattr)
{
+ vmem_t *vmem;
vm_object_t object = kernel_object;
vm_offset_t addr, i, offset;
vm_page_t m;
int pflags, tries;
- KASSERT(vmem == kernel_arena,
- ("kmem_alloc_attr: Only kernel_arena is supported."));
size = round_page(size);
+ vmem = vm_dom[domain].vmd_kernel_arena;
if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
return (0);
offset = addr - VM_MIN_KERNEL_ADDRESS;
@@ -207,6 +210,36 @@ retry:
return (addr);
}
+vm_offset_t
+kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
+ vm_paddr_t high, vm_memattr_t memattr)
+{
+ struct vm_domain_iterator vi;
+ vm_offset_t addr;
+ int domain, wait;
+
+ KASSERT(vmem == kernel_arena,
+ ("kmem_alloc_attr: Only kernel_arena is supported."));
+ addr = 0;
+ vm_policy_iterator_init(&vi);
+ wait = flags & M_WAITOK;
+ flags &= ~M_WAITOK;
+ flags |= M_NOWAIT;
+ while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
+ if (vm_domain_iterator_isdone(&vi) && wait) {
+ flags |= wait;
+ flags &= ~M_NOWAIT;
+ }
+ addr = kmem_alloc_attr_domain(domain, size, flags, low, high,
+ memattr);
+ if (addr != 0)
+ break;
+ }
+ vm_policy_iterator_finish(&vi);
+
+ return (addr);
+}
+
/*
* Allocates a region from the kernel address map and physically
* contiguous pages within the specified address range to the kernel
@@ -216,19 +249,19 @@ retry:
* mapped.
*/
vm_offset_t
-kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
+kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr)
{
+ vmem_t *vmem;
vm_object_t object = kernel_object;
vm_offset_t addr, offset, tmp;
vm_page_t end_m, m;
u_long npages;
int pflags, tries;
- KASSERT(vmem == kernel_arena,
- ("kmem_alloc_contig: Only kernel_arena is supported."));
size = round_page(size);
+ vmem = vm_dom[domain].vmd_kernel_arena;
if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
return (0);
offset = addr - VM_MIN_KERNEL_ADDRESS;
@@ -268,6 +301,37 @@ retry:
return (addr);
}
+vm_offset_t
+kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
+ vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
+ vm_memattr_t memattr)
+{
+ struct vm_domain_iterator vi;
+ vm_offset_t addr;
+ int domain, wait;
+
+ KASSERT(vmem == kernel_arena,
+ ("kmem_alloc_contig: Only kernel_arena is supported."));
+ addr = 0;
+ vm_policy_iterator_init(&vi);
+ wait = flags & M_WAITOK;
+ flags &= ~M_WAITOK;
+ flags |= M_NOWAIT;
+ while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
+ if (vm_domain_iterator_isdone(&vi) && wait) {
+ flags |= wait;
+ flags &= ~M_NOWAIT;
+ }
+ addr = kmem_alloc_contig_domain(domain, size, flags, low, high,
+ alignment, boundary, memattr);
+ if (addr != 0)
+ break;
+ }
+ vm_policy_iterator_finish(&vi);
+
+ return (addr);
+}
+
/*
* kmem_suballoc:
*
@@ -311,18 +375,18 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_of
* Allocate wired-down pages in the kernel's address space.
*/
vm_offset_t
-kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
+kmem_malloc_domain(int domain, vm_size_t size, int flags)
{
+ vmem_t *vmem;
vm_offset_t addr;
int rv;
- KASSERT(vmem == kernel_arena,
- ("kmem_malloc: Only kernel_arena is supported."));
+ vmem = vm_dom[domain].vmd_kernel_arena;
size = round_page(size);
if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
return (0);
- rv = kmem_back(kernel_object, addr, size, flags);
+ rv = kmem_back_domain(domain, kernel_object, addr, size, flags);
if (rv != KERN_SUCCESS) {
vmem_free(vmem, addr, size);
return (0);
@@ -330,20 +394,49 @@ kmem_malloc(struct vmem *vmem, vm_size_t size, int fla
return (addr);
}
+vm_offset_t
+kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
+{
+ struct vm_domain_iterator vi;
+ vm_offset_t addr;
+ int domain, wait;
+
+ KASSERT(vmem == kernel_arena,
+ ("kmem_malloc: Only kernel_arena is supported."));
+ addr = 0;
+ vm_policy_iterator_init(&vi);
+ wait = flags & M_WAITOK;
+ flags &= ~M_WAITOK;
+ flags |= M_NOWAIT;
+ while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
+ if (vm_domain_iterator_isdone(&vi) && wait) {
+ flags |= wait;
+ flags &= ~M_NOWAIT;
+ }
+ addr = kmem_malloc_domain(domain, size, flags);
+ if (addr != 0)
+ break;
+ }
+ vm_policy_iterator_finish(&vi);
+
+ return (addr);
+}
+
/*
* kmem_back:
*
* Allocate physical pages for the specified virtual address range.
*/
int
-kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
+kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
+ vm_size_t size, int flags)
{
vm_offset_t offset, i;
vm_page_t m, mpred;
int pflags;
KASSERT(object == kernel_object,
- ("kmem_back: only supports kernel object."));
+ ("kmem_back_domain: only supports kernel object."));
offset = addr - VM_MIN_KERNEL_ADDRESS;
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
@@ -356,8 +449,8 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_siz
retry:
mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
for (; i < size; i += PAGE_SIZE, mpred = m) {
- m = vm_page_alloc_after(object, atop(offset + i), pflags,
- mpred);
+ m = vm_page_alloc_domain_after(object, atop(offset + i),
+ domain, pflags, mpred);
/*
* Ran out of space, free everything up and return. Don't need
@@ -384,6 +477,33 @@ retry:
return (KERN_SUCCESS);
}
+int
+kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
+{
+ struct vm_domain_iterator vi;
+ int domain, wait, ret;
+
+ KASSERT(object == kernel_object,
+ ("kmem_back: only supports kernel object."));
+ ret = 0;
+ vm_policy_iterator_init(&vi);
+ wait = flags & M_WAITOK;
+ flags &= ~M_WAITOK;
+ flags |= M_NOWAIT;
+ while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
+ if (vm_domain_iterator_isdone(&vi) && wait) {
+ flags |= wait;
+ flags &= ~M_NOWAIT;
+ }
+ ret = kmem_back_domain(domain, object, addr, size, flags);
+ if (ret == KERN_SUCCESS)
+ break;
+ }
+ vm_policy_iterator_finish(&vi);
+
+ return (ret);
+}
+
/*
* kmem_unback:
*
@@ -393,11 +513,12 @@ retry:
* A physical page must exist within the specified object at each index
* that is being unmapped.
*/
-void
-kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
+static int
+_kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
vm_page_t m, next;
vm_offset_t end, offset;
+ int domain;
KASSERT(object == kernel_object,
("kmem_unback: only supports kernel object."));
@@ -406,15 +527,25 @@ kmem_unback(vm_object_t object, vm_offset_t addr, vm_s
offset = addr - VM_MIN_KERNEL_ADDRESS;
end = offset + size;
VM_OBJECT_WLOCK(object);
- for (m = vm_page_lookup(object, atop(offset)); offset < end;
- offset += PAGE_SIZE, m = next) {
+ m = vm_page_lookup(object, atop(offset));
+ domain = vm_phys_domidx(m);
+ for (; offset < end; offset += PAGE_SIZE, m = next) {
next = vm_page_next(m);
vm_page_unwire(m, PQ_NONE);
vm_page_free(m);
}
VM_OBJECT_WUNLOCK(object);
+
+ return (domain);
}
+void
+kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
+{
+
+ _kmem_unback(object, addr, size);
+}
+
/*
* kmem_free:
*
@@ -424,12 +555,13 @@ kmem_unback(vm_object_t object, vm_offset_t addr, vm_s
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{
+ int domain;
KASSERT(vmem == kernel_arena,
("kmem_free: Only kernel_arena is supported."));
size = round_page(size);
- kmem_unback(kernel_object, addr, size);
- vmem_free(vmem, addr, size);
+ domain = _kmem_unback(kernel_object, addr, size);
+ vmem_free(vm_dom[domain].vmd_kernel_arena, addr, size);
}
/*
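
One subtlety at free time: kmem_free() still takes kernel_arena from its
callers, but the KVA may have come from any domain's arena. _kmem_unback()
therefore recovers the domain from the first backing page and returns it, so
the range is released to the arena it was allocated from. This works because
kmem_back_domain() backs an entire range from a single domain (on partial
failure it frees everything and the iterator retries elsewhere). A condensed
view of the recovery, using names from the hunks above:

	m = vm_page_lookup(kernel_object, atop(addr - VM_MIN_KERNEL_ADDRESS));
	domain = vm_phys_domidx(m);	/* domain of the first backing page */
	...				/* unwire and free the pages */
	vmem_free(vm_dom[domain].vmd_kernel_arena, addr, size);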
Modified: user/jeff/numa/sys/vm/vm_page.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_page.h Sun Nov 19 03:14:10 2017 (r325991)
+++ user/jeff/numa/sys/vm/vm_page.h Sun Nov 19 03:18:29 2017 (r325992)
@@ -227,6 +227,7 @@ struct vm_pagequeue {
struct vm_domain {
struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
+ struct vmem *vmd_kernel_arena;
u_int vmd_page_count;
u_int vmd_free_count;
long vmd_segs; /* bitmask of the segments */