svn commit: r328074 - in user/jeff/numa/sys: amd64/amd64 arm/arm cddl/compat/opensolaris/sys cddl/contrib/opensolaris/uts/common/fs/zfs compat/linprocfs fs/tmpfs i386/i386 kern mips/mips powerpc/po...
Author: jeff
Date: Wed Jan 17 06:22:10 2018
New Revision: 328074
URL: https://svnweb.freebsd.org/changeset/base/328074
Log:
WIP per-domain page queue free locking and pageout targets.

Move the v_free_count, v_active_count, v_inactive_count, and
v_laundry_count counters out of the global vmmeter and into the
per-domain page queue state; the new vm_free_count(), vm_active_count(),
vm_inactive_count(), and vm_laundry_count() accessors sum them on
demand.  Replace the global vm_page_queue_free_mtx with a per-domain
free-queue mutex, pass a domain argument through the pagedaemon
wakeup/wait and allocation-failure paths, and temporarily stub
vm_wait() and vm_waitpfault() with pause() pending per-domain wait
logic.
Added:
user/jeff/numa/sys/vm/vm_pagequeue.h (contents, props changed)
Modified:
user/jeff/numa/sys/amd64/amd64/machdep.c
user/jeff/numa/sys/arm/arm/machdep.c
user/jeff/numa/sys/cddl/compat/opensolaris/sys/kmem.h
user/jeff/numa/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
user/jeff/numa/sys/compat/linprocfs/linprocfs.c
user/jeff/numa/sys/fs/tmpfs/tmpfs_subr.c
user/jeff/numa/sys/i386/i386/machdep.c
user/jeff/numa/sys/kern/init_main.c
user/jeff/numa/sys/kern/subr_vmem.c
user/jeff/numa/sys/mips/mips/machdep.c
user/jeff/numa/sys/powerpc/powerpc/machdep.c
user/jeff/numa/sys/sparc64/sparc64/machdep.c
user/jeff/numa/sys/sys/vmmeter.h
user/jeff/numa/sys/vm/swap_pager.c
user/jeff/numa/sys/vm/uma_core.c
user/jeff/numa/sys/vm/vm_extern.h
user/jeff/numa/sys/vm/vm_init.c
user/jeff/numa/sys/vm/vm_kern.c
user/jeff/numa/sys/vm/vm_map.c
user/jeff/numa/sys/vm/vm_meter.c
user/jeff/numa/sys/vm/vm_object.c
user/jeff/numa/sys/vm/vm_page.c
user/jeff/numa/sys/vm/vm_page.h
user/jeff/numa/sys/vm/vm_pageout.c
user/jeff/numa/sys/vm/vm_pageout.h
user/jeff/numa/sys/vm/vm_phys.c
user/jeff/numa/sys/vm/vm_phys.h
user/jeff/numa/sys/vm/vm_reserv.c
user/jeff/numa/sys/vm/vnode_pager.c
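
The newly added vm_pagequeue.h is listed above, but its contents are not
shown in this diff.  Judging from the call sites below, it provides
per-domain wrappers around the vmd_pagequeue_free_mtx that
vm_page_domain_init() now initializes.  A plausible sketch, reconstructed
under that assumption rather than taken from the file itself:

    /* Hypothetical reconstruction of the vm_pagequeue.h lock wrappers. */
    #define vm_pagequeue_free_lock(domain)                          \
            mtx_lock(&vm_dom[(domain)].vmd_pagequeue_free_mtx)
    #define vm_pagequeue_free_unlock(domain)                        \
            mtx_unlock(&vm_dom[(domain)].vmd_pagequeue_free_mtx)
    #define vm_pagequeue_free_assert_locked(domain)                 \
            mtx_assert(&vm_dom[(domain)].vmd_pagequeue_free_mtx, MA_OWNED)

    /* Adjust a domain's free count; callers below use the new value. */
    static inline u_int
    vm_pagequeue_freecnt_adj(int domain, int adj)
    {
            vm_pagequeue_free_assert_locked(domain);
            return (vm_dom[domain].vmd_free_count += adj);
    }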
Modified: user/jeff/numa/sys/amd64/amd64/machdep.c
==============================================================================
--- user/jeff/numa/sys/amd64/amd64/machdep.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/amd64/amd64/machdep.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -279,7 +279,7 @@ cpu_startup(dummy)
memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
freeenv(sysenv);
}
- if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count))
+ if (memsize < ptoa((uintmax_t)vm_free_count()))
memsize = ptoa((uintmax_t)Maxmem);
printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
realmem = atop(memsize);
@@ -306,8 +306,8 @@ cpu_startup(dummy)
vm_ksubmap_init(&kmi);
printf("avail memory = %ju (%ju MB)\n",
- ptoa((uintmax_t)vm_cnt.v_free_count),
- ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);
+ ptoa((uintmax_t)vm_free_count()),
+ ptoa((uintmax_t)vm_free_count()) / 1048576);
/*
* Set up buffers, so they can be used to read disk labels.
Modified: user/jeff/numa/sys/arm/arm/machdep.c
==============================================================================
--- user/jeff/numa/sys/arm/arm/machdep.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/arm/arm/machdep.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -228,8 +228,8 @@ cpu_startup(void *dummy)
(uintmax_t)arm32_ptob(realmem),
(uintmax_t)arm32_ptob(realmem) / mbyte);
printf("avail memory = %ju (%ju MB)\n",
- (uintmax_t)arm32_ptob(vm_cnt.v_free_count),
- (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte);
+ (uintmax_t)arm32_ptob(vm_free_count()),
+ (uintmax_t)arm32_ptob(vm_free_count()) / mbyte);
if (bootverbose) {
arm_physmem_print_tables();
devmap_print_table();
Modified: user/jeff/numa/sys/cddl/compat/opensolaris/sys/kmem.h
==============================================================================
--- user/jeff/numa/sys/cddl/compat/opensolaris/sys/kmem.h Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/cddl/compat/opensolaris/sys/kmem.h Wed Jan 17 06:22:10 2018 (r328074)
@@ -78,7 +78,7 @@ void kmem_reap(void);
int kmem_debugging(void);
void *calloc(size_t n, size_t s);
-#define freemem vm_cnt.v_free_count
+#define freemem vm_free_count()
#define minfree vm_cnt.v_free_min
#define heap_arena kernel_arena
#define zio_arena NULL
Modified: user/jeff/numa/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
==============================================================================
--- user/jeff/numa/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -379,7 +379,7 @@ static void
arc_free_target_init(void *unused __unused)
{
- zfs_arc_free_target = vm_pageout_wakeup_thresh;
+ zfs_arc_free_target = (vm_cnt.v_free_min / 10) * 11;
}
SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
arc_free_target_init, NULL);
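
For context: the vm_pageout_wakeup_thresh extern that this SYSINIT
previously consumed is removed from vmmeter.h later in this diff.  The
pageout code historically initialized that threshold as roughly 110% of
the minimum free-page count, which is exactly what the replacement
expression computes:

    /* Historical initialization of the removed global, for reference. */
    vm_pageout_wakeup_thresh = (vm_cnt.v_free_min / 10) * 11;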
Modified: user/jeff/numa/sys/compat/linprocfs/linprocfs.c
==============================================================================
--- user/jeff/numa/sys/compat/linprocfs/linprocfs.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/compat/linprocfs/linprocfs.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -156,7 +156,7 @@ linprocfs_domeminfo(PFS_FILL_ARGS)
/*
* The correct thing here would be:
*
- memfree = vm_cnt.v_free_count * PAGE_SIZE;
+ memfree = vm_free_count() * PAGE_SIZE;
memused = memtotal - memfree;
*
* but it might mislead linux binaries into thinking there
@@ -178,7 +178,7 @@ linprocfs_domeminfo(PFS_FILL_ARGS)
* like unstaticizing it just for linprocfs's sake.
*/
buffers = 0;
- cached = vm_cnt.v_inactive_count * PAGE_SIZE;
+ cached = vm_inactive_count() * PAGE_SIZE;
sbuf_printf(sb,
"MemTotal: %9lu kB\n"
Modified: user/jeff/numa/sys/fs/tmpfs/tmpfs_subr.c
==============================================================================
--- user/jeff/numa/sys/fs/tmpfs/tmpfs_subr.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/fs/tmpfs/tmpfs_subr.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -106,7 +106,7 @@ tmpfs_mem_avail(void)
{
vm_ooffset_t avail;
- avail = swap_pager_avail + vm_cnt.v_free_count - tmpfs_pages_reserved;
+ avail = swap_pager_avail + vm_free_count() - tmpfs_pages_reserved;
if (__predict_false(avail < 0))
avail = 0;
return (avail);
Modified: user/jeff/numa/sys/i386/i386/machdep.c
==============================================================================
--- user/jeff/numa/sys/i386/i386/machdep.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/i386/i386/machdep.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -271,7 +271,7 @@ cpu_startup(dummy)
memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
freeenv(sysenv);
}
- if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count))
+ if (memsize < ptoa((uintmax_t)vm_free_count()))
memsize = ptoa((uintmax_t)Maxmem);
printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
realmem = atop(memsize);
@@ -298,8 +298,8 @@ cpu_startup(dummy)
vm_ksubmap_init(&kmi);
printf("avail memory = %ju (%ju MB)\n",
- ptoa((uintmax_t)vm_cnt.v_free_count),
- ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);
+ ptoa((uintmax_t)vm_free_count()),
+ ptoa((uintmax_t)vm_free_count()) / 1048576);
/*
* Set up buffers, so they can be used to read disk labels.
Modified: user/jeff/numa/sys/kern/init_main.c
==============================================================================
--- user/jeff/numa/sys/kern/init_main.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/kern/init_main.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -87,6 +87,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/vm_param.h>
+#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/copyright.h>
@@ -555,7 +556,7 @@ proc0_init(void *dummy __unused)
p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
/* Cast to avoid overflow on i386/PAE. */
- pageablemem = ptoa((vm_paddr_t)vm_cnt.v_free_count);
+ pageablemem = ptoa((vm_paddr_t)vm_free_count());
p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_cur =
p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_max = pageablemem;
p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = pageablemem / 3;
Modified: user/jeff/numa/sys/kern/subr_vmem.c
==============================================================================
--- user/jeff/numa/sys/kern/subr_vmem.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/kern/subr_vmem.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>
+#include <sys/vmmeter.h>
#include "opt_vm.h"
@@ -72,6 +73,8 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
+#include <vm/vm_phys.h>
+#include <vm/vm_pagequeue.h>
#define VMEM_OPTORDER 5
#define VMEM_OPTVALUE (1 << VMEM_OPTORDER)
Modified: user/jeff/numa/sys/mips/mips/machdep.c
==============================================================================
--- user/jeff/numa/sys/mips/mips/machdep.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/mips/mips/machdep.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -210,8 +210,8 @@ cpu_startup(void *dummy)
vm_ksubmap_init(&kmi);
printf("avail memory = %ju (%juMB)\n",
- ptoa((uintmax_t)vm_cnt.v_free_count),
- ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);
+ ptoa((uintmax_t)vm_free_count()),
+ ptoa((uintmax_t)vm_free_count()) / 1048576);
cpu_init_interrupts();
/*
Modified: user/jeff/numa/sys/powerpc/powerpc/machdep.c
==============================================================================
--- user/jeff/numa/sys/powerpc/powerpc/machdep.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/powerpc/powerpc/machdep.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -213,8 +213,8 @@ cpu_startup(void *dummy)
vm_ksubmap_init(&kmi);
printf("avail memory = %ju (%ju MB)\n",
- ptoa((uintmax_t)vm_cnt.v_free_count),
- ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);
+ ptoa((uintmax_t)vm_free_count()),
+ ptoa((uintmax_t)vm_free_count()) / 1048576);
/*
* Set up buffers, so they can be used to read disk labels.
Modified: user/jeff/numa/sys/sparc64/sparc64/machdep.c
==============================================================================
--- user/jeff/numa/sys/sparc64/sparc64/machdep.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/sparc64/sparc64/machdep.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -190,8 +190,8 @@ cpu_startup(void *arg)
EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
SHUTDOWN_PRI_LAST);
- printf("avail memory = %lu (%lu MB)\n", vm_cnt.v_free_count * PAGE_SIZE,
- vm_cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));
+ printf("avail memory = %lu (%lu MB)\n", vm_free_count() * PAGE_SIZE,
+ vm_free_count() / ((1024 * 1024) / PAGE_SIZE));
if (bootverbose)
printf("machine: %s\n", sparc64_model);
Modified: user/jeff/numa/sys/sys/vmmeter.h
==============================================================================
--- user/jeff/numa/sys/sys/vmmeter.h Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/sys/vmmeter.h Wed Jan 17 06:22:10 2018 (r328074)
@@ -141,23 +141,19 @@ struct vmmeter {
u_int v_interrupt_free_min; /* (c) reserved pages for int code */
u_int v_free_severe; /* (c) severe page depletion point */
u_int v_wire_count VMMETER_ALIGNED; /* (a) pages wired down */
- u_int v_active_count VMMETER_ALIGNED; /* (a) pages active */
- u_int v_inactive_count VMMETER_ALIGNED; /* (a) pages inactive */
- u_int v_laundry_count VMMETER_ALIGNED; /* (a) pages eligible for
- laundering */
- u_int v_free_count VMMETER_ALIGNED; /* (f) pages free */
};
#endif /* _KERNEL || _WANT_VMMETER */
#ifdef _KERNEL
extern struct vmmeter vm_cnt;
-extern u_int vm_pageout_wakeup_thresh;
#define VM_CNT_ADD(var, x) counter_u64_add(vm_cnt.var, x)
#define VM_CNT_INC(var) VM_CNT_ADD(var, 1)
#define VM_CNT_FETCH(var) counter_u64_fetch(vm_cnt.var)
+u_int vm_free_count(void);
+
/*
* Return TRUE if we are under our severe low-free-pages threshold
*
@@ -168,7 +164,8 @@ static inline int
vm_page_count_severe(void)
{
- return (vm_cnt.v_free_severe > vm_cnt.v_free_count);
+ /* XXX */
+ return (vm_cnt.v_free_severe > vm_free_count());
}
/*
@@ -184,50 +181,9 @@ static inline int
vm_page_count_min(void)
{
- return (vm_cnt.v_free_min > vm_cnt.v_free_count);
+ /* XXX */
+ return (vm_cnt.v_free_min > vm_free_count());
}
-/*
- * Return TRUE if we have not reached our free page target during
- * free page recovery operations.
- */
-static inline int
-vm_page_count_target(void)
-{
-
- return (vm_cnt.v_free_target > vm_cnt.v_free_count);
-}
-
-/*
- * Return the number of pages we need to free-up or cache
- * A positive number indicates that we do not have enough free pages.
- */
-static inline int
-vm_paging_target(void)
-{
-
- return (vm_cnt.v_free_target - vm_cnt.v_free_count);
-}
-
-/*
- * Returns TRUE if the pagedaemon needs to be woken up.
- */
-static inline int
-vm_paging_needed(u_int free_count)
-{
-
- return (free_count < vm_pageout_wakeup_thresh);
-}
-
-/*
- * Return the number of pages we need to launder.
- * A positive number indicates that we have a shortfall of clean pages.
- */
-static inline int
-vm_laundry_target(void)
-{
-
- return (vm_paging_target());
-}
#endif /* _KERNEL */
#endif /* _SYS_VMMETER_H_ */
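
The helpers removed above do not disappear entirely: later hunks call
per-domain replacements such as vm_paging_needed(domain, free_count) and
pagedaemon_wakeup(domain), which presumably now live in the new
vm_pagequeue.h.  A sketch of what the per-domain variant likely looks
like, assuming the wakeup threshold moved into struct vm_domain under a
hypothetical vmd_pageout_wakeup_thresh field:

    static inline int
    vm_paging_needed(int domain, u_int free_count)
    {
            /* Hypothetical per-domain analogue of the removed check. */
            return (free_count < vm_dom[domain].vmd_pageout_wakeup_thresh);
    }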
Modified: user/jeff/numa/sys/vm/swap_pager.c
==============================================================================
--- user/jeff/numa/sys/vm/swap_pager.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/vm/swap_pager.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -2327,7 +2327,7 @@ swapoff_one(struct swdevt *sp, struct ucred *cred)
* of data we will have to page back in, plus an epsilon so
* the system doesn't become critically low on swap space.
*/
- if (vm_cnt.v_free_count + swap_pager_avail < nblks + nswap_lowat)
+ if (vm_free_count() + swap_pager_avail < nblks + nswap_lowat)
return (ENOMEM);
/*
Modified: user/jeff/numa/sys/vm/uma_core.c
==============================================================================
--- user/jeff/numa/sys/vm/uma_core.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/vm/uma_core.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -3409,7 +3409,7 @@ uma_large_malloc_domain(vm_size_t size, int domain, in
slab->us_data = (void *)addr;
slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
slab->us_size = size;
- slab->us_domain = vm_phys_domidx(PHYS_TO_VM_PAGE(
+ slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
pmap_kextract(addr)));
uma_total_inc(size);
} else {
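
This is one instance of a rename applied throughout the diff:
vm_phys_domidx(m), which returns a domain index, becomes
vm_phys_domain(m), while the old vm_phys_domain(m), which returned a
struct vm_domain pointer, resurfaces as vm_pagequeue_domain(m) (see the
vm_page_pagequeue() hunk below).  Approximate shapes of the two
accessors, reconstructed from their uses rather than from the headers:

    /* Sketches only; the real definitions live in the VM headers. */
    static inline int
    vm_phys_domain(vm_page_t m)             /* formerly vm_phys_domidx() */
    {
            return (vm_phys_segs[m->segind].domain);
    }

    static inline struct vm_domain *
    vm_pagequeue_domain(vm_page_t m)        /* formerly vm_phys_domain() */
    {
            return (&vm_dom[vm_phys_domain(m)]);
    }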
Modified: user/jeff/numa/sys/vm/vm_extern.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_extern.h Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/vm/vm_extern.h Wed Jan 17 06:22:10 2018 (r328074)
@@ -122,5 +122,8 @@ struct sf_buf *vm_imgact_map_page(vm_object_t object,
void vm_imgact_unmap_page(struct sf_buf *sf);
void vm_thread_dispose(struct thread *td);
int vm_thread_new(struct thread *td, int pages);
+u_int vm_active_count(void);
+u_int vm_inactive_count(void);
+u_int vm_laundry_count(void);
#endif /* _KERNEL */
#endif /* !_VM_EXTERN_H_ */
Modified: user/jeff/numa/sys/vm/vm_init.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_init.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/vm/vm_init.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -89,6 +89,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
+#include <vm/vm_pagequeue.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
Modified: user/jeff/numa/sys/vm/vm_kern.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_kern.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/vm/vm_kern.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -92,6 +92,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
+#include <vm/vm_pagequeue.h>
#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
@@ -205,9 +206,9 @@ retry:
vmem_free(vmem, addr, size);
return (0);
}
- KASSERT(vm_phys_domidx(m) == domain,
+ KASSERT(vm_phys_domain(m) == domain,
("kmem_alloc_attr_domain: Domain mismatch %d != %d",
- vm_phys_domidx(m), domain));
+ vm_phys_domain(m), domain));
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
@@ -288,9 +289,9 @@ retry:
vmem_free(vmem, addr, size);
return (0);
}
- KASSERT(vm_phys_domidx(m) == domain,
+ KASSERT(vm_phys_domain(m) == domain,
("kmem_alloc_contig_domain: Domain mismatch %d != %d",
- vm_phys_domidx(m), domain));
+ vm_phys_domain(m), domain));
end_m = m + npages;
tmp = addr;
for (; m < end_m; m++) {
@@ -452,9 +453,9 @@ retry:
kmem_unback(object, addr, i);
return (KERN_NO_SPACE);
}
- KASSERT(vm_phys_domidx(m) == domain,
+ KASSERT(vm_phys_domain(m) == domain,
("kmem_back_domain: Domain mismatch %d != %d",
- vm_phys_domidx(m), domain));
+ vm_phys_domain(m), domain));
if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
@@ -514,7 +515,7 @@ _kmem_unback(vm_object_t object, vm_offset_t addr, vm_
end = offset + size;
VM_OBJECT_WLOCK(object);
m = vm_page_lookup(object, atop(offset));
- domain = vm_phys_domidx(m);
+ domain = vm_phys_domain(m);
for (; offset < end; offset += PAGE_SIZE, m = next) {
next = vm_page_next(m);
vm_page_unwire(m, PQ_NONE);
Modified: user/jeff/numa/sys/vm/vm_map.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_map.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/vm/vm_map.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -2016,7 +2016,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_p
* free pages allocating pv entries.
*/
if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
- vm_cnt.v_free_count < vm_cnt.v_free_reserved) ||
+ vm_page_count_severe()) ||
((flags & MAP_PREFAULT_PARTIAL) != 0 &&
tmpidx >= threshold)) {
psize = tmpidx;
Modified: user/jeff/numa/sys/vm/vm_meter.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_meter.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/vm/vm_meter.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -53,6 +53,8 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
+#include <vm/vm_phys.h>
+#include <vm/vm_pagequeue.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
@@ -213,9 +215,11 @@ vmtotal(SYSCTL_HANDLER_ARGS)
total.t_dw++;
else
total.t_sl++;
+#if 0 /* XXX */
if (td->td_wchan ==
&vm_cnt.v_free_count)
total.t_pw++;
+#endif
}
break;
case TDS_CAN_RUN:
@@ -283,7 +287,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
}
}
mtx_unlock(&vm_object_list_mtx);
- total.t_free = vm_cnt.v_free_count;
+ total.t_free = vm_free_count();
#if defined(COMPAT_FREEBSD11)
/* sysctl(8) allocates twice as much memory as reported by sysctl(3) */
if (curproc->p_osrel < P_OSREL_VMTOTAL64 && (req->oldlen ==
@@ -339,7 +343,7 @@ sysctl_handle_vmstat(SYSCTL_HANDLER_ARGS)
#define VM_STATS(parent, var, descr) \
SYSCTL_OID(parent, OID_AUTO, var, CTLTYPE_U64 | CTLFLAG_MPSAFE | \
- CTLFLAG_RD, &vm_cnt.var, 0, sysctl_handle_vmstat, "QU", descr);
+ CTLFLAG_RD, &vm_cnt.var, 0, sysctl_handle_vmstat, "QU", descr)
#define VM_STATS_VM(var, descr) VM_STATS(_vm_stats_vm, var, descr)
#define VM_STATS_SYS(var, descr) VM_STATS(_vm_stats_sys, var, descr)
@@ -379,19 +383,36 @@ VM_STATS_VM(v_vforkpages, "VM pages affected by vfork(
VM_STATS_VM(v_rforkpages, "VM pages affected by rfork()");
VM_STATS_VM(v_kthreadpages, "VM pages affected by fork() by kernel");
+static int
+sysctl_handle_vmstat_proc(SYSCTL_HANDLER_ARGS)
+{
+ u_int (*fn)(void);
+ uint32_t val;
+
+ fn = arg1;
+ val = fn();
+ return (SYSCTL_OUT(req, &val, sizeof(val)));
+}
+
+#define VM_STATS_PROC(var, descr, fn) \
+ SYSCTL_OID(_vm_stats_vm, OID_AUTO, var, CTLTYPE_U32 | CTLFLAG_MPSAFE | \
+ CTLFLAG_RD, fn, 0, sysctl_handle_vmstat_proc, "IU", descr)
+
#define VM_STATS_UINT(var, descr) \
SYSCTL_UINT(_vm_stats_vm, OID_AUTO, var, CTLFLAG_RD, &vm_cnt.var, 0, descr)
+
VM_STATS_UINT(v_page_size, "Page size in bytes");
VM_STATS_UINT(v_page_count, "Total number of pages in system");
VM_STATS_UINT(v_free_reserved, "Pages reserved for deadlock");
VM_STATS_UINT(v_free_target, "Pages desired free");
VM_STATS_UINT(v_free_min, "Minimum low-free-pages threshold");
-VM_STATS_UINT(v_free_count, "Free pages");
+VM_STATS_PROC(v_free_count, "Free pages", vm_free_count);
VM_STATS_UINT(v_wire_count, "Wired pages");
-VM_STATS_UINT(v_active_count, "Active pages");
+VM_STATS_PROC(v_active_count, "Active pages", vm_active_count);
VM_STATS_UINT(v_inactive_target, "Desired inactive pages");
-VM_STATS_UINT(v_inactive_count, "Inactive pages");
-VM_STATS_UINT(v_laundry_count, "Pages eligible for laundering");
+VM_STATS_PROC(v_inactive_count, "Inactive pages", vm_inactive_count);
+VM_STATS_PROC(v_laundry_count, "Pages eligible for laundering",
+ vm_laundry_count);
VM_STATS_UINT(v_pageout_free_min, "Min pages reserved for kernel");
VM_STATS_UINT(v_interrupt_free_min, "Reserved pages for interrupt code");
VM_STATS_UINT(v_free_severe, "Severe page depletion point");
@@ -406,3 +427,50 @@ SYSCTL_UINT(_vm_stats_vm, OID_AUTO, v_cache_count, CTL
SYSCTL_UINT(_vm_stats_vm, OID_AUTO, v_tcached, CTLFLAG_RD,
SYSCTL_NULL_UINT_PTR, 0, "Dummy for compatibility");
#endif
+
+u_int
+vm_free_count(void)
+{
+ u_int v;
+ int i;
+
+ v = 0;
+ for (i = 0; i < vm_ndomains; i++)
+ v += vm_dom[i].vmd_free_count;
+
+ return (v);
+}
+
+u_int
+vm_active_count(void)
+{
+ u_int v;
+ int i;
+
+ v = 0;
+ for (i = 0; i < vm_ndomains; i++)
+ v += vm_dom[i].vmd_pagequeues[PQ_ACTIVE].pq_cnt;
+
+ return (v);
+}
+
+u_int
+vm_inactive_count(void)
+{
+ u_int v;
+ int i;
+
+ v = 0;
+ for (i = 0; i < vm_ndomains; i++)
+ v += vm_dom[i].vmd_pagequeues[PQ_INACTIVE].pq_cnt;
+
+ return (v);
+}
+
+u_int
+vm_laundry_count(void)
+{
+
+ return (vm_dom[0].vmd_pagequeues[PQ_LAUNDRY].pq_cnt);
+}
+
Modified: user/jeff/numa/sys/vm/vm_object.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_object.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/vm/vm_object.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -96,6 +96,8 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
+#include <vm/vm_phys.h>
+#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
Modified: user/jeff/numa/sys/vm/vm_page.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_page.c Wed Jan 17 05:09:15 2018 (r328073)
+++ user/jeff/numa/sys/vm/vm_page.c Wed Jan 17 06:22:10 2018 (r328074)
@@ -115,8 +115,9 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
-#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
+#include <vm/vm_pagequeue.h>
+#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
@@ -131,7 +132,6 @@ __FBSDID("$FreeBSD$");
*/
struct vm_domain vm_dom[MAXMEMDOM];
-struct mtx_padalign __exclusive_cache_line vm_page_queue_free_mtx;
struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
@@ -159,16 +159,13 @@ static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARG
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");
-/* Is the page daemon waiting for free pages? */
-static int vm_pageout_pages_needed;
-
static uma_zone_t fakepg_zone;
static void vm_page_alloc_check(vm_page_t m);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(uint8_t queue, vm_page_t m);
static void vm_page_free_phys(vm_page_t m);
-static void vm_page_free_wakeup(void);
+static void vm_page_free_wakeup(int domain);
static void vm_page_init(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
vm_pindex_t pindex, vm_page_t mpred);
@@ -176,7 +173,7 @@ static void vm_page_insert_radixdone(vm_page_t m, vm_o
vm_page_t mpred);
static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
vm_page_t m_run, vm_paddr_t high);
-static int vm_page_alloc_fail(vm_object_t object, int req);
+static int vm_page_alloc_fail(vm_object_t object, int domain, int req);
SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
@@ -325,7 +322,7 @@ vm_page_blacklist_check(char *list, char *end)
m = vm_phys_paddr_to_vm_page(pa);
if (m == NULL)
continue;
- domain = vm_phys_domidx(m);
+ domain = vm_phys_domain(m);
vm_pagequeue_free_lock(domain);
ret = vm_phys_unfree_page(m);
vm_pagequeue_free_unlock(domain);
@@ -396,23 +393,15 @@ vm_page_domain_init(struct vm_domain *vmd)
struct vm_pagequeue *pq;
int i;
+ bzero(vmd, sizeof(*vmd));
*__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
"vm inactive pagequeue";
- *__DECONST(u_int **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_vcnt) =
- &vm_cnt.v_inactive_count;
*__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
"vm active pagequeue";
- *__DECONST(u_int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) =
- &vm_cnt.v_active_count;
*__DECONST(char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
"vm laundry pagequeue";
- *__DECONST(int **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_vcnt) =
- &vm_cnt.v_laundry_count;
*__DECONST(char **, &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
"vm unswappable pagequeue";
- /* Unswappable dirty pages are counted as being in the laundry. */
- *__DECONST(int **, &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_vcnt) =
- &vm_cnt.v_laundry_count;
vmd->vmd_page_count = 0;
vmd->vmd_free_count = 0;
vmd->vmd_segs = 0;
@@ -423,6 +412,8 @@ vm_page_domain_init(struct vm_domain *vmd)
mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
MTX_DEF | MTX_DUPOK);
}
+ mtx_init(&vmd->vmd_pagequeue_free_mtx, "vm page free queue", NULL,
+ MTX_DEF);
}
/*
@@ -490,7 +481,6 @@ vm_page_startup(vm_offset_t vaddr)
/*
* Initialize the page and queue locks.
*/
- mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
for (i = 0; i < PA_LOCK_COUNT; i++)
mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
for (i = 0; i < vm_ndomains; i++)
@@ -692,7 +682,6 @@ vm_page_startup(vm_offset_t vaddr)
* physical memory allocator's free lists.
*/
vm_cnt.v_page_count = 0;
- vm_cnt.v_free_count = 0;
for (segind = 0; segind < vm_phys_nsegs; segind++) {
seg = &vm_phys_segs[segind];
for (m = seg->first_page, pa = seg->start; pa < seg->end;
@@ -716,7 +705,7 @@ vm_page_startup(vm_offset_t vaddr)
vm_pagequeue_free_lock(seg->domain);
vm_phys_free_contig(m, pagecount);
- vm_phys_freecnt_adj(seg->domain, (int)pagecount);
+ vm_pagequeue_freecnt_adj(seg->domain, (int)pagecount);
vm_pagequeue_free_unlock(seg->domain);
vm_cnt.v_page_count += (u_int)pagecount;
@@ -1652,8 +1641,10 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pi
int
vm_page_available(int domain, int req, int npages)
{
+ struct vm_domain *vmd;
vm_pagequeue_free_assert_locked(domain);
+ vmd = &vm_dom[domain];
req = req & VM_ALLOC_CLASS_MASK;
/*
@@ -1662,12 +1653,11 @@ vm_page_available(int domain, int req, int npages)
if (curproc == pageproc && req != VM_ALLOC_INTERRUPT)
req = VM_ALLOC_SYSTEM;
- /* XXX Global counts. */
- if (vm_cnt.v_free_count >= npages + vm_cnt.v_free_reserved ||
+ if (vmd->vmd_free_count >= npages + vmd->vmd_free_reserved ||
(req == VM_ALLOC_SYSTEM &&
- vm_cnt.v_free_count >= npages + vm_cnt.v_interrupt_free_min) ||
+ vmd->vmd_free_count >= npages + vmd->vmd_interrupt_free_min) ||
(req == VM_ALLOC_INTERRUPT &&
- vm_cnt.v_free_count >= npages))
+ vmd->vmd_free_count >= npages))
return (1);
return (0);
@@ -1702,10 +1692,15 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pind
again:
m = NULL;
+#if VM_NRESERVLEVEL > 0
if (reserv &&
- (m = vm_reserv_extend(req, object, pindex, domain, mpred)) != NULL)
+ (m = vm_reserv_extend(req, object, pindex, domain, mpred))
+ != NULL) {
+ domain = vm_phys_domain(m);
goto found;
- mtx_lock(&vm_page_queue_free_mtx);
+ }
+#endif
+ vm_pagequeue_free_lock(domain);
if (vm_page_available(domain, req, 1)) {
/*
* Can we allocate the page from a reservation?
@@ -1734,7 +1729,7 @@ again:
/*
* Not allocatable, give up.
*/
- if (vm_page_alloc_fail(object, req))
+ if (vm_page_alloc_fail(object, domain, req))
goto again;
return (NULL);
}
@@ -1743,16 +1738,18 @@ again:
* At this point we had better have found a good page.
*/
KASSERT(m != NULL, ("missing page"));
- free_count = vm_phys_freecnt_adj(domain, -1);
- mtx_unlock(&vm_page_queue_free_mtx);
+ free_count = vm_pagequeue_freecnt_adj(domain, -1);
+ vm_pagequeue_free_unlock(domain);
/*
* Don't wakeup too often - wakeup the pageout daemon when
* we would be nearly out of memory.
*/
- if (vm_paging_needed(free_count))
- pagedaemon_wakeup();
+ if (vm_paging_needed(domain, free_count))
+ pagedaemon_wakeup(domain);
+#if VM_NRESERVLEVEL > 0
found:
+#endif
vm_page_alloc_check(m);
/*
@@ -1785,7 +1782,7 @@ found:
if (object != NULL) {
if (vm_page_insert_after(m, object, pindex, mpred)) {
- pagedaemon_wakeup();
+ pagedaemon_wakeup(domain);
if (req & VM_ALLOC_WIRED) {
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
m->wire_count = 0;
@@ -1916,11 +1913,13 @@ again:
#if VM_NRESERVLEVEL > 0
if (reserv &&
(m_ret = vm_reserv_extend_contig(req, object, pindex, domain,
- npages, low, high, alignment, boundary, mpred)) != NULL)
+ npages, low, high, alignment, boundary, mpred)) != NULL) {
+ domain = vm_phys_domain(m_ret);
goto found;
+ }
#endif
m_ret = NULL;
- mtx_lock(&vm_page_queue_free_mtx);
+ vm_pagequeue_free_lock(domain);
if (vm_page_available(domain, req, npages)) {
/*
* Can we allocate the pages from a reservation?
@@ -1943,13 +1942,15 @@ retry:
#endif
}
if (m_ret == NULL) {
- if (vm_page_alloc_fail(object, req))
+ if (vm_page_alloc_fail(object, domain, req))
goto again;
return (NULL);
}
- vm_phys_freecnt_adj(domain, -npages);
- mtx_unlock(&vm_page_queue_free_mtx);
+ vm_pagequeue_freecnt_adj(domain, -npages);
+ vm_pagequeue_free_unlock(domain);
+#if VM_NRESERVLEVEL > 0
found:
+#endif
for (m = m_ret; m < &m_ret[npages]; m++)
vm_page_alloc_check(m);
@@ -1985,7 +1986,7 @@ found:
m->oflags = oflags;
if (object != NULL) {
if (vm_page_insert_after(m, object, pindex, mpred)) {
- pagedaemon_wakeup();
+ pagedaemon_wakeup(domain);
if ((req & VM_ALLOC_WIRED) != 0)
atomic_subtract_int(
&vm_cnt.v_wire_count, npages);
@@ -2015,8 +2016,8 @@ found:
pmap_page_set_memattr(m, memattr);
pindex++;
}
- if (vm_paging_needed(vm_cnt.v_free_count))
- pagedaemon_wakeup();
+ if (vm_paging_needed(domain, vm_dom[domain].vmd_free_count))
+ pagedaemon_wakeup(domain);
return (m_ret);
}
@@ -2085,17 +2086,17 @@ vm_page_alloc_freelist_domain(int domain, int freelist
* Do not allocate reserved pages unless the req has asked for it.
*/
again:
- mtx_lock(&vm_page_queue_free_mtx);
+ vm_pagequeue_free_lock(domain);
if (vm_page_available(domain, req, 1))
m = vm_phys_alloc_freelist_pages(domain, freelist,
VM_FREEPOOL_DIRECT, 0);
if (m == NULL) {
- if (vm_page_alloc_fail(NULL, req))
+ if (vm_page_alloc_fail(NULL, domain, req))
goto again;
return (NULL);
}
- free_count = vm_phys_freecnt_adj(domain, -1);
- mtx_unlock(&vm_page_queue_free_mtx);
+ free_count = vm_pagequeue_freecnt_adj(domain, -1);
+ vm_pagequeue_free_unlock(domain);
vm_page_alloc_check(m);
/*
@@ -2116,8 +2117,8 @@ again:
}
/* Unmanaged pages don't use "act_count". */
m->oflags = VPO_UNMANAGED;
- if (vm_paging_needed(free_count))
- pagedaemon_wakeup();
+ if (vm_paging_needed(domain, free_count))
+ pagedaemon_wakeup(domain);
return (m);
}
@@ -2491,7 +2492,7 @@ retry:
unlock:
VM_OBJECT_WUNLOCK(object);
} else {
- MPASS(vm_phys_domidx(m) == domain);
+ MPASS(vm_phys_domain(m) == domain);
vm_pagequeue_free_lock(domain);
order = m->order;
if (order < VM_NFREEORDER) {
@@ -2517,13 +2518,13 @@ unlock:
if (m_mtx != NULL)
mtx_unlock(m_mtx);
if ((m = SLIST_FIRST(&free)) != NULL) {
- MPASS(vm_phys_domidx(m) == domain);
+ MPASS(vm_phys_domain(m) == domain);
vm_pagequeue_free_lock(domain);
do {
SLIST_REMOVE_HEAD(&free, plinks.s.ss);
vm_page_free_phys(m);
} while ((m = SLIST_FIRST(&free)) != NULL);
- vm_page_free_wakeup();
+ vm_page_free_wakeup(domain);
vm_pagequeue_free_unlock(domain);
}
return (error);
@@ -2564,6 +2565,7 @@ bool
vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
+ struct vm_domain *vmd;
vm_paddr_t curr_low;
vm_page_t m_run, m_runs[NRUNS];
u_long count, reclaimed;
@@ -2584,9 +2586,10 @@ vm_page_reclaim_contig_domain(int domain, int req, u_l
* Return if the number of free pages cannot satisfy the requested
* allocation.
*/
- count = vm_cnt.v_free_count;
- if (count < npages + vm_cnt.v_free_reserved || (count < npages +
- vm_cnt.v_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
+ vmd = &vm_dom[domain];
+ count = vmd->vmd_free_count;
+ if (count < npages + vmd->vmd_free_reserved || (count < npages +
+ vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
(count < npages && req_class == VM_ALLOC_INTERRUPT))
return (false);
@@ -2671,27 +2674,33 @@ vm_page_reclaim_contig(int req, u_long npages, vm_padd
* - Called in various places before memory allocations.
*/
static void
-_vm_wait(void)
+vm_wait_domain(int domain)
{
+ struct vm_domain *vmd;
- mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+ vm_pagequeue_free_assert_locked(domain);
+ vmd = &vm_dom[domain];
+
if (curproc == pageproc) {
- vm_pageout_pages_needed = 1;
- msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
- PDROP | PSWP, "VMWait", 0);
+ vmd->vmd_pageout_pages_needed = 1;
+ msleep(&vmd->vmd_pageout_pages_needed,
+ &vmd->vmd_pagequeue_free_mtx, PDROP | PSWP, "VMWait", 0);
} else {
if (pageproc == NULL)
panic("vm_wait in early boot");
- pagedaemon_wait(PVM, "vmwait");
+ pagedaemon_wait(domain, PVM, "vmwait");
}
}
void
vm_wait(void)
{
-
+#if 0 /* XXX */
mtx_lock(&vm_page_queue_free_mtx);
_vm_wait();
+#else
+ pause("vmxxx", 1);
+#endif
}
/*
@@ -2705,24 +2714,26 @@ vm_wait(void)
*
*/
static int
-vm_page_alloc_fail(vm_object_t object, int req)
+vm_page_alloc_fail(vm_object_t object, int domain, int req)
{
+ struct vm_domain *vmd;
- mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+ vm_pagequeue_free_assert_locked(domain);
- atomic_add_int(&vm_pageout_deficit,
+ vmd = &vm_dom[domain];
+ atomic_add_int(&vmd->vmd_pageout_deficit,
max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
if (object != NULL)
VM_OBJECT_WUNLOCK(object);
- _vm_wait();
+ vm_wait_domain(domain);
if (object != NULL)
VM_OBJECT_WLOCK(object);
if (req & VM_ALLOC_WAITOK)
return (EAGAIN);
} else {
- mtx_unlock(&vm_page_queue_free_mtx);
- pagedaemon_wakeup();
+ vm_pagequeue_free_unlock(domain);
+ pagedaemon_wakeup(domain);
}
return (0);
}
@@ -2740,9 +2751,12 @@ vm_page_alloc_fail(vm_object_t object, int req)
void
vm_waitpfault(void)
{
-
+#if 0 /* XXX */
mtx_lock(&vm_page_queue_free_mtx);
pagedaemon_wait(PUSER, "pfault");
+#else
+ pause("vmxxx", 1);
+#endif
}
struct vm_pagequeue *
@@ -2752,7 +2766,7 @@ vm_page_pagequeue(vm_page_t m)
if (vm_page_in_laundry(m))
return (&vm_dom[0].vmd_pagequeues[m->queue]);
else
- return (&vm_phys_domain(m)->vmd_pagequeues[m->queue]);
+ return (&vm_pagequeue_domain(m)->vmd_pagequeues[m->queue]);
}
/*
@@ -2817,7 +2831,7 @@ vm_page_enqueue(uint8_t queue, vm_page_t m)
if (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE)
pq = &vm_dom[0].vmd_pagequeues[queue];
else
- pq = &vm_phys_domain(m)->vmd_pagequeues[queue];
+ pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue];
vm_pagequeue_lock(pq);
m->queue = queue;
TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
@@ -2907,27 +2921,30 @@ vm_page_activate(vm_page_t m)
* The page queues must be locked.
*/
static void
-vm_page_free_wakeup(void)
+vm_page_free_wakeup(int domain)
{
+ struct vm_domain *vmd;
- mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+ vm_pagequeue_free_assert_locked(domain);
+ vmd = &vm_dom[domain];
+
/*
* if pageout daemon needs pages, then tell it that there are
* some free.
*/
- if (vm_pageout_pages_needed &&
- vm_cnt.v_free_count >= vm_cnt.v_pageout_free_min) {
- wakeup(&vm_pageout_pages_needed);
- vm_pageout_pages_needed = 0;
+ if (vmd->vmd_pageout_pages_needed &&
+ vmd->vmd_free_count >= vmd->vmd_pageout_free_min) {
+ wakeup(&vmd->vmd_pageout_pages_needed);
+ vmd->vmd_pageout_pages_needed = 0;
}
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***