svn commit: r309017 - in head/sys: cddl/compat/opensolaris/sys compat/linprocfs fs/tmpfs sys vm

Alan Cox <alc@FreeBSD.org>
Tue Nov 22 18:13:48 UTC 2016


Author: alc
Date: Tue Nov 22 18:13:46 2016
New Revision: 309017
URL: https://svnweb.freebsd.org/changeset/base/309017

Log:
  Remove the PG_CACHED-related fields from struct vmmeter because they are
  no longer used.  More precisely, they are always zero: the code that
  incremented and decremented them no longer exists.
  
  Bump __FreeBSD_version to mark this change.
  
  Reviewed by:	kib, markj
  Sponsored by:	Dell EMC Isilon
  Differential Revision:	https://reviews.freebsd.org/D8583
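
  The pattern applied throughout the diff below is mechanical: every site
  that summed the free and cache counts now tests v_free_count alone.  For
  example, the low-memory predicate in sys/sys/vmmeter.h (shown in full in
  its hunk below) becomes

	static inline int
	vm_page_count_min(void)
	{

		return (vm_cnt.v_free_min > vm_cnt.v_free_count);
	}

  where the removed "+ vm_cnt.v_cache_count" term could only ever add zero.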

Modified:
  head/sys/cddl/compat/opensolaris/sys/kmem.h
  head/sys/compat/linprocfs/linprocfs.c
  head/sys/fs/tmpfs/tmpfs_subr.c
  head/sys/sys/param.h
  head/sys/sys/vmmeter.h
  head/sys/vm/swap_pager.c
  head/sys/vm/vm_meter.c
  head/sys/vm/vm_page.c
  head/sys/vm/vnode_pager.c

Modified: head/sys/cddl/compat/opensolaris/sys/kmem.h
==============================================================================
--- head/sys/cddl/compat/opensolaris/sys/kmem.h	Tue Nov 22 18:13:04 2016	(r309016)
+++ head/sys/cddl/compat/opensolaris/sys/kmem.h	Tue Nov 22 18:13:46 2016	(r309017)
@@ -77,7 +77,7 @@ void kmem_reap(void);
 int kmem_debugging(void);
 void *calloc(size_t n, size_t s);
 
-#define	freemem				(vm_cnt.v_free_count + vm_cnt.v_cache_count)
+#define	freemem				vm_cnt.v_free_count
 #define	minfree				vm_cnt.v_free_min
 #define	heap_arena			kmem_arena
 #define	kmem_alloc(size, kmflags)	zfs_kmem_alloc((size), (kmflags))

Modified: head/sys/compat/linprocfs/linprocfs.c
==============================================================================
--- head/sys/compat/linprocfs/linprocfs.c	Tue Nov 22 18:13:04 2016	(r309016)
+++ head/sys/compat/linprocfs/linprocfs.c	Tue Nov 22 18:13:46 2016	(r309017)
@@ -176,7 +176,7 @@ linprocfs_domeminfo(PFS_FILL_ARGS)
 	 * like unstaticizing it just for linprocfs's sake.
 	 */
 	buffers = 0;
-	cached = vm_cnt.v_cache_count * PAGE_SIZE;
+	cached = vm_cnt.v_inactive_count * PAGE_SIZE;
 
 	sbuf_printf(sb,
 	    "MemTotal: %9lu kB\n"

Modified: head/sys/fs/tmpfs/tmpfs_subr.c
==============================================================================
--- head/sys/fs/tmpfs/tmpfs_subr.c	Tue Nov 22 18:13:04 2016	(r309016)
+++ head/sys/fs/tmpfs/tmpfs_subr.c	Tue Nov 22 18:13:46 2016	(r309017)
@@ -105,8 +105,7 @@ tmpfs_mem_avail(void)
 {
 	vm_ooffset_t avail;
 
-	avail = swap_pager_avail + vm_cnt.v_free_count + vm_cnt.v_cache_count -
-	    tmpfs_pages_reserved;
+	avail = swap_pager_avail + vm_cnt.v_free_count - tmpfs_pages_reserved;
 	if (__predict_false(avail < 0))
 		avail = 0;
 	return (avail);

Modified: head/sys/sys/param.h
==============================================================================
--- head/sys/sys/param.h	Tue Nov 22 18:13:04 2016	(r309016)
+++ head/sys/sys/param.h	Tue Nov 22 18:13:46 2016	(r309017)
@@ -58,7 +58,7 @@
  *		in the range 5 to 9.
  */
 #undef __FreeBSD_version
-#define __FreeBSD_version 1200015	/* Master, propagated to newvers */
+#define __FreeBSD_version 1200016	/* Master, propagated to newvers */
 
 /*
  * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,

Modified: head/sys/sys/vmmeter.h
==============================================================================
--- head/sys/sys/vmmeter.h	Tue Nov 22 18:13:04 2016	(r309016)
+++ head/sys/sys/vmmeter.h	Tue Nov 22 18:13:46 2016	(r309017)
@@ -80,7 +80,6 @@ struct vmmeter {
 	u_int v_pdpages;	/* (p) pages analyzed by daemon */
 	u_int v_pdshortfalls;	/* (p) page reclamation shortfalls */
 
-	u_int v_tcached;	/* (p) total pages cached */
 	u_int v_dfree;		/* (p) pages freed by daemon */
 	u_int v_pfree;		/* (p) pages freed by exiting processes */
 	u_int v_tfree;		/* (p) total pages freed */
@@ -98,7 +97,6 @@ struct vmmeter {
 	u_int v_inactive_target; /* (c) pages desired inactive */
 	u_int v_inactive_count;	/* (q) pages inactive */
 	u_int v_laundry_count;	/* (q) pages eligible for laundering */
-	u_int v_cache_count;	/* (f) pages on cache queue */
 	u_int v_pageout_free_min;   /* (c) min pages reserved for kernel */
 	u_int v_interrupt_free_min; /* (c) reserved pages for int code */
 	u_int v_free_severe;	/* (c) severe page depletion point */
@@ -130,8 +128,7 @@ static inline int
 vm_page_count_severe(void)
 {
 
-	return (vm_cnt.v_free_severe > vm_cnt.v_free_count +
-	    vm_cnt.v_cache_count);
+	return (vm_cnt.v_free_severe > vm_cnt.v_free_count);
 }
 
 /*
@@ -147,7 +144,7 @@ static inline int
 vm_page_count_min(void)
 {
 
-	return (vm_cnt.v_free_min > vm_cnt.v_free_count + vm_cnt.v_cache_count);
+	return (vm_cnt.v_free_min > vm_cnt.v_free_count);
 }
 
 /*
@@ -158,8 +155,7 @@ static inline int
 vm_page_count_target(void)
 {
 
-	return (vm_cnt.v_free_target > vm_cnt.v_free_count +
-	    vm_cnt.v_cache_count);
+	return (vm_cnt.v_free_target > vm_cnt.v_free_count);
 }
 
 /*
@@ -170,8 +166,7 @@ static inline int
 vm_paging_target(void)
 {
 
-	return (vm_cnt.v_free_target - (vm_cnt.v_free_count +
-	    vm_cnt.v_cache_count));
+	return (vm_cnt.v_free_target - vm_cnt.v_free_count);
 }
 
 /*
@@ -181,8 +176,7 @@ static inline int
 vm_paging_needed(void)
 {
 
-	return (vm_cnt.v_free_count + vm_cnt.v_cache_count <
-	    vm_pageout_wakeup_thresh);
+	return (vm_cnt.v_free_count < vm_pageout_wakeup_thresh);
 }
 
 /*

Modified: head/sys/vm/swap_pager.c
==============================================================================
--- head/sys/vm/swap_pager.c	Tue Nov 22 18:13:04 2016	(r309016)
+++ head/sys/vm/swap_pager.c	Tue Nov 22 18:13:46 2016	(r309017)
@@ -2254,10 +2254,8 @@ swapoff_one(struct swdevt *sp, struct uc
 	 * of data we will have to page back in, plus an epsilon so
 	 * the system doesn't become critically low on swap space.
 	 */
-	if (vm_cnt.v_free_count + vm_cnt.v_cache_count + swap_pager_avail <
-	    nblks + nswap_lowat) {
+	if (vm_cnt.v_free_count + swap_pager_avail < nblks + nswap_lowat)
 		return (ENOMEM);
-	}
 
 	/*
 	 * Prevent further allocations on this device.

Modified: head/sys/vm/vm_meter.c
==============================================================================
--- head/sys/vm/vm_meter.c	Tue Nov 22 18:13:04 2016	(r309016)
+++ head/sys/vm/vm_meter.c	Tue Nov 22 18:13:46 2016	(r309017)
@@ -211,7 +211,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 		}
 	}
 	mtx_unlock(&vm_object_list_mtx);
-	total.t_free = vm_cnt.v_free_count + vm_cnt.v_cache_count;
+	total.t_free = vm_cnt.v_free_count;
 	return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
 }
 
@@ -290,7 +290,6 @@ VM_STATS_VM(v_reactivated, "Pages reacti
 VM_STATS_VM(v_pdwakeups, "Pagedaemon wakeups");
 VM_STATS_VM(v_pdpages, "Pages analyzed by pagedaemon");
 VM_STATS_VM(v_pdshortfalls, "Page reclamation shortfalls");
-VM_STATS_VM(v_tcached, "Total pages cached");
 VM_STATS_VM(v_dfree, "Pages freed by pagedaemon");
 VM_STATS_VM(v_pfree, "Pages freed by exiting processes");
 VM_STATS_VM(v_tfree, "Total pages freed");
@@ -305,7 +304,6 @@ VM_STATS_VM(v_active_count, "Active page
 VM_STATS_VM(v_inactive_target, "Desired inactive pages");
 VM_STATS_VM(v_inactive_count, "Inactive pages");
 VM_STATS_VM(v_laundry_count, "Pages eligible for laundering");
-VM_STATS_VM(v_cache_count, "Pages on cache queue");
 VM_STATS_VM(v_pageout_free_min, "Min pages reserved for kernel");
 VM_STATS_VM(v_interrupt_free_min, "Reserved pages for interrupt code");
 VM_STATS_VM(v_forks, "Number of fork() calls");

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Tue Nov 22 18:13:04 2016	(r309016)
+++ head/sys/vm/vm_page.c	Tue Nov 22 18:13:46 2016	(r309017)
@@ -1517,11 +1517,11 @@ vm_page_alloc(vm_object_t object, vm_pin
 	 * vm_page_cache().
 	 */
 	mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
-	if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
+	if (vm_cnt.v_free_count > vm_cnt.v_free_reserved ||
 	    (req_class == VM_ALLOC_SYSTEM &&
-	    vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
+	    vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) ||
 	    (req_class == VM_ALLOC_INTERRUPT &&
-	    vm_cnt.v_free_count + vm_cnt.v_cache_count > 0)) {
+	    vm_cnt.v_free_count > 0)) {
 		/*
 		 * Allocate from the free queue if the number of free pages
 		 * exceeds the minimum for the request class.
@@ -1690,11 +1690,11 @@ vm_page_alloc_contig(vm_object_t object,
 		req_class = VM_ALLOC_SYSTEM;
 
 	mtx_lock(&vm_page_queue_free_mtx);
-	if (vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
-	    vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
-	    vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
-	    vm_cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
-	    vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages)) {
+	if (vm_cnt.v_free_count >= npages + vm_cnt.v_free_reserved ||
+	    (req_class == VM_ALLOC_SYSTEM &&
+	    vm_cnt.v_free_count >= npages + vm_cnt.v_interrupt_free_min) ||
+	    (req_class == VM_ALLOC_INTERRUPT &&
+	    vm_cnt.v_free_count >= npages)) {
 #if VM_NRESERVLEVEL > 0
 retry:
 		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
@@ -1842,11 +1842,11 @@ vm_page_alloc_freelist(int flind, int re
 	 * Do not allocate reserved pages unless the req has asked for it.
 	 */
 	mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
-	if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
+	if (vm_cnt.v_free_count > vm_cnt.v_free_reserved ||
 	    (req_class == VM_ALLOC_SYSTEM &&
-	    vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
+	    vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) ||
 	    (req_class == VM_ALLOC_INTERRUPT &&
-	    vm_cnt.v_free_count + vm_cnt.v_cache_count > 0))
+	    vm_cnt.v_free_count > 0))
 		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
 	else {
 		mtx_unlock(&vm_page_queue_free_mtx);
@@ -2372,7 +2372,7 @@ vm_page_reclaim_contig(int req, u_long n
 	 * Return if the number of cached and free pages cannot satisfy the
 	 * requested allocation.
 	 */
-	count = vm_cnt.v_free_count + vm_cnt.v_cache_count;
+	count = vm_cnt.v_free_count;
 	if (count < npages + vm_cnt.v_free_reserved || (count < npages +
 	    vm_cnt.v_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
 	    (count < npages && req_class == VM_ALLOC_INTERRUPT))
@@ -2656,7 +2656,7 @@ vm_page_free_wakeup(void)
 	 * some free.
 	 */
 	if (vm_pageout_pages_needed &&
-	    vm_cnt.v_cache_count + vm_cnt.v_free_count >= vm_cnt.v_pageout_free_min) {
+	    vm_cnt.v_free_count >= vm_cnt.v_pageout_free_min) {
 		wakeup(&vm_pageout_pages_needed);
 		vm_pageout_pages_needed = 0;
 	}
@@ -3509,8 +3509,8 @@ vm_page_assert_pga_writeable(vm_page_t m
 
 DB_SHOW_COMMAND(page, vm_page_print_page_info)
 {
+
 	db_printf("vm_cnt.v_free_count: %d\n", vm_cnt.v_free_count);
-	db_printf("vm_cnt.v_cache_count: %d\n", vm_cnt.v_cache_count);
 	db_printf("vm_cnt.v_inactive_count: %d\n", vm_cnt.v_inactive_count);
 	db_printf("vm_cnt.v_active_count: %d\n", vm_cnt.v_active_count);
 	db_printf("vm_cnt.v_laundry_count: %d\n", vm_cnt.v_laundry_count);
@@ -3525,8 +3525,7 @@ DB_SHOW_COMMAND(pageq, vm_page_print_pag
 {
 	int dom;
 
-	db_printf("pq_free %d pq_cache %d\n",
-	    vm_cnt.v_free_count, vm_cnt.v_cache_count);
+	db_printf("pq_free %d\n", vm_cnt.v_free_count);
 	for (dom = 0; dom < vm_ndomains; dom++) {
 		db_printf(
 	    "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d\n",

Modified: head/sys/vm/vnode_pager.c
==============================================================================
--- head/sys/vm/vnode_pager.c	Tue Nov 22 18:13:04 2016	(r309016)
+++ head/sys/vm/vnode_pager.c	Tue Nov 22 18:13:46 2016	(r309017)
@@ -1161,8 +1161,7 @@ vnode_pager_putpages(vm_object_t object,
 	 * daemon up.  This should be probably be addressed XXX.
 	 */
 
-	if (vm_cnt.v_free_count + vm_cnt.v_cache_count <
-	    vm_cnt.v_pageout_free_min)
+	if (vm_cnt.v_free_count < vm_cnt.v_pageout_free_min)
 		flags |= VM_PAGER_PUT_SYNC;
 
 	/*

