svn commit: r323234 - in head/sys: kern vm

Mateusz Guzik mjg at FreeBSD.org
Wed Sep 6 20:28:21 UTC 2017


Author: mjg
Date: Wed Sep  6 20:28:18 2017
New Revision: 323234
URL: https://svnweb.freebsd.org/changeset/base/323234

Log:
  Start annotating global _padalign locks with __exclusive_cache_line
  
  While these locks are guaranteed to not share their respective cache lines,
  their current placement leaves unnecessary holes in lines which preceded them.
  
  For instance the annotation of vm_page_queue_free_mtx allows 2 neighbour
  cachelines (previously separated by the lock) to be collapsed into 1.
  
  The annotation is only effective on architectures which have it implemented in
  their linker script (currently only amd64). Thus locks are not converted to
  their not-padaligned variants so as not to affect the rest.
  
  MFC after:	1 week

Modified:
  head/sys/kern/subr_vmem.c
  head/sys/kern/vfs_bio.c
  head/sys/vm/uma_core.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_pager.c

Modified: head/sys/kern/subr_vmem.c
==============================================================================
--- head/sys/kern/subr_vmem.c	Wed Sep  6 20:19:30 2017	(r323233)
+++ head/sys/kern/subr_vmem.c	Wed Sep  6 20:28:18 2017	(r323234)
@@ -181,7 +181,7 @@ static struct callout	vmem_periodic_ch;
 static int		vmem_periodic_interval;
 static struct task	vmem_periodic_wk;
 
-static struct mtx_padalign vmem_list_lock;
+static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
 static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
 
 /* ---- misc */
@@ -580,7 +580,7 @@ qc_drain(vmem_t *vm)
 
 #ifndef UMA_MD_SMALL_ALLOC
 
-static struct mtx_padalign vmem_bt_lock;
+static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
 
 /*
  * vmem_bt_alloc:  Allocate a new page of boundary tags.

Modified: head/sys/kern/vfs_bio.c
==============================================================================
--- head/sys/kern/vfs_bio.c	Wed Sep  6 20:19:30 2017	(r323233)
+++ head/sys/kern/vfs_bio.c	Wed Sep  6 20:28:18 2017	(r323234)
@@ -253,23 +253,23 @@ SYSCTL_INT(_vfs, OID_AUTO, maxbcachebuf, CTLFLAG_RDTUN
 /*
  * This lock synchronizes access to bd_request.
  */
-static struct mtx_padalign bdlock;
+static struct mtx_padalign __exclusive_cache_line bdlock;
 
 /*
  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
  * waitrunningbufspace().
  */
-static struct mtx_padalign rbreqlock;
+static struct mtx_padalign __exclusive_cache_line rbreqlock;
 
 /*
  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
  */
-static struct rwlock_padalign nblock;
+static struct rwlock_padalign __exclusive_cache_line nblock;
 
 /*
  * Lock that protects bdirtywait.
  */
-static struct mtx_padalign bdirtylock;
+static struct mtx_padalign __exclusive_cache_line bdirtylock;
 
 /*
  * Wakeup point for bufdaemon, as well as indicator of whether it is already
@@ -339,7 +339,7 @@ static int bq_len[BUFFER_QUEUES];
 /*
  * Lock for each bufqueue
  */
-static struct mtx_padalign bqlocks[BUFFER_QUEUES];
+static struct mtx_padalign __exclusive_cache_line bqlocks[BUFFER_QUEUES];
 
 /*
  * per-cpu empty buffer cache.

Modified: head/sys/vm/uma_core.c
==============================================================================
--- head/sys/vm/uma_core.c	Wed Sep  6 20:19:30 2017	(r323233)
+++ head/sys/vm/uma_core.c	Wed Sep  6 20:28:18 2017	(r323234)
@@ -131,7 +131,7 @@ static LIST_HEAD(,uma_zone) uma_cachezones =
     LIST_HEAD_INITIALIZER(uma_cachezones);
 
 /* This RW lock protects the keg list */
-static struct rwlock_padalign uma_rwlock;
+static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
 
 /*
  * Pointer and counter to pool of pages, that is preallocated at

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Wed Sep  6 20:19:30 2017	(r323233)
+++ head/sys/vm/vm_page.c	Wed Sep  6 20:28:18 2017	(r323234)
@@ -127,9 +127,9 @@ __FBSDID("$FreeBSD$");
  */
 
 struct vm_domain vm_dom[MAXMEMDOM];
-struct mtx_padalign vm_page_queue_free_mtx;
+struct mtx_padalign __exclusive_cache_line vm_page_queue_free_mtx;
 
-struct mtx_padalign pa_lock[PA_LOCK_COUNT];
+struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
 
 /*
  * bogus page -- for I/O to/from partially complete buffers,

Modified: head/sys/vm/vm_pager.c
==============================================================================
--- head/sys/vm/vm_pager.c	Wed Sep  6 20:19:30 2017	(r323233)
+++ head/sys/vm/vm_pager.c	Wed Sep  6 20:28:18 2017	(r323234)
@@ -165,7 +165,7 @@ struct pagerops *pagertab[] = {
  * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
  * (MAXPHYS == 64k) if you want to get the most efficiency.
  */
-struct mtx_padalign pbuf_mtx;
+struct mtx_padalign __exclusive_cache_line pbuf_mtx;
 static TAILQ_HEAD(swqueue, buf) bswlist;
 static int bswneeded;
 vm_offset_t swapbkva;		/* swap buffers kva */


More information about the svn-src-head mailing list