svn commit: r242534 - in head/sys: amd64/amd64 i386/i386 ia64/ia64 mips/mips powerpc/aim sparc64/include sparc64/sparc64

Attilio Rao attilio at FreeBSD.org
Sat Nov 3 23:03:15 UTC 2012


Author: attilio
Date: Sat Nov  3 23:03:14 2012
New Revision: 242534
URL: http://svn.freebsd.org/changeset/base/242534

Log:
  Rework the known rwlocks to benefit from staying on their own
  cache line by using struct rwlock_padalign, avoiding the manual
  padding and alignment frobbing previously done by hand.
  
  Reviewed by:	alc, jimharris

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/i386/i386/pmap.c
  head/sys/ia64/ia64/pmap.c
  head/sys/mips/mips/pmap.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/sparc64/include/pmap.h
  head/sys/sparc64/sparc64/pmap.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Sat Nov  3 23:00:05 2012	(r242533)
+++ head/sys/amd64/amd64/pmap.c	Sat Nov  3 23:03:14 2012	(r242534)
@@ -225,16 +225,7 @@ u_int64_t		KPML4phys;	/* phys addr of ke
 static u_int64_t	DMPDphys;	/* phys addr of direct mapped level 2 */
 static u_int64_t	DMPDPphys;	/* phys addr of direct mapped level 3 */
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock	lock;
-	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
-
-#define	pvh_global_lock	pvh_global.lock
+static struct rwlock_padalign pvh_global_lock;
 
 /*
  * Data for the pv entry allocation mechanism

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Sat Nov  3 23:00:05 2012	(r242533)
+++ head/sys/i386/i386/pmap.c	Sat Nov  3 23:03:14 2012	(r242534)
@@ -224,16 +224,7 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_ena
 #define	PAT_INDEX_SIZE	8
 static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock	lock;
-	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
-
-#define	pvh_global_lock	pvh_global.lock
+static struct rwlock_padalign pvh_global_lock;
 
 /*
  * Data for the pv entry allocation mechanism

Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c	Sat Nov  3 23:00:05 2012	(r242533)
+++ head/sys/ia64/ia64/pmap.c	Sat Nov  3 23:03:14 2012	(r242534)
@@ -214,16 +214,7 @@ static int pmap_ridmax;
 static uint64_t *pmap_ridmap;
 struct mtx pmap_ridmutex;
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock	lock;
-	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
-
-#define	pvh_global_lock	pvh_global.lock
+static struct rwlock_padalign pvh_global_lock;
 
 /*
  * Data for the pv entry allocation mechanism

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c	Sat Nov  3 23:00:05 2012	(r242533)
+++ head/sys/mips/mips/pmap.c	Sat Nov  3 23:03:14 2012	(r242534)
@@ -148,16 +148,7 @@ vm_offset_t kernel_vm_end = VM_MIN_KERNE
 
 static void pmap_asid_alloc(pmap_t pmap);
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock	lock;
-	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
-
-#define	pvh_global_lock	pvh_global.lock
+static struct rwlock_padalign pvh_global_lock;
 
 /*
  * Data for the pv entry allocation mechanism

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c	Sat Nov  3 23:00:05 2012	(r242533)
+++ head/sys/powerpc/aim/mmu_oea.c	Sat Nov  3 23:03:14 2012	(r242534)
@@ -200,16 +200,7 @@ struct	pvo_head *moea_pvo_table;		/* pvo
 struct	pvo_head moea_pvo_kunmanaged =
     LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock	lock;
-	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
-
-#define	pvh_global_lock	pvh_global.lock
+static struct rwlock_padalign pvh_global_lock;
 
 uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
 uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */

Modified: head/sys/sparc64/include/pmap.h
==============================================================================
--- head/sys/sparc64/include/pmap.h	Sat Nov  3 23:00:05 2012	(r242533)
+++ head/sys/sparc64/include/pmap.h	Sat Nov  3 23:03:14 2012	(r242534)
@@ -68,11 +68,6 @@ struct pmap {
 	struct	pmap_statistics pm_stats;
 };
 
-struct tte_list_lock {
-	struct rwlock lock;
-	char padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-};
-
 #define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
 #define	PMAP_LOCK_ASSERT(pmap, type)					\
 				mtx_assert(&(pmap)->pm_mtx, (type))
@@ -108,8 +103,7 @@ void	pmap_set_kctx(void);
 
 extern	struct pmap kernel_pmap_store;
 #define	kernel_pmap	(&kernel_pmap_store)
-extern	struct tte_list_lock tte_list_global;
-#define	tte_list_global_lock	tte_list_global.lock
+extern	struct rwlock_padalign tte_list_global_lock;
 extern	vm_paddr_t phys_avail[];
 extern	vm_offset_t virtual_avail;
 extern	vm_offset_t virtual_end;

Modified: head/sys/sparc64/sparc64/pmap.c
==============================================================================
--- head/sys/sparc64/sparc64/pmap.c	Sat Nov  3 23:00:05 2012	(r242533)
+++ head/sys/sparc64/sparc64/pmap.c	Sat Nov  3 23:03:14 2012	(r242534)
@@ -129,12 +129,7 @@ vm_offset_t vm_max_kernel_address;
  */
 struct pmap kernel_pmap_store;
 
-/*
- * Isolate the global TTE list lock from data and other locks to prevent
- * false sharing within the cache (see also the declaration of struct
- * tte_list_lock).
- */
-struct tte_list_lock tte_list_global __aligned(CACHE_LINE_SIZE);
+struct rwlock_padalign tte_list_global_lock;
 
 /*
  * Allocate physical memory for use in pmap_bootstrap.


More information about the svn-src-all mailing list