svn commit: r252573 - in user/attilio/vmobj-readlock/sys: amd64/amd64 amd64/include arm/arm arm/include i386/i386 i386/include i386/xen ia64/ia64 mips/include mips/mips powerpc/aim powerpc/include ...

Attilio Rao <attilio at FreeBSD.org>
Wed Jul 3 16:56:27 UTC 2013


Author: attilio
Date: Wed Jul  3 16:56:23 2013
New Revision: 252573
URL: http://svnweb.freebsd.org/changeset/base/252573

Log:
  Many 64-bit arches use a lot of padding in the definition of
  struct md_page because of a single vm_memattr_t attribute. Move this
  attribute into the MI struct vm_page, replacing the old "busy" member
  and making space for the new "busy_lock" member.
  
  Sponsored by:	EMC / Isilon storage division

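  To make the reclaimed padding concrete, here is a minimal userland
  sketch. It is not the kernel structs: the pv_list head is modeled as
  two pointers (like a TAILQ_HEAD) and the attribute as a plain int, as
  in the old amd64 struct md_page.

	#include <stdio.h>

	/* Before: the MD struct carries the attribute and gets padded. */
	struct md_page_old {
		void	*tqh_first;	/* stand-in for TAILQ_HEAD(, pv_entry) */
		void	**tqh_last;
		int	 pat_mode;	/* 4 bytes + 4 bytes tail padding on LP64 */
	};

	/* After: the attribute lives in the MI struct vm_page instead. */
	struct md_page_new {
		void	*tqh_first;
		void	**tqh_last;
	};

	int
	main(void)
	{

		/* Typically prints "old: 24, new: 16" on an LP64 ABI. */
		printf("old: %zu, new: %zu\n",
		    sizeof(struct md_page_old), sizeof(struct md_page_new));
		return (0);
	}

  Dropping the MD copy of the attribute shrinks md_page on every arch at
  once, which is why most of the pmap.h headers below lose a private
  field.
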
Modified:
  user/attilio/vmobj-readlock/sys/amd64/amd64/pmap.c
  user/attilio/vmobj-readlock/sys/amd64/include/pmap.h
  user/attilio/vmobj-readlock/sys/arm/arm/pmap-v6.c
  user/attilio/vmobj-readlock/sys/arm/arm/pmap.c
  user/attilio/vmobj-readlock/sys/arm/include/pmap.h
  user/attilio/vmobj-readlock/sys/i386/i386/pmap.c
  user/attilio/vmobj-readlock/sys/i386/i386/vm_machdep.c
  user/attilio/vmobj-readlock/sys/i386/include/pmap.h
  user/attilio/vmobj-readlock/sys/i386/xen/pmap.c
  user/attilio/vmobj-readlock/sys/ia64/ia64/pmap.c
  user/attilio/vmobj-readlock/sys/mips/include/pmap.h
  user/attilio/vmobj-readlock/sys/mips/mips/pmap.c
  user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea.c
  user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea64.c
  user/attilio/vmobj-readlock/sys/powerpc/include/pmap.h
  user/attilio/vmobj-readlock/sys/sparc64/include/pmap.h
  user/attilio/vmobj-readlock/sys/sparc64/sparc64/pmap.c
  user/attilio/vmobj-readlock/sys/vm/vm_page.h

Modified: user/attilio/vmobj-readlock/sys/amd64/amd64/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/amd64/amd64/pmap.c	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/amd64/amd64/pmap.c	Wed Jul  3 16:56:23 2013	(r252573)
@@ -791,7 +791,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.pat_mode = PAT_WRITE_BACK;
+	m->mdmemattr = PAT_WRITE_BACK;
 }
 
 /*
@@ -1454,7 +1454,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *
 	endpte = pte + count;
 	while (pte < endpte) {
 		m = *ma++;
-		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->mdmemattr, 0);
 		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
 			oldpte |= *pte;
 			pte_store(pte, pa | PG_G | PG_RW | PG_V);
@@ -3469,7 +3469,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 		newpte |= PG_U;
 	if (pmap == kernel_pmap)
 		newpte |= PG_G;
-	newpte |= pmap_cache_bits(m->md.pat_mode, 0);
+	newpte |= pmap_cache_bits(m->mdmemattr, 0);
 
 	mpte = NULL;
 
@@ -3657,7 +3657,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t 
 		    " in pmap %p", va, pmap);
 		return (FALSE);
 	}
-	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
+	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->mdmemattr, 1) |
 	    PG_PS | PG_V;
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		newpde |= PG_MANAGED;
@@ -3859,7 +3859,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	 */
 	pmap_resident_count_inc(pmap, 1);
 
-	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->mdmemattr, 0);
 	if ((prot & VM_PROT_EXECUTE) == 0)
 		pa |= pg_nx;
 
@@ -3900,7 +3900,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
 	pd_entry_t *pde;
 	vm_paddr_t pa, ptepa;
 	vm_page_t p, pdpg;
-	int pat_mode;
+	vm_memattr_t pat_mode;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
@@ -3911,7 +3911,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
 		p = vm_page_lookup(object, pindex);
 		KASSERT(p->valid == VM_PAGE_BITS_ALL,
 		    ("pmap_object_init_pt: invalid page %p", p));
-		pat_mode = p->md.pat_mode;
+		pat_mode = p->mdmemattr;
 
 		/*
 		 * Abort the mapping if the first page is not physically
@@ -3932,7 +3932,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
 			KASSERT(p->valid == VM_PAGE_BITS_ALL,
 			    ("pmap_object_init_pt: invalid page %p", p));
 			if (pa != VM_PAGE_TO_PHYS(p) ||
-			    pat_mode != p->md.pat_mode)
+			    pat_mode != p->mdmemattr)
 				return;
 			p = TAILQ_NEXT(p, listq);
 		}
@@ -5099,7 +5099,7 @@ void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
 
-	m->md.pat_mode = ma;
+	m->mdmemattr = ma;
 
 	/*
 	 * If "m" is a normal page, update its direct mapping.  This update
@@ -5108,7 +5108,7 @@ pmap_page_set_memattr(vm_page_t m, vm_me
 	 */
 	if ((m->flags & PG_FICTITIOUS) == 0 &&
 	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
-	    m->md.pat_mode))
+	    m->mdmemattr))
 		panic("memory attribute change on the direct map failed");
 }
 

Modified: user/attilio/vmobj-readlock/sys/amd64/include/pmap.h
==============================================================================
--- user/attilio/vmobj-readlock/sys/amd64/include/pmap.h	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/amd64/include/pmap.h	Wed Jul  3 16:56:23 2013	(r252573)
@@ -233,7 +233,6 @@ struct	pv_chunk;
 
 struct md_page {
 	TAILQ_HEAD(,pv_entry)	pv_list;
-	int			pat_mode;
 };
 
 /*
@@ -300,7 +299,7 @@ extern vm_paddr_t dump_avail[];
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
-#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
+#define	pmap_page_get_memattr(m)	((m)->mdmemattr)
 #define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
 #define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))
 

Modified: user/attilio/vmobj-readlock/sys/arm/arm/pmap-v6.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/arm/arm/pmap-v6.c	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/arm/arm/pmap-v6.c	Wed Jul  3 16:56:23 2013	(r252573)
@@ -1183,7 +1183,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.pv_memattr = VM_MEMATTR_DEFAULT;
+	m->mdmemattr = VM_MEMATTR_DEFAULT;
 }
 
 static vm_offset_t
@@ -2819,7 +2819,7 @@ validate:
 		if (!(prot & VM_PROT_EXECUTE))
 			npte |= L2_XN;
 
-		if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+		if (m->mdmemattr != VM_MEMATTR_UNCACHEABLE)
 			npte |= pte_l2_s_cache_mode;
 	}
 
@@ -4375,7 +4375,7 @@ pmap_page_set_memattr(vm_page_t m, vm_me
 	 * Remember the memattr in a field that gets used to set the appropriate
 	 * bits in the PTEs as mappings are established.
 	 */
-	m->md.pv_memattr = ma;
+	m->mdmemattr = ma;
 
 	/*
 	 * It appears that this function can only be called before any mappings

Modified: user/attilio/vmobj-readlock/sys/arm/arm/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/arm/arm/pmap.c	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/arm/arm/pmap.c	Wed Jul  3 16:56:23 2013	(r252573)
@@ -1380,7 +1380,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_
 		    (pv->pv_flags & PVF_NC)) {
 
 			pv->pv_flags &= ~PVF_NC;
-			if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+			if (pg->mdmemattr != VM_MEMATTR_UNCACHEABLE)
 				pmap_set_cache_entry(pv, pm, va, 1);
 			continue;
 		}
@@ -1390,7 +1390,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_
 		    !pmwc && (pv->pv_flags & PVF_NC)) {
 
 			pv->pv_flags &= ~(PVF_NC | PVF_MWC);
-			if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+			if (pg->mdmemattr != VM_MEMATTR_UNCACHEABLE)
 				pmap_set_cache_entry(pv, pm, va, 1);
 		}
 	}
@@ -1442,8 +1442,7 @@ pmap_clearbit(struct vm_page *pg, u_int 
 
 		if (!(oflags & maskbits)) {
 			if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) {
-				if (pg->md.pv_memattr != 
-				    VM_MEMATTR_UNCACHEABLE) {
+				if (pg->mdmemattr != VM_MEMATTR_UNCACHEABLE) {
 					PMAP_LOCK(pm);
 					l2b = pmap_get_l2_bucket(pm, va);
 					ptep = &l2b->l2b_kva[l2pte_index(va)];
@@ -1480,7 +1479,7 @@ pmap_clearbit(struct vm_page *pg, u_int 
 				 * permission.
 				 */
 				if (maskbits & PVF_WRITE) {
-					if (pg->md.pv_memattr !=
+					if (pg->mdmemattr !=
 					    VM_MEMATTR_UNCACHEABLE)
 						npte |= pte_l2_s_cache_mode;
 					pv->pv_flags &= ~(PVF_NC | PVF_MWC);
@@ -1811,7 +1810,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.pv_memattr = VM_MEMATTR_DEFAULT;
+	m->mdmemattr = VM_MEMATTR_DEFAULT;
 }
 
 /*
@@ -3412,7 +3411,7 @@ do_l2b_alloc:
 		    (m->oflags & VPO_UNMANAGED) == 0)
 			vm_page_aflag_set(m, PGA_WRITEABLE);
 	}
-	if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+	if (m->mdmemattr != VM_MEMATTR_UNCACHEABLE)
 		npte |= pte_l2_s_cache_mode;
 	if (m && m == opg) {
 		/*
@@ -5037,7 +5036,7 @@ pmap_page_set_memattr(vm_page_t m, vm_me
 	 * Remember the memattr in a field that gets used to set the appropriate
 	 * bits in the PTEs as mappings are established.
 	 */
-	m->md.pv_memattr = ma;
+	m->mdmemattr = ma;
 
 	/*
 	 * It appears that this function can only be called before any mappings

Modified: user/attilio/vmobj-readlock/sys/arm/include/pmap.h
==============================================================================
--- user/attilio/vmobj-readlock/sys/arm/include/pmap.h	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/arm/include/pmap.h	Wed Jul  3 16:56:23 2013	(r252573)
@@ -96,7 +96,7 @@ enum mem_type {
 
 #endif
 
-#define	pmap_page_get_memattr(m)	((m)->md.pv_memattr)
+#define	pmap_page_get_memattr(m)	((m)->mdmemattr)
 #define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
 #define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
 void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
@@ -120,7 +120,6 @@ struct	pv_chunk;
 
 struct	md_page {
 	int pvh_attrs;
-	vm_memattr_t	 pv_memattr;
 	vm_offset_t pv_kva;		/* first kernel VA mapping */
 	TAILQ_HEAD(,pv_entry)	pv_list;
 };

Modified: user/attilio/vmobj-readlock/sys/i386/i386/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/i386/i386/pmap.c	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/i386/i386/pmap.c	Wed Jul  3 16:56:23 2013	(r252573)
@@ -645,7 +645,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.pat_mode = PAT_WRITE_BACK;
+	m->mdmemattr = PAT_WRITE_BACK;
 }
 
 #ifdef PAE
@@ -1534,7 +1534,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *
 	endpte = pte + count;
 	while (pte < endpte) {
 		m = *ma++;
-		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->mdmemattr, 0);
 		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
 			oldpte |= *pte;
 			pte_store(pte, pa | pgeflag | PG_RW | PG_V);
@@ -3531,7 +3531,7 @@ validate:
 	/*
 	 * Now validate mapping with desired protection/wiring.
 	 */
-	newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
+	newpte = (pt_entry_t)(pa | pmap_cache_bits(m->mdmemattr, 0) | PG_V);
 	if ((prot & VM_PROT_WRITE) != 0) {
 		newpte |= PG_RW;
 		if ((newpte & PG_MANAGED) != 0)
@@ -3620,7 +3620,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t 
 		    " in pmap %p", va, pmap);
 		return (FALSE);
 	}
-	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
+	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->mdmemattr, 1) |
 	    PG_PS | PG_V;
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		newpde |= PG_MANAGED;
@@ -3811,7 +3811,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	 */
 	pmap->pm_stats.resident_count++;
 
-	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->mdmemattr, 0);
 #ifdef PAE
 	if ((prot & VM_PROT_EXECUTE) == 0)
 		pa |= pg_nx;
@@ -3854,7 +3854,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
 	pd_entry_t *pde;
 	vm_paddr_t pa, ptepa;
 	vm_page_t p;
-	int pat_mode;
+	vm_memattr_t pat_mode;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
@@ -3866,7 +3866,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
 		p = vm_page_lookup(object, pindex);
 		KASSERT(p->valid == VM_PAGE_BITS_ALL,
 		    ("pmap_object_init_pt: invalid page %p", p));
-		pat_mode = p->md.pat_mode;
+		pat_mode = p->mdmemattr;
 
 		/*
 		 * Abort the mapping if the first page is not physically
@@ -3887,7 +3887,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
 			KASSERT(p->valid == VM_PAGE_BITS_ALL,
 			    ("pmap_object_init_pt: invalid page %p", p));
 			if (pa != VM_PAGE_TO_PHYS(p) ||
-			    pat_mode != p->md.pat_mode)
+			    pat_mode != p->mdmemattr)
 				return;
 			p = TAILQ_NEXT(p, listq);
 		}
@@ -4118,7 +4118,7 @@ pmap_zero_page(vm_page_t m)
 		panic("pmap_zero_page: CMAP2 busy");
 	sched_pin();
 	*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
-	    pmap_cache_bits(m->md.pat_mode, 0);
+	    pmap_cache_bits(m->mdmemattr, 0);
 	invlcaddr(sysmaps->CADDR2);
 	pagezero(sysmaps->CADDR2);
 	*sysmaps->CMAP2 = 0;
@@ -4143,7 +4143,7 @@ pmap_zero_page_area(vm_page_t m, int off
 		panic("pmap_zero_page_area: CMAP2 busy");
 	sched_pin();
 	*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
-	    pmap_cache_bits(m->md.pat_mode, 0);
+	    pmap_cache_bits(m->mdmemattr, 0);
 	invlcaddr(sysmaps->CADDR2);
 	if (off == 0 && size == PAGE_SIZE) 
 		pagezero(sysmaps->CADDR2);
@@ -4168,7 +4168,7 @@ pmap_zero_page_idle(vm_page_t m)
 		panic("pmap_zero_page_idle: CMAP3 busy");
 	sched_pin();
 	*CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
-	    pmap_cache_bits(m->md.pat_mode, 0);
+	    pmap_cache_bits(m->mdmemattr, 0);
 	invlcaddr(CADDR3);
 	pagezero(CADDR3);
 	*CMAP3 = 0;
@@ -4196,9 +4196,9 @@ pmap_copy_page(vm_page_t src, vm_page_t 
 	invlpg((u_int)sysmaps->CADDR1);
 	invlpg((u_int)sysmaps->CADDR2);
 	*sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
-	    pmap_cache_bits(src->md.pat_mode, 0);
+	    pmap_cache_bits(src->mdmemattr, 0);
 	*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
-	    pmap_cache_bits(dst->md.pat_mode, 0);
+	    pmap_cache_bits(dst->mdmemattr, 0);
 	bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
 	*sysmaps->CMAP1 = 0;
 	*sysmaps->CMAP2 = 0;
@@ -4235,9 +4235,9 @@ pmap_copy_pages(vm_page_t ma[], vm_offse
 		b_pg_offset = b_offset & PAGE_MASK;
 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
 		*sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A |
-		    pmap_cache_bits(b_pg->md.pat_mode, 0);
+		    pmap_cache_bits(b_pg->mdmemattr, 0);
 		*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A |
-		    PG_M | pmap_cache_bits(b_pg->md.pat_mode, 0);
+		    PG_M | pmap_cache_bits(b_pg->mdmemattr, 0);
 		a_cp = sysmaps->CADDR1 + a_pg_offset;
 		b_cp = sysmaps->CADDR2 + b_pg_offset;
 		bcopy(a_cp, b_cp, cnt);
@@ -5049,7 +5049,7 @@ void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
 
-	m->md.pat_mode = ma;
+	m->mdmemattr = ma;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return;
 
@@ -5087,7 +5087,7 @@ pmap_flush_page(vm_page_t m)
 			panic("pmap_flush_page: CMAP2 busy");
 		sched_pin();
 		*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
-		    PG_A | PG_M | pmap_cache_bits(m->md.pat_mode, 0);
+		    PG_A | PG_M | pmap_cache_bits(m->mdmemattr, 0);
 		invlcaddr(sysmaps->CADDR2);
 		sva = (vm_offset_t)sysmaps->CADDR2;
 		eva = sva + PAGE_SIZE;

Modified: user/attilio/vmobj-readlock/sys/i386/i386/vm_machdep.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/i386/i386/vm_machdep.c	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/i386/i386/vm_machdep.c	Wed Jul  3 16:56:23 2013	(r252573)
@@ -864,10 +864,10 @@ sf_buf_alloc(struct vm_page *m, int flag
 	opte = *ptep;
 #ifdef XEN
        PT_SET_MA(sf->kva, xpmap_ptom(VM_PAGE_TO_PHYS(m)) | pgeflag
-	   | PG_RW | PG_V | pmap_cache_bits(m->md.pat_mode, 0));
+	   | PG_RW | PG_V | pmap_cache_bits(m->mdmemattr, 0));
 #else
 	*ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V |
-	    pmap_cache_bits(m->md.pat_mode, 0);
+	    pmap_cache_bits(m->mdmemattr, 0);
 #endif
 
 	/*

Modified: user/attilio/vmobj-readlock/sys/i386/include/pmap.h
==============================================================================
--- user/attilio/vmobj-readlock/sys/i386/include/pmap.h	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/i386/include/pmap.h	Wed Jul  3 16:56:23 2013	(r252573)
@@ -429,7 +429,6 @@ struct	pv_chunk;
 
 struct md_page {
 	TAILQ_HEAD(,pv_entry)	pv_list;
-	int			pat_mode;
 };
 
 struct pmap {
@@ -499,7 +498,7 @@ extern char *ptvmmap;		/* poor name! */
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
-#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
+#define	pmap_page_get_memattr(m)	((m)->mdmemattr)
 #define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
 #define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))
 

Modified: user/attilio/vmobj-readlock/sys/i386/xen/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/i386/xen/pmap.c	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/i386/xen/pmap.c	Wed Jul  3 16:56:23 2013	(r252573)
@@ -536,7 +536,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.pat_mode = PAT_WRITE_BACK;
+	m->mdmemattr = PAT_WRITE_BACK;
 }
 
 /*
@@ -3110,7 +3110,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
 	pd_entry_t *pde;
 	vm_paddr_t pa, ptepa;
 	vm_page_t p;
-	int pat_mode;
+	vm_memattr_t pat_mode;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
@@ -3122,7 +3122,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
 		p = vm_page_lookup(object, pindex);
 		KASSERT(p->valid == VM_PAGE_BITS_ALL,
 		    ("pmap_object_init_pt: invalid page %p", p));
-		pat_mode = p->md.pat_mode;
+		pat_mode = p->mdmemattr;
 
 		/*
 		 * Abort the mapping if the first page is not physically
@@ -3143,7 +3143,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
 			KASSERT(p->valid == VM_PAGE_BITS_ALL,
 			    ("pmap_object_init_pt: invalid page %p", p));
 			if (pa != VM_PAGE_TO_PHYS(p) ||
-			    pat_mode != p->md.pat_mode)
+			    pat_mode != p->mdmemattr)
 				return;
 			p = TAILQ_NEXT(p, listq);
 		}
@@ -4065,7 +4065,7 @@ void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
 
-	m->md.pat_mode = ma;
+	m->mdmemattr = ma;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return;
 
@@ -4104,7 +4104,7 @@ pmap_flush_page(vm_page_t m)
 		sched_pin();
 		PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
 		    VM_PAGE_TO_MACH(m) | PG_A | PG_M |
-		    pmap_cache_bits(m->md.pat_mode, 0));
+		    pmap_cache_bits(m->mdmemattr, 0));
 		invlcaddr(sysmaps->CADDR2);
 		sva = (vm_offset_t)sysmaps->CADDR2;
 		eva = sva + PAGE_SIZE;

Modified: user/attilio/vmobj-readlock/sys/ia64/ia64/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/ia64/ia64/pmap.c	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/ia64/ia64/pmap.c	Wed Jul  3 16:56:23 2013	(r252573)
@@ -473,7 +473,7 @@ pmap_page_to_va(vm_page_t m)
 	vm_offset_t va;
 
 	pa = VM_PAGE_TO_PHYS(m);
-	va = (m->md.memattr == VM_MEMATTR_UNCACHEABLE) ? IA64_PHYS_TO_RR6(pa) :
+	va = (m->mdmemattr == VM_MEMATTR_UNCACHEABLE) ? IA64_PHYS_TO_RR6(pa) :
 	    IA64_PHYS_TO_RR7(pa);
 	return (va);
 }
@@ -486,7 +486,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.memattr = VM_MEMATTR_DEFAULT;
+	m->mdmemattr = VM_MEMATTR_DEFAULT;
 }
 
 /*
@@ -1439,7 +1439,7 @@ pmap_qenter(vm_offset_t va, vm_page_t *m
 		else
 			pmap_enter_vhpt(pte, va);
 		pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
-		pmap_pte_attr(pte, m[i]->md.memattr);
+		pmap_pte_attr(pte, m[i]->mdmemattr);
 		pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE);
 		va += PAGE_SIZE;
 	}
@@ -1768,7 +1768,7 @@ validate:
 	 * adds the pte to the VHPT if necessary.
 	 */
 	pmap_pte_prot(pmap, pte, prot);
-	pmap_pte_attr(pte, m->md.memattr);
+	pmap_pte_attr(pte, m->mdmemattr);
 	pmap_set_pte(pte, va, pa, wired, managed);
 
 	/* Invalidate the I-cache when needed. */
@@ -1875,7 +1875,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 		pmap_enter_vhpt(pte, va);
 		pmap_pte_prot(pmap, pte,
 		    prot & (VM_PROT_READ | VM_PROT_EXECUTE));
-		pmap_pte_attr(pte, m->md.memattr);
+		pmap_pte_attr(pte, m->mdmemattr);
 		pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);
 
 		if (prot & VM_PROT_EXECUTE)
@@ -2417,7 +2417,7 @@ pmap_remove_write(vm_page_t m)
 			}
 			prot &= ~VM_PROT_WRITE;
 			pmap_pte_prot(pmap, pte, prot);
-			pmap_pte_attr(pte, m->md.memattr);
+			pmap_pte_attr(pte, m->mdmemattr);
 			pmap_invalidate_page(pv->pv_va);
 		}
 		pmap_switch(oldpmap);
@@ -2499,7 +2499,7 @@ pmap_page_set_memattr(vm_page_t m, vm_me
 	void *va;
 
 	rw_wlock(&pvh_global_lock);
-	m->md.memattr = ma;
+	m->mdmemattr = ma;
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);

Modified: user/attilio/vmobj-readlock/sys/mips/include/pmap.h
==============================================================================
--- user/attilio/vmobj-readlock/sys/mips/include/pmap.h	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/mips/include/pmap.h	Wed Jul  3 16:56:23 2013	(r252573)
@@ -69,7 +69,6 @@ struct pv_entry;
 struct pv_chunk;
 
 struct md_page {
-	int pv_flags;
 	TAILQ_HEAD(, pv_entry) pv_list;
 };
 

Modified: user/attilio/vmobj-readlock/sys/mips/mips/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/mips/mips/pmap.c	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/mips/mips/pmap.c	Wed Jul  3 16:56:23 2013	(r252573)
@@ -594,7 +594,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.pv_flags = 0;
+	m->mdmemattr = 0;
 }
 
 /*
@@ -1435,9 +1435,9 @@ pmap_pv_reclaim(pmap_t locked_pmap)
 				m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(oldpte));
 				if (pte_test(&oldpte, PTE_D))
 					vm_page_dirty(m);
-				if (m->md.pv_flags & PV_TABLE_REF)
+				if (m->mdmemattr & PV_TABLE_REF)
 					vm_page_aflag_set(m, PGA_REFERENCED);
-				m->md.pv_flags &= ~PV_TABLE_REF;
+				m->mdmemattr &= ~PV_TABLE_REF;
 				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 				if (TAILQ_EMPTY(&m->md.pv_list))
 					vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -1705,9 +1705,9 @@ pmap_remove_pte(struct pmap *pmap, pt_en
 			    __func__, (void *)va, (uintmax_t)oldpte));
 			vm_page_dirty(m);
 		}
-		if (m->md.pv_flags & PV_TABLE_REF)
+		if (m->mdmemattr & PV_TABLE_REF)
 			vm_page_aflag_set(m, PGA_REFERENCED);
-		m->md.pv_flags &= ~PV_TABLE_REF;
+		m->mdmemattr &= ~PV_TABLE_REF;
 
 		pmap_remove_entry(pmap, m, va);
 	}
@@ -1846,7 +1846,7 @@ pmap_remove_all(vm_page_t m)
 	    ("pmap_remove_all: page %p is not managed", m));
 	rw_wlock(&pvh_global_lock);
 
-	if (m->md.pv_flags & PV_TABLE_REF)
+	if (m->mdmemattr & PV_TABLE_REF)
 		vm_page_aflag_set(m, PGA_REFERENCED);
 
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
@@ -1893,7 +1893,7 @@ pmap_remove_all(vm_page_t m)
 	}
 
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	m->md.pv_flags &= ~PV_TABLE_REF;
+	m->mdmemattr &= ~PV_TABLE_REF;
 	rw_wunlock(&pvh_global_lock);
 }
 
@@ -2078,7 +2078,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 			mpte->wire_count--;
 
 		if (pte_test(&origpte, PTE_MANAGED)) {
-			m->md.pv_flags |= PV_TABLE_REF;
+			m->mdmemattr |= PV_TABLE_REF;
 			om = m;
 			newpte |= PTE_MANAGED;
 			if (!pte_test(&newpte, PTE_RO))
@@ -2114,7 +2114,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	 * Enter on the PV list if part of our managed memory.
 	 */
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
-		m->md.pv_flags |= PV_TABLE_REF;
+		m->mdmemattr |= PV_TABLE_REF;
 		if (pv == NULL)
 			pv = get_pv_entry(pmap, FALSE);
 		pv->pv_va = va;
@@ -2145,9 +2145,9 @@ validate:
 		*pte = newpte;
 		if (pte_test(&origpte, PTE_V)) {
 			if (pte_test(&origpte, PTE_MANAGED) && opa != pa) {
-				if (om->md.pv_flags & PV_TABLE_REF)
+				if (om->mdmemattr & PV_TABLE_REF)
 					vm_page_aflag_set(om, PGA_REFERENCED);
-				om->md.pv_flags &= ~PV_TABLE_REF;
+				om->mdmemattr &= ~PV_TABLE_REF;
 			}
 			if (pte_test(&origpte, PTE_D)) {
 				KASSERT(!pte_test(&origpte, PTE_RO),
@@ -2853,9 +2853,9 @@ pmap_ts_referenced(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
-	if (m->md.pv_flags & PV_TABLE_REF) {
+	if (m->mdmemattr & PV_TABLE_REF) {
 		rw_wlock(&pvh_global_lock);
-		m->md.pv_flags &= ~PV_TABLE_REF;
+		m->mdmemattr &= ~PV_TABLE_REF;
 		rw_wunlock(&pvh_global_lock);
 		return (1);
 	}
@@ -2965,7 +2965,7 @@ pmap_is_referenced(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
-	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
+	return ((m->mdmemattr & PV_TABLE_REF) != 0);
 }
 
 /*
@@ -2980,8 +2980,8 @@ pmap_clear_reference(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
 	rw_wlock(&pvh_global_lock);
-	if (m->md.pv_flags & PV_TABLE_REF) {
-		m->md.pv_flags &= ~PV_TABLE_REF;
+	if (m->mdmemattr & PV_TABLE_REF) {
+		m->mdmemattr &= ~PV_TABLE_REF;
 	}
 	rw_wunlock(&pvh_global_lock);
 }

Modified: user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea.c	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea.c	Wed Jul  3 16:56:23 2013	(r252573)
@@ -1474,7 +1474,7 @@ moea_page_set_memattr(mmu_t mmu, vm_page
 	u_int	lo;
 
 	if ((m->oflags & VPO_UNMANAGED) != 0) {
-		m->md.mdpg_cache_attrs = ma;
+		m->mdmemattr = ma;
 		return;
 	}
 
@@ -1497,7 +1497,7 @@ moea_page_set_memattr(mmu_t mmu, vm_page
 		mtx_unlock(&moea_table_mutex);
 		PMAP_UNLOCK(pmap);
 	}
-	m->md.mdpg_cache_attrs = ma;
+	m->mdmemattr = ma;
 	rw_wunlock(&pvh_global_lock);
 }
 

Modified: user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea64.c	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea64.c	Wed Jul  3 16:56:23 2013	(r252573)
@@ -1657,7 +1657,7 @@ moea64_page_set_memattr(mmu_t mmu, vm_pa
 	uint64_t lo;
 
 	if ((m->oflags & VPO_UNMANAGED) != 0) {
-		m->md.mdpg_cache_attrs = ma;
+		m->mdmemattr = ma;
 		return;
 	}
 
@@ -1679,7 +1679,7 @@ moea64_page_set_memattr(mmu_t mmu, vm_pa
 		PMAP_UNLOCK(pmap);
 	}
 	UNLOCK_TABLE_RD();
-	m->md.mdpg_cache_attrs = ma;
+	m->mdmemattr = ma;
 }
 
 /*

Modified: user/attilio/vmobj-readlock/sys/powerpc/include/pmap.h
==============================================================================
--- user/attilio/vmobj-readlock/sys/powerpc/include/pmap.h	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/powerpc/include/pmap.h	Wed Jul  3 16:56:23 2013	(r252573)
@@ -144,11 +144,10 @@ struct	pmap {
 
 struct	md_page {
 	u_int64_t	 mdpg_attrs;
-	vm_memattr_t	 mdpg_cache_attrs;
 	struct	pvo_head mdpg_pvoh;
 };
 
-#define	pmap_page_get_memattr(m)	((m)->md.mdpg_cache_attrs)
+#define	pmap_page_get_memattr(m)	((m)->mdmemattr)
 #define	pmap_page_is_mapped(m)	(!LIST_EMPTY(&(m)->md.mdpg_pvoh))
 
 /*

Modified: user/attilio/vmobj-readlock/sys/sparc64/include/pmap.h
==============================================================================
--- user/attilio/vmobj-readlock/sys/sparc64/include/pmap.h	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/sparc64/include/pmap.h	Wed Jul  3 16:56:23 2013	(r252573)
@@ -56,7 +56,6 @@ struct md_page {
 	struct	pmap *pmap;
 	uint32_t colors[DCACHE_COLORS];
 	int32_t	color;
-	uint32_t flags;
 };
 
 struct pmap {

Modified: user/attilio/vmobj-readlock/sys/sparc64/sparc64/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/sparc64/sparc64/pmap.c	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/sparc64/sparc64/pmap.c	Wed Jul  3 16:56:23 2013	(r252573)
@@ -765,7 +765,6 @@ pmap_page_init(vm_page_t m)
 
 	TAILQ_INIT(&m->md.tte_list);
 	m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
-	m->md.flags = 0;
 	m->md.pmap = NULL;
 }
 

Modified: user/attilio/vmobj-readlock/sys/vm/vm_page.h
==============================================================================
--- user/attilio/vmobj-readlock/sys/vm/vm_page.h	Wed Jul  3 16:51:33 2013	(r252572)
+++ user/attilio/vmobj-readlock/sys/vm/vm_page.h	Wed Jul  3 16:56:23 2013	(r252573)
@@ -144,6 +144,7 @@ struct vm_page {
 	vm_pindex_t pindex;		/* offset into object (O,P) */
 	vm_paddr_t phys_addr;		/* physical address of page */
 	struct md_page md;		/* machine dependant stuff */
+	vm_memattr_t mdmemattr;		/* arch specific memory attribute */
 	uint8_t	queue;			/* page queue index (P,Q) */
 	int8_t segind;
 	short hold_count;		/* page hold count (P) */
