svn commit: r248814 - in stable/9/sys: amd64/amd64 arm/arm arm/include i386/i386 i386/xen ia64/ia64 mips/mips powerpc/aim powerpc/booke powerpc/powerpc sparc64/sparc64 vm

Konstantin Belousov <kib at FreeBSD.org>
Thu Mar 28 06:31:07 UTC 2013


Author: kib
Date: Thu Mar 28 06:31:04 2013
New Revision: 248814
URL: http://svnweb.freebsd.org/changeset/base/248814

Log:
  MFC r248280:
  Add pmap function pmap_copy_pages(), which copies xfersize bytes
  from offset a_offset in the run of physical pages ma[] to offset
  b_offset in the run mb[].  The pages do not need to be mapped into
  the kernel address space; implementations copy through the direct
  map or transient kernel mappings (the sparc64 method is left as a
  panic stub).

Modified:
  stable/9/sys/amd64/amd64/pmap.c
  stable/9/sys/arm/arm/pmap.c
  stable/9/sys/arm/include/pmap.h
  stable/9/sys/i386/i386/pmap.c
  stable/9/sys/i386/xen/pmap.c
  stable/9/sys/ia64/ia64/pmap.c
  stable/9/sys/mips/mips/pmap.c
  stable/9/sys/powerpc/aim/mmu_oea.c
  stable/9/sys/powerpc/aim/mmu_oea64.c
  stable/9/sys/powerpc/booke/pmap.c
  stable/9/sys/powerpc/powerpc/mmu_if.m
  stable/9/sys/powerpc/powerpc/pmap_dispatch.c
  stable/9/sys/sparc64/sparc64/pmap.c
  stable/9/sys/vm/pmap.h
Directory Properties:
  stable/9/sys/   (props changed)

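All of the per-architecture implementations below share one chunking
loop: advance through both page runs at once, and on each iteration
copy at most up to the next page boundary of either run.  A user-space
analogue of that arithmetic (copy_pages, the page arrays, and main()
are illustrative stand-ins, not kernel code):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096
#define PAGE_MASK	(PAGE_SIZE - 1)
#define PAGE_SHIFT	12
#define MIN(a, b)	((a) < (b) ? (a) : (b))

/*
 * Copy xfersize bytes from offset a_offset in the page run ma[]
 * to offset b_offset in the page run mb[].
 */
static void
copy_pages(char *ma[], size_t a_offset, char *mb[], size_t b_offset,
    int xfersize)
{
	size_t a_pg_offset, b_pg_offset;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = MIN(xfersize, (int)(PAGE_SIZE - a_pg_offset));
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = MIN(cnt, (int)(PAGE_SIZE - b_pg_offset));
		/* cnt never crosses a page boundary in either run. */
		memcpy(mb[b_offset >> PAGE_SHIFT] + b_pg_offset,
		    ma[a_offset >> PAGE_SHIFT] + a_pg_offset, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

int
main(void)
{
	static char a0[PAGE_SIZE], a1[PAGE_SIZE], b0[PAGE_SIZE];
	static char b1[PAGE_SIZE];
	char *ma[] = { a0, a1 }, *mb[] = { b0, b1 };

	memset(a0, 'x', PAGE_SIZE);
	memset(a1, 'y', PAGE_SIZE);
	/* Copy 300 bytes spanning the source run's page boundary. */
	copy_pages(ma, PAGE_SIZE - 100, mb, 10, 300);
	printf("%c %c\n", b0[10], b0[110]);	/* prints "x y" */
	return (0);
}
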
Modified: stable/9/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/9/sys/amd64/amd64/pmap.c	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/amd64/amd64/pmap.c	Thu Mar 28 06:31:04 2013	(r248814)
@@ -4245,6 +4245,30 @@ pmap_copy_page(vm_page_t msrc, vm_page_t
 	pagecopy((void *)src, (void *)dst);
 }
 
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+    vm_offset_t b_offset, int xfersize)
+{
+	void *a_cp, *b_cp;
+	vm_offset_t a_pg_offset, b_pg_offset;
+	int cnt;
+
+	while (xfersize > 0) {
+		a_pg_offset = a_offset & PAGE_MASK;
+		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+		a_cp = (char *)PHYS_TO_DMAP(ma[a_offset >> PAGE_SHIFT]->
+		    phys_addr) + a_pg_offset;
+		b_pg_offset = b_offset & PAGE_MASK;
+		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+		b_cp = (char *)PHYS_TO_DMAP(mb[b_offset >> PAGE_SHIFT]->
+		    phys_addr) + b_pg_offset;
+		bcopy(a_cp, b_cp, cnt);
+		a_offset += cnt;
+		b_offset += cnt;
+		xfersize -= cnt;
+	}
+}
+
 /*
  * Returns true if the pmap's pv is one of the first
  * 16 pvs linked to from this page.  This count may

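On amd64 no transient mappings are needed: the direct map provides a
permanent kernel virtual address for every physical page, so
PHYS_TO_DMAP() on the page's physical address yields a pointer that
bcopy() can use directly.  The committed code reads m->phys_addr
itself; an equivalent formulation of the two lookups inside the loop,
using the VM_PAGE_TO_PHYS() macro (a sketch of the loop body only):

	a_cp = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
	    ma[a_offset >> PAGE_SHIFT])) + a_pg_offset;
	b_cp = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
	    mb[b_offset >> PAGE_SHIFT])) + b_pg_offset;
	bcopy(a_cp, b_cp, cnt);
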
Modified: stable/9/sys/arm/arm/pmap.c
==============================================================================
--- stable/9/sys/arm/arm/pmap.c	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/arm/arm/pmap.c	Thu Mar 28 06:31:04 2013	(r248814)
@@ -250,6 +250,9 @@ pt_entry_t	pte_l1_c_proto;
 pt_entry_t	pte_l2_s_proto;
 
 void		(*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
+void		(*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
+		    vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs,
+		    int cnt);
 void		(*pmap_zero_page_func)(vm_paddr_t, int, int);
 /*
  * Which pmap is currently 'live' in the cache
@@ -406,6 +409,13 @@ static vm_paddr_t pmap_kernel_l2ptp_phys
 static struct vm_object pvzone_obj;
 static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
 
+void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
+    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
+#if ARM_MMU_XSCALE == 1
+void pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
+    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
+#endif
+
 /*
  * This list exists for the benefit of pmap_map_chunk().  It keeps track
  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
@@ -490,6 +500,7 @@ pmap_pte_init_generic(void)
 	pte_l2_s_proto = L2_S_PROTO_generic;
 
 	pmap_copy_page_func = pmap_copy_page_generic;
+	pmap_copy_page_offs_func = pmap_copy_page_offs_generic;
 	pmap_zero_page_func = pmap_zero_page_generic;
 }
 
@@ -666,6 +677,7 @@ pmap_pte_init_xscale(void)
 
 #ifdef CPU_XSCALE_CORE3
 	pmap_copy_page_func = pmap_copy_page_generic;
+	pmap_copy_page_offs_func = pmap_copy_page_offs_generic;
 	pmap_zero_page_func = pmap_zero_page_generic;
 	xscale_use_minidata = 0;
 	/* Make sure it is L2-cachable */
@@ -678,6 +690,7 @@ pmap_pte_init_xscale(void)
 
 #else
 	pmap_copy_page_func = pmap_copy_page_xscale;
+	pmap_copy_page_offs_func = pmap_copy_page_offs_xscale;
 	pmap_zero_page_func = pmap_zero_page_xscale;
 #endif
 
@@ -4333,6 +4346,29 @@ pmap_copy_page_generic(vm_paddr_t src, v
 	cpu_l2cache_inv_range(csrcp, PAGE_SIZE);
 	cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE);
 }
+
+void
+pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
+    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
+{
+
+	mtx_lock(&cmtx);
+	*csrc_pte = L2_S_PROTO | a_phys |
+	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
+	PTE_SYNC(csrc_pte);
+	*cdst_pte = L2_S_PROTO | b_phys |
+	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+	PTE_SYNC(cdst_pte);
+	cpu_tlb_flushD_SE(csrcp);
+	cpu_tlb_flushD_SE(cdstp);
+	cpu_cpwait();
+	bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
+	mtx_unlock(&cmtx);
+	cpu_dcache_inv_range(csrcp + a_offs, cnt);
+	cpu_dcache_wbinv_range(cdstp + b_offs, cnt);
+	cpu_l2cache_inv_range(csrcp + a_offs, cnt);
+	cpu_l2cache_wbinv_range(cdstp + b_offs, cnt);
+}
 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
 
 #if ARM_MMU_XSCALE == 1
@@ -4384,6 +4420,28 @@ pmap_copy_page_xscale(vm_paddr_t src, vm
 	mtx_unlock(&cmtx);
 	xscale_cache_clean_minidata();
 }
+
+void
+pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
+    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
+{
+
+	mtx_lock(&cmtx);
+	*csrc_pte = L2_S_PROTO | a_phys |
+	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
+	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
+	PTE_SYNC(csrc_pte);
+	*cdst_pte = L2_S_PROTO | b_phys |
+	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
+	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
+	PTE_SYNC(cdst_pte);
+	cpu_tlb_flushD_SE(csrcp);
+	cpu_tlb_flushD_SE(cdstp);
+	cpu_cpwait();
+	bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
+	mtx_unlock(&cmtx);
+	xscale_cache_clean_minidata();
+}
 #endif /* ARM_MMU_XSCALE == 1 */
 
 void
@@ -4410,8 +4468,41 @@ pmap_copy_page(vm_page_t src, vm_page_t 
 #endif
 }
 
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+    vm_offset_t b_offset, int xfersize)
+{
+	vm_page_t a_pg, b_pg;
+	vm_offset_t a_pg_offset, b_pg_offset;
+	int cnt;
+#ifdef ARM_USE_SMALL_ALLOC
+	vm_offset_t a_va, b_va;
+#endif
 
-
+	cpu_dcache_wbinv_all();
+	cpu_l2cache_wbinv_all();
+	while (xfersize > 0) {
+		a_pg = ma[a_offset >> PAGE_SHIFT];
+		a_pg_offset = a_offset & PAGE_MASK;
+		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+		b_pg = mb[b_offset >> PAGE_SHIFT];
+		b_pg_offset = b_offset & PAGE_MASK;
+		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+#ifdef ARM_USE_SMALL_ALLOC
+		a_va = arm_ptovirt(VM_PAGE_TO_PHYS(a_pg)) + a_pg_offset;
+		b_va = arm_ptovirt(VM_PAGE_TO_PHYS(b_pg)) + b_pg_offset;
+		bcopy((char *)a_va, (char *)b_va, cnt);
+		cpu_dcache_wbinv_range(b_va, cnt);
+		cpu_l2cache_wbinv_range(b_va, cnt);
+#else
+		pmap_copy_page_offs_func(VM_PAGE_TO_PHYS(a_pg), a_pg_offset,
+		    VM_PAGE_TO_PHYS(b_pg), b_pg_offset, cnt);
+#endif
+		xfersize -= cnt;
+		a_offset += cnt;
+		b_offset += cnt;
+	}
+}
 
 /*
  * this routine returns true if a physical page resides

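arm has no direct map to rely on (outside the ARM_USE_SMALL_ALLOC
configuration, which goes through arm_ptovirt()), so each chunk is
copied through the reserved csrc_pte/cdst_pte kernel mappings, and the
PTE bits plus the cache maintenance differ per MMU class.  That is why
the change adds a second function pointer, pmap_copy_page_offs_func,
installed next to pmap_copy_page_func in pmap_pte_init_generic() and
pmap_pte_init_xscale().  The selection pattern, reduced to a runnable
stand-alone analogue (all names hypothetical):

#include <stdio.h>

/* Stand-ins for pmap_copy_page_offs_generic()/_xscale(). */
static void copy_offs_generic(void) { puts("generic copy"); }
static void copy_offs_xscale(void)  { puts("xscale copy"); }

/* Chosen once at boot, called for every pmap_copy_pages() chunk. */
static void (*copy_offs_func)(void);

static void
pte_init(int is_xscale)
{
	copy_offs_func = is_xscale ? copy_offs_xscale : copy_offs_generic;
}

int
main(void)
{
	pte_init(0);
	copy_offs_func();	/* prints "generic copy" */
	return (0);
}
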
Modified: stable/9/sys/arm/include/pmap.h
==============================================================================
--- stable/9/sys/arm/include/pmap.h	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/arm/include/pmap.h	Thu Mar 28 06:31:04 2013	(r248814)
@@ -420,6 +420,8 @@ extern pt_entry_t		pte_l1_c_proto;
 extern pt_entry_t		pte_l2_s_proto;
 
 extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
+extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
+    vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
 extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);
 
 #if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)

Modified: stable/9/sys/i386/i386/pmap.c
==============================================================================
--- stable/9/sys/i386/i386/pmap.c	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/i386/i386/pmap.c	Thu Mar 28 06:31:04 2013	(r248814)
@@ -4256,6 +4256,49 @@ pmap_copy_page(vm_page_t src, vm_page_t 
 	mtx_unlock(&sysmaps->lock);
 }
 
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+    vm_offset_t b_offset, int xfersize)
+{
+	struct sysmaps *sysmaps;
+	vm_page_t a_pg, b_pg;
+	char *a_cp, *b_cp;
+	vm_offset_t a_pg_offset, b_pg_offset;
+	int cnt;
+
+	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
+	mtx_lock(&sysmaps->lock);
+	if (*sysmaps->CMAP1 != 0)
+		panic("pmap_copy_pages: CMAP1 busy");
+	if (*sysmaps->CMAP2 != 0)
+		panic("pmap_copy_pages: CMAP2 busy");
+	sched_pin();
+	while (xfersize > 0) {
+		invlpg((u_int)sysmaps->CADDR1);
+		invlpg((u_int)sysmaps->CADDR2);
+		a_pg = ma[a_offset >> PAGE_SHIFT];
+		a_pg_offset = a_offset & PAGE_MASK;
+		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+		b_pg = mb[b_offset >> PAGE_SHIFT];
+		b_pg_offset = b_offset & PAGE_MASK;
+		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+		*sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A |
+		    pmap_cache_bits(a_pg->md.pat_mode, 0);
+		*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A |
+		    PG_M | pmap_cache_bits(b_pg->md.pat_mode, 0);
+		a_cp = sysmaps->CADDR1 + a_pg_offset;
+		b_cp = sysmaps->CADDR2 + b_pg_offset;
+		bcopy(a_cp, b_cp, cnt);
+		a_offset += cnt;
+		b_offset += cnt;
+		xfersize -= cnt;
+	}
+	*sysmaps->CMAP1 = 0;
+	*sysmaps->CMAP2 = 0;
+	sched_unpin();
+	mtx_unlock(&sysmaps->lock);
+}
+
 /*
  * Returns true if the pmap's pv is one of the first
  * 16 pvs linked to from this page.  This count may

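i386 likewise lacks a direct map, so the function borrows the per-CPU
sysmaps CMAP1/CMAP2 page-table entries and their CADDR1/CADDR2 virtual
windows that pmap_copy_page() already uses.  sched_pin() keeps the
thread on the CPU whose windows were claimed, and the invlpg calls at
the top of the loop flush the previous iteration's stale translations.
The discipline around the loop, in outline (a sketch, not new code):

	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];  /* this CPU's windows */
	mtx_lock(&sysmaps->lock);     /* windows are exclusively owned */
	sched_pin();                  /* do not migrate off this CPU */
	/* ... remap CMAP1/CMAP2 and bcopy() one chunk at a time ... */
	sched_unpin();
	mtx_unlock(&sysmaps->lock);
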
Modified: stable/9/sys/i386/xen/pmap.c
==============================================================================
--- stable/9/sys/i386/xen/pmap.c	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/i386/xen/pmap.c	Thu Mar 28 06:31:04 2013	(r248814)
@@ -3444,6 +3444,46 @@ pmap_copy_page(vm_page_t src, vm_page_t 
 	mtx_unlock(&sysmaps->lock);
 }
 
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+    vm_offset_t b_offset, int xfersize)
+{
+	struct sysmaps *sysmaps;
+	vm_page_t a_pg, b_pg;
+	char *a_cp, *b_cp;
+	vm_offset_t a_pg_offset, b_pg_offset;
+	int cnt;
+
+	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
+	mtx_lock(&sysmaps->lock);
+	if (*sysmaps->CMAP1 != 0)
+		panic("pmap_copy_pages: CMAP1 busy");
+	if (*sysmaps->CMAP2 != 0)
+		panic("pmap_copy_pages: CMAP2 busy");
+	sched_pin();
+	while (xfersize > 0) {
+		a_pg = ma[a_offset >> PAGE_SHIFT];
+		a_pg_offset = a_offset & PAGE_MASK;
+		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+		b_pg = mb[b_offset >> PAGE_SHIFT];
+		b_pg_offset = b_offset & PAGE_MASK;
+		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+		PT_SET_MA(sysmaps->CADDR1, PG_V | VM_PAGE_TO_MACH(a_pg) | PG_A);
+		PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
+		    VM_PAGE_TO_MACH(b_pg) | PG_A | PG_M);
+		a_cp = sysmaps->CADDR1 + a_pg_offset;
+		b_cp = sysmaps->CADDR2 + b_pg_offset;
+		bcopy(a_cp, b_cp, cnt);
+		a_offset += cnt;
+		b_offset += cnt;
+		xfersize -= cnt;
+	}
+	PT_SET_MA(sysmaps->CADDR1, 0);
+	PT_SET_MA(sysmaps->CADDR2, 0);
+	sched_unpin();
+	mtx_unlock(&sysmaps->lock);
+}
+
 /*
  * Returns true if the pmap's pv is one of the first
  * 16 pvs linked to from this page.  This count may

Modified: stable/9/sys/ia64/ia64/pmap.c
==============================================================================
--- stable/9/sys/ia64/ia64/pmap.c	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/ia64/ia64/pmap.c	Thu Mar 28 06:31:04 2013	(r248814)
@@ -1884,6 +1884,30 @@ pmap_copy_page(vm_page_t msrc, vm_page_t
 	bcopy(src, dst, PAGE_SIZE);
 }
 
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+    vm_offset_t b_offset, int xfersize)
+{
+	void *a_cp, *b_cp;
+	vm_offset_t a_pg_offset, b_pg_offset;
+	int cnt;
+
+	while (xfersize > 0) {
+		a_pg_offset = a_offset & PAGE_MASK;
+		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+		a_cp = (char *)pmap_page_to_va(ma[a_offset >> PAGE_SHIFT]) +
+		    a_pg_offset;
+		b_pg_offset = b_offset & PAGE_MASK;
+		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+		b_cp = (char *)pmap_page_to_va(mb[b_offset >> PAGE_SHIFT]) +
+		    b_pg_offset;
+		bcopy(a_cp, b_cp, cnt);
+		a_offset += cnt;
+		b_offset += cnt;
+		xfersize -= cnt;
+	}
+}
+
 /*
  * Returns true if the pmap's pv is one of the first
  * 16 pvs linked to from this page.  This count may

Modified: stable/9/sys/mips/mips/pmap.c
==============================================================================
--- stable/9/sys/mips/mips/pmap.c	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/mips/mips/pmap.c	Thu Mar 28 06:31:04 2013	(r248814)
@@ -2445,6 +2445,51 @@ pmap_copy_page(vm_page_t src, vm_page_t 
 	}
 }
 
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+    vm_offset_t b_offset, int xfersize)
+{
+	char *a_cp, *b_cp;
+	vm_page_t a_m, b_m;
+	vm_offset_t a_pg_offset, b_pg_offset;
+	vm_paddr_t a_phys, b_phys;
+	int cnt;
+
+	while (xfersize > 0) {
+		a_pg_offset = a_offset & PAGE_MASK;
+		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+		a_m = ma[a_offset >> PAGE_SHIFT];
+		a_phys = VM_PAGE_TO_PHYS(a_m);
+		b_pg_offset = b_offset & PAGE_MASK;
+		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+		b_m = mb[b_offset >> PAGE_SHIFT];
+		b_phys = VM_PAGE_TO_PHYS(b_m);
+		if (MIPS_DIRECT_MAPPABLE(a_phys) &&
+		    MIPS_DIRECT_MAPPABLE(b_phys)) {
+			pmap_flush_pvcache(a_m);
+			mips_dcache_wbinv_range_index(
+			    MIPS_PHYS_TO_DIRECT(b_phys), PAGE_SIZE);
+			a_cp = (char *)MIPS_PHYS_TO_DIRECT(a_phys) +
+			    a_pg_offset;
+			b_cp = (char *)MIPS_PHYS_TO_DIRECT(b_phys) +
+			    b_pg_offset;
+			bcopy(a_cp, b_cp, cnt);
+			mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
+		} else {
+			a_cp = (char *)pmap_lmem_map2(a_phys, b_phys);
+			b_cp = (char *)a_cp + PAGE_SIZE;
+			a_cp += a_pg_offset;
+			b_cp += b_pg_offset;
+			bcopy(a_cp, b_cp, cnt);
+			mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
+			pmap_lmem_unmap();
+		}
+		a_offset += cnt;
+		b_offset += cnt;
+		xfersize -= cnt;
+	}
+}
+
 /*
  * Returns true if the pmap's pv is one of the first
  * 16 pvs linked to from this page.  This count may

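mips picks one of two paths per chunk: when both physical pages fall
inside the direct-mapped segment it copies through
MIPS_PHYS_TO_DIRECT(), otherwise pmap_lmem_map2() maps the two pages
into a temporary back-to-back window.  Either way the destination
range gets an explicit mips_dcache_wbinv_range(), keeping the
virtually indexed data cache coherent with other mappings of the page.
The shape of the per-chunk decision (sketch):

	if (MIPS_DIRECT_MAPPABLE(a_phys) && MIPS_DIRECT_MAPPABLE(b_phys)) {
		/* Fast path: copy via the fixed direct-map addresses. */
	} else {
		a_cp = (char *)pmap_lmem_map2(a_phys, b_phys);
		b_cp = a_cp + PAGE_SIZE;   /* pages are mapped adjacently */
		/* ... bcopy() and write back the destination ... */
		pmap_lmem_unmap();
	}
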
Modified: stable/9/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- stable/9/sys/powerpc/aim/mmu_oea.c	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/powerpc/aim/mmu_oea.c	Thu Mar 28 06:31:04 2013	(r248814)
@@ -277,6 +277,8 @@ void moea_change_wiring(mmu_t, pmap_t, v
 void moea_clear_modify(mmu_t, vm_page_t);
 void moea_clear_reference(mmu_t, vm_page_t);
 void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
+void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
 void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
 void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);
@@ -322,6 +324,7 @@ static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
 	MMUMETHOD(mmu_clear_reference,	moea_clear_reference),
 	MMUMETHOD(mmu_copy_page,	moea_copy_page),
+	MMUMETHOD(mmu_copy_pages,	moea_copy_pages),
 	MMUMETHOD(mmu_enter,		moea_enter),
 	MMUMETHOD(mmu_enter_object,	moea_enter_object),
 	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
@@ -994,6 +997,30 @@ moea_copy_page(mmu_t mmu, vm_page_t msrc
 	bcopy((void *)src, (void *)dst, PAGE_SIZE);
 }
 
+void
+moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+	void *a_cp, *b_cp;
+	vm_offset_t a_pg_offset, b_pg_offset;
+	int cnt;
+
+	while (xfersize > 0) {
+		a_pg_offset = a_offset & PAGE_MASK;
+		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
+		    a_pg_offset;
+		b_pg_offset = b_offset & PAGE_MASK;
+		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
+		    b_pg_offset;
+		bcopy(a_cp, b_cp, cnt);
+		a_offset += cnt;
+		b_offset += cnt;
+		xfersize -= cnt;
+	}
+}
+
 /*
  * Zero a page of physical memory by temporarily mapping it into the tlb.
  */

Modified: stable/9/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- stable/9/sys/powerpc/aim/mmu_oea64.c	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/powerpc/aim/mmu_oea64.c	Thu Mar 28 06:31:04 2013	(r248814)
@@ -297,6 +297,8 @@ void moea64_change_wiring(mmu_t, pmap_t,
 void moea64_clear_modify(mmu_t, vm_page_t);
 void moea64_clear_reference(mmu_t, vm_page_t);
 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
+void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
 void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);
@@ -341,6 +343,7 @@ static mmu_method_t moea64_methods[] = {
 	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
 	MMUMETHOD(mmu_clear_reference,	moea64_clear_reference),
 	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
+	MMUMETHOD(mmu_copy_pages,	moea64_copy_pages),
 	MMUMETHOD(mmu_enter,		moea64_enter),
 	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
 	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
@@ -1111,6 +1114,72 @@ moea64_copy_page(mmu_t mmu, vm_page_t ms
 	}
 }
 
+static inline void
+moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+	void *a_cp, *b_cp;
+	vm_offset_t a_pg_offset, b_pg_offset;
+	int cnt;
+
+	while (xfersize > 0) {
+		a_pg_offset = a_offset & PAGE_MASK;
+		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
+		    a_pg_offset;
+		b_pg_offset = b_offset & PAGE_MASK;
+		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
+		    b_pg_offset;
+		bcopy(a_cp, b_cp, cnt);
+		a_offset += cnt;
+		b_offset += cnt;
+		xfersize -= cnt;
+	}
+}
+
+static inline void
+moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+	void *a_cp, *b_cp;
+	vm_offset_t a_pg_offset, b_pg_offset;
+	int cnt;
+
+	mtx_lock(&moea64_scratchpage_mtx);
+	while (xfersize > 0) {
+		a_pg_offset = a_offset & PAGE_MASK;
+		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+		moea64_set_scratchpage_pa(mmu, 0,
+		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
+		a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
+		b_pg_offset = b_offset & PAGE_MASK;
+		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+		moea64_set_scratchpage_pa(mmu, 1,
+		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
+		b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
+		bcopy(a_cp, b_cp, cnt);
+		a_offset += cnt;
+		b_offset += cnt;
+		xfersize -= cnt;
+	}
+	mtx_unlock(&moea64_scratchpage_mtx);
+}
+
+void
+moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+
+	if (hw_direct_map) {
+		moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
+		    xfersize);
+	} else {
+		moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
+		    xfersize);
+	}
+}
+
 void
 moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
 {

Modified: stable/9/sys/powerpc/booke/pmap.c
==============================================================================
--- stable/9/sys/powerpc/booke/pmap.c	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/powerpc/booke/pmap.c	Thu Mar 28 06:31:04 2013	(r248814)
@@ -277,6 +277,8 @@ static void		mmu_booke_clear_reference(m
 static void		mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
     vm_size_t, vm_offset_t);
 static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
+static void		mmu_booke_copy_pages(mmu_t, vm_page_t *,
+    vm_offset_t, vm_page_t *, vm_offset_t, int);
 static void		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
     vm_prot_t, boolean_t);
 static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
@@ -337,6 +339,7 @@ static mmu_method_t mmu_booke_methods[] 
 	MMUMETHOD(mmu_clear_reference,	mmu_booke_clear_reference),
 	MMUMETHOD(mmu_copy,		mmu_booke_copy),
 	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
+	MMUMETHOD(mmu_copy_pages,	mmu_booke_copy_pages),
 	MMUMETHOD(mmu_enter,		mmu_booke_enter),
 	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
 	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
@@ -2137,6 +2140,36 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t
 	mtx_unlock(&copy_page_mutex);
 }
 
+static inline void
+mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+	void *a_cp, *b_cp;
+	vm_offset_t a_pg_offset, b_pg_offset;
+	int cnt;
+
+	mtx_lock(&copy_page_mutex);
+	while (xfersize > 0) {
+		a_pg_offset = a_offset & PAGE_MASK;
+		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+		mmu_booke_kenter(mmu, copy_page_src_va,
+		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
+		a_cp = (char *)copy_page_src_va + a_pg_offset;
+		b_pg_offset = b_offset & PAGE_MASK;
+		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+		mmu_booke_kenter(mmu, copy_page_dst_va,
+		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
+		b_cp = (char *)copy_page_dst_va + b_pg_offset;
+		bcopy(a_cp, b_cp, cnt);
+		mmu_booke_kremove(mmu, copy_page_dst_va);
+		mmu_booke_kremove(mmu, copy_page_src_va);
+		a_offset += cnt;
+		b_offset += cnt;
+		xfersize -= cnt;
+	}
+	mtx_unlock(&copy_page_mutex);
+}
+
 /*
  * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
  * into virtual memory and using bzero to clear its contents. This is intended

Modified: stable/9/sys/powerpc/powerpc/mmu_if.m
==============================================================================
--- stable/9/sys/powerpc/powerpc/mmu_if.m	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/powerpc/powerpc/mmu_if.m	Thu Mar 28 06:31:04 2013	(r248814)
@@ -215,6 +215,14 @@ METHOD void copy_page {
 	vm_page_t	_dst;
 };
 
+METHOD void copy_pages {
+	mmu_t		_mmu;
+	vm_page_t	*_ma;
+	vm_offset_t	_a_offset;
+	vm_page_t	*_mb;
+	vm_offset_t	_b_offset;
+	int		_xfersize;
+};
 
 /**
  * @brief Create a mapping between a virtual/physical address pair in the

Modified: stable/9/sys/powerpc/powerpc/pmap_dispatch.c
==============================================================================
--- stable/9/sys/powerpc/powerpc/pmap_dispatch.c	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/powerpc/powerpc/pmap_dispatch.c	Thu Mar 28 06:31:04 2013	(r248814)
@@ -119,6 +119,16 @@ pmap_copy_page(vm_page_t src, vm_page_t 
 }
 
 void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+    vm_offset_t b_offset, int xfersize)
+{
+
+	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
+	    a_offset, mb, b_offset, xfersize);
+	MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
+}
+
+void
 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
     vm_prot_t prot, boolean_t wired)
 {

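powerpc funnels every pmap operation through a kobj interface:
mmu_if.m declares the copy_pages method, the build generates a
mmu_copy_pages_t typedef plus an MMU_COPY_PAGES() dispatcher from it,
and pmap_dispatch.c forwards the MI pmap_copy_pages() to whichever
implementation (moea, moea64, or booke) registered the method in its
MMUMETHOD table.  Assuming the usual kobj code generation, the
dispatcher in the generated mmu_if.h looks roughly like:

static __inline void
MMU_COPY_PAGES(mmu_t _mmu, vm_page_t *_ma, vm_offset_t _a_offset,
    vm_page_t *_mb, vm_offset_t _b_offset, int _xfersize)
{
	kobjop_t _m;

	KOBJOPLOOKUP(((kobj_t)_mmu)->ops, mmu_copy_pages);
	((mmu_copy_pages_t *)_m)(_mmu, _ma, _a_offset, _mb, _b_offset,
	    _xfersize);
}
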
Modified: stable/9/sys/sparc64/sparc64/pmap.c
==============================================================================
--- stable/9/sys/sparc64/sparc64/pmap.c	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/sparc64/sparc64/pmap.c	Thu Mar 28 06:31:04 2013	(r248814)
@@ -1928,6 +1928,14 @@ pmap_copy_page(vm_page_t msrc, vm_page_t
 	}
 }
 
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+    vm_offset_t b_offset, int xfersize)
+{
+
+	panic("pmap_copy_pages: not implemented");
+}
+
 /*
  * Returns true if the pmap's pv is one of the first
  * 16 pvs linked to from this page.  This count may

Modified: stable/9/sys/vm/pmap.h
==============================================================================
--- stable/9/sys/vm/pmap.h	Thu Mar 28 06:22:43 2013	(r248813)
+++ stable/9/sys/vm/pmap.h	Thu Mar 28 06:31:04 2013	(r248814)
@@ -108,6 +108,8 @@ void		 pmap_clear_modify(vm_page_t m);
 void		 pmap_clear_reference(vm_page_t m);
 void		 pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
 void		 pmap_copy_page(vm_page_t, vm_page_t);
+void		 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset,
+		    vm_page_t mb[], vm_offset_t b_offset, int xfersize);
 void		 pmap_enter(pmap_t, vm_offset_t, vm_prot_t, vm_page_t,
 		    vm_prot_t, boolean_t);
 void		 pmap_enter_object(pmap_t pmap, vm_offset_t start,

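The MI contract is minimal: both page runs must cover their starting
offset plus xfersize bytes, the copy may cross page boundaries in
either run, and the pages need not be mapped into any address space.
Note that on stable/9 sparc64 the method is still a panic stub, so MI
code cannot depend on it there yet.  A hypothetical call site
(illustrative only; ma[] and mb[] are page runs already held by the
caller):

	/* Copy 300 bytes spanning ma's first page boundary into mb. */
	pmap_copy_pages(ma, PAGE_SIZE - 100, mb, 50, 300);
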
