PERFORCE change 149479 for review

Rafal Jaworowski raj at FreeBSD.org
Tue Sep 9 15:09:51 UTC 2008


http://perforce.freebsd.org/chv.cgi?CH=149479

Change 149479 by raj at raj_mimi on 2008/09/09 15:09:37

	Provide L2 cache synchronization (invalidation) on ARM.
	
	Mainly the pmap module and context switching routines are affected.
	
	Obtained from:	Marvell, Semihalf

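	Note: the cpufunc.h side of this change (the cf_l2cache_* members of
	struct cpu_functions and the cpu_l2cache_*() wrappers called below)
	is not part of this diff. A minimal sketch of the assumed glue,
	inferred from the CF_* offsets added in genassym.c and the call
	sites added in pmap.c, would look roughly like this; the exact
	member order and signatures are assumptions, not taken from the
	actual header:

	struct cpu_functions {
		/* ... existing L1 cache hooks ... */
		void	(*cf_idcache_wbinv_all)(void);
		void	(*cf_dcache_wb_range)(vm_offset_t, vm_size_t);
		/* New L2 cache hooks (assumed signatures). */
		void	(*cf_l2cache_wbinv_all)(void);
		void	(*cf_l2cache_wbinv_range)(vm_offset_t, vm_size_t);
		void	(*cf_l2cache_inv_range)(vm_offset_t, vm_size_t);
		void	(*cf_l2cache_wb_range)(vm_offset_t, vm_size_t);
		/* ... */
	};

	#define	cpu_l2cache_wbinv_all()	cpufuncs.cf_l2cache_wbinv_all()
	#define	cpu_l2cache_wbinv_range(a, s) \
		cpufuncs.cf_l2cache_wbinv_range((a), (s))
	#define	cpu_l2cache_inv_range(a, s) \
		cpufuncs.cf_l2cache_inv_range((a), (s))
	#define	cpu_l2cache_wb_range(a, s) \
		cpufuncs.cf_l2cache_wb_range((a), (s))

	On cores without an L2 cache these hooks would presumably point at
	no-op routines, so the extra calls in pmap.c, locore.S and swtch.S
	cost only an indirect call.
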
Affected files ...

.. //depot/projects/arm/src/sys/arm/arm/elf_machdep.c#4 edit
.. //depot/projects/arm/src/sys/arm/arm/genassym.c#9 edit
.. //depot/projects/arm/src/sys/arm/arm/locore.S#27 edit
.. //depot/projects/arm/src/sys/arm/arm/pmap.c#41 edit
.. //depot/projects/arm/src/sys/arm/arm/swtch.S#11 edit

Differences ...

==== //depot/projects/arm/src/sys/arm/arm/elf_machdep.c#4 (text+ko) ====

@@ -214,6 +214,7 @@
 {
 
 	cpu_idcache_wbinv_all();
+	cpu_l2cache_wbinv_all();
 	cpu_tlb_flushID();
 	return (0);
 }

==== //depot/projects/arm/src/sys/arm/arm/genassym.c#9 (text+ko) ====

@@ -79,7 +79,9 @@
 ASSYM(CF_CONTROL, offsetof(struct cpu_functions, cf_control));
 ASSYM(CF_CONTEXT_SWITCH, offsetof(struct cpu_functions, cf_context_switch));
 ASSYM(CF_DCACHE_WB_RANGE, offsetof(struct cpu_functions, cf_dcache_wb_range));
+ASSYM(CF_L2CACHE_WB_RANGE, offsetof(struct cpu_functions, cf_l2cache_wb_range));
 ASSYM(CF_IDCACHE_WBINV_ALL, offsetof(struct cpu_functions, cf_idcache_wbinv_all));
+ASSYM(CF_L2CACHE_WBINV_ALL, offsetof(struct cpu_functions, cf_l2cache_wbinv_all));
 ASSYM(CF_TLB_FLUSHID_SE, offsetof(struct cpu_functions, cf_tlb_flushID_SE));
 ASSYM(CF_ICACHE_SYNC, offsetof(struct cpu_functions, cf_icache_sync_all));
 

==== //depot/projects/arm/src/sys/arm/arm/locore.S#27 (text+ko) ====

@@ -245,6 +245,8 @@
 	ldr	r0, .Lcpufuncs
 	mov	lr, pc
 	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
+	mov	lr, pc
+	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]
 
 	/*
 	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's

==== //depot/projects/arm/src/sys/arm/arm/pmap.c#41 (text+ko) ====

@@ -151,6 +151,7 @@
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
+#include <sys/ktr.h>
 #include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/msgbuf.h>
@@ -1196,27 +1197,104 @@
 }
 
 static PMAP_INLINE void
+pmap_l2cache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
+{
+	vm_size_t rest;
+	pd_entry_t *pde;
+	pt_entry_t *ptep;
+
+	rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
+
+	while (len > 0) {
+		CTR4(KTR_PMAP, "pmap_l2cache_wbinv_range: pmap %p is_kernel %d "
+		    "va 0x%08x len 0x%x ", pm, pm == pmap_kernel(), va, rest);
+		if (pmap_get_pde_pte(pm, va, &pde, &ptep) && l2pte_valid(*ptep))
+		    cpu_l2cache_wb_range(va, rest);
+
+		len -= rest;
+		va += rest;
+
+		rest = MIN(PAGE_SIZE, len);
+	}
+}
+
+static PMAP_INLINE void
 pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
 {
 
-	if (pmap_is_current(pm))
+	if (pmap_is_current(pm)) {
 		cpu_idcache_wbinv_range(va, len);
+		pmap_l2cache_wbinv_range(pm, va, len);
+	}
+}
+
+static PMAP_INLINE void
+pmap_l2cache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len)
+{
+	vm_size_t rest;
+	pd_entry_t *pde;
+	pt_entry_t *ptep;
+
+	rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
+
+	while (len > 0) {
+		CTR4(KTR_PMAP, "pmap_l2cache_wb_range: pmap %p is_kernel %d "
+		    "va 0x%08x len 0x%x ", pm, pm == pmap_kernel(), va, rest);
+		if (pmap_get_pde_pte(pm, va, &pde, &ptep) && l2pte_valid(*ptep))
+		    cpu_l2cache_wb_range(va, rest);
+
+		len -= rest;
+		va += rest;
+
+		rest = MIN(PAGE_SIZE, len);
+	}
 }
 
 static PMAP_INLINE void
-pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len,
-    boolean_t do_inv, boolean_t rd_only)
+pmap_l2cache_inv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
+{
+	vm_size_t rest;
+	pd_entry_t *pde;
+	pt_entry_t *ptep;
+
+	rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
+
+	while (len > 0) {
+		CTR4(KTR_PMAP, "pmap_l2cache_inv_range: pmap %p is_kernel %d "
+		    "va 0x%08x len 0x%x ", pm, pm == pmap_kernel(), va, rest);
+		if (pmap_get_pde_pte(pm, va, &pde, &ptep) && l2pte_valid(*ptep))
+		    cpu_l2cache_inv_range(va, rest);
+
+		len -= rest;
+		va += rest;
+
+		rest = MIN(PAGE_SIZE, len);
+	}
+}
+
+static PMAP_INLINE void
+pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv,
+    boolean_t rd_only)
 {
+	CTR4(KTR_PMAP, "pmap_dcache_wb_range: pmap %p is_kernel %d va 0x%08x "
+	    "len 0x%x ", pm, pm == pmap_kernel(), va, len);
+	CTR2(KTR_PMAP, " do_inv %d rd_only %d", do_inv, rd_only);
 
 	if (pmap_is_current(pm)) {
 		if (do_inv) {
-			if (rd_only)
+			if (rd_only) {
 				cpu_dcache_inv_range(va, len);
-			else
+				pmap_l2cache_inv_range(pm, va, len);
+			}
+			else {
 				cpu_dcache_wbinv_range(va, len);
+				pmap_l2cache_wbinv_range(pm, va, len);
+			}
 		} else
-		if (!rd_only)
+		if (!rd_only) {
 			cpu_dcache_wb_range(va, len);
+			pmap_l2cache_wb_range(pm, va, len);
+		}
 	}
 }
 
@@ -1224,16 +1302,20 @@
 pmap_idcache_wbinv_all(pmap_t pm)
 {
 
-	if (pmap_is_current(pm))
+	if (pmap_is_current(pm)) {
 		cpu_idcache_wbinv_all();
+		cpu_l2cache_wbinv_all();
+	}
 }
 
 static PMAP_INLINE void
 pmap_dcache_wbinv_all(pmap_t pm)
 {
 
-	if (pmap_is_current(pm))
+	if (pmap_is_current(pm)) {
 		cpu_dcache_wbinv_all();
+		cpu_l2cache_wbinv_all();
+	}
 }
 
 /*
@@ -2169,6 +2251,8 @@
 			PTE_SYNC(pdep);
 			cpu_dcache_wbinv_range((vm_offset_t)pdep,
 			    sizeof(*pdep));
+			cpu_l2cache_wbinv_range((vm_offset_t)pdep,
+			    sizeof(*pdep));
 			rv = 1;
 		}
 	} else {
@@ -2185,6 +2269,8 @@
 			PTE_SYNC(ptep);
 			cpu_dcache_wbinv_range((vm_offset_t)ptep,
 			    sizeof(*ptep));
+			cpu_l2cache_wbinv_range((vm_offset_t)ptep,
+			    sizeof(*ptep));
 			rv = 1;
 		}
 	}
@@ -2337,6 +2423,7 @@
 	}
 
 	cpu_dcache_wbinv_all();
+	cpu_l2cache_wbinv_all();
 	cpu_tlb_flushID();
 	cpu_cpwait();
 
@@ -2373,6 +2460,7 @@
 	mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF);
 	pmap_init_l1(l1, kernel_l1pt);
 	cpu_dcache_wbinv_all();
+	cpu_l2cache_wbinv_all();
 
 	virtual_avail = round_page(virtual_avail);
 	virtual_end = lastaddr;
@@ -2402,6 +2490,7 @@
 	struct pcb *pcb;
 	
 	pmap_idcache_wbinv_all(pmap);
+	cpu_l2cache_wbinv_all();
 	pmap_tlb_flushID(pmap);
 	cpu_cpwait();
 	if (vector_page < KERNBASE) {
@@ -2589,6 +2678,7 @@
 	 * rarely
 	 */
 	cpu_dcache_wbinv_all();
+	cpu_l2cache_wbinv_all();
 	cpu_tlb_flushD();
 	cpu_cpwait();
 	kernel_vm_end = pmap_curmaxkvaddr;
@@ -2615,6 +2705,7 @@
 	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	cpu_idcache_wbinv_all();
+	cpu_l2cache_wbinv_all();
 	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
 		if (pv->pv_flags & PVF_WIRED) {
 			/* The page is wired, cannot remove it now. */
@@ -2727,6 +2818,7 @@
 	    (uint32_t) pte, opte, *pte));
 	if (l2pte_valid(opte)) {
 		cpu_dcache_wbinv_range(va, PAGE_SIZE);
+		cpu_l2cache_wbinv_range(va, PAGE_SIZE);
 		cpu_tlb_flushD_SE(va);
 		cpu_cpwait();
 	} else {
@@ -2785,6 +2877,7 @@
 	opte = *pte;
 	if (l2pte_valid(opte)) {
 		cpu_dcache_wbinv_range(va, PAGE_SIZE);
+		cpu_l2cache_wbinv_range(va, PAGE_SIZE);
 		cpu_tlb_flushD_SE(va);
 		cpu_cpwait();
 		*pte = 0;
@@ -3053,6 +3146,9 @@
 	u_int flags;
 	int flush;
 
+	CTR4(KTR_PMAP, "pmap_protect: pmap %p sva 0x%08x eva 0x%08x prot %x",
+	    pm, sva, eva, prot);
+
 	if ((prot & VM_PROT_READ) == 0) {
 		pmap_remove(pm, sva, eva);
 		return;
@@ -3287,9 +3383,11 @@
 		 */
 		if (pmap_is_current(pmap) &&
 		    (oflags & PVF_NC) == 0 &&
-			    (opte & L2_S_PROT_W) != 0 &&
-			    (prot & VM_PROT_WRITE) == 0)
+		    (opte & L2_S_PROT_W) != 0 &&
+		    (prot & VM_PROT_WRITE) == 0) {
 			cpu_dcache_wb_range(va, PAGE_SIZE);
+			pmap_l2cache_wb_range(pmap, va, PAGE_SIZE);
+		}
 	} else {
 		/*
 		 * New mapping, or changing the backing page
@@ -3776,6 +3874,8 @@
 			if (l2pte_valid(pte) && pmap_is_current(pm)) {
 				if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) {
 					total++;
+        				cpu_l2cache_wbinv_range(sva,
+					    PAGE_SIZE);
 			   		if (is_exec) {
         					cpu_idcache_wbinv_range(sva,
 								 PAGE_SIZE);
@@ -3790,6 +3890,7 @@
 					 * for a current pmap
 					 */
 					cpu_idcache_wbinv_all();
+					cpu_l2cache_wbinv_all();
 					flushall = 1;
 					total++;
 				}
@@ -3843,9 +3944,11 @@
 	if (off || size != PAGE_SIZE) {
 		bzero(dstpg + off, size);
 		cpu_dcache_wbinv_range((vm_offset_t)(dstpg + off), size);
+		cpu_l2cache_wbinv_range((vm_offset_t)(dstpg + off), size);
 	} else {
 		bzero_page((vm_offset_t)dstpg);
 		cpu_dcache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE);
+		cpu_l2cache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE);
 	}
 #else
 
@@ -4140,6 +4243,8 @@
 	mtx_unlock(&cmtx);
 	cpu_dcache_inv_range(csrcp, PAGE_SIZE);
 	cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
+	cpu_l2cache_inv_range(csrcp, PAGE_SIZE);
+	cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE);
 }
 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
 
@@ -4202,6 +4307,7 @@
 #endif
 
 	cpu_dcache_wbinv_all();
+	cpu_l2cache_wbinv_all();
 	if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size &&
 	    _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst), 
 	    (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0)
@@ -4211,6 +4317,7 @@
 	dstpg = arm_ptovirt(VM_PAGE_TO_PHYS(dst));
 	bcopy_page(srcpg, dstpg);
 	cpu_dcache_wbinv_range(dstpg, PAGE_SIZE);
+	cpu_l2cache_wbinv_range(dstpg, PAGE_SIZE);
 #else
 	pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
 #endif

==== //depot/projects/arm/src/sys/arm/arm/swtch.S#11 (text+ko) ====

@@ -143,6 +143,8 @@
 	ldr	r9, .Lcpufuncs
 	mov	lr, pc
 	ldr	pc, [r9, #CF_IDCACHE_WBINV_ALL]
+	mov	lr, pc
+	ldr	pc, [r9, #CF_L2CACHE_WBINV_ALL]
 	ldr	r0, [r7, #(PCB_PL1VEC)]
 	ldr	r1, [r7, #(PCB_DACR)]
 	/*
@@ -172,6 +174,8 @@
 	movne	r1, #4
 	movne	lr, pc
 	ldrne	pc, [r9, #CF_DCACHE_WB_RANGE]
+	movne	lr, pc
+	ldrne	pc, [r9, #CF_L2CACHE_WB_RANGE]
 #endif /* PMAP_INCLUDE_PTE_SYNC */
 
 	/*
@@ -328,6 +332,8 @@
 	ldr	r1, .Lcpufuncs
 	mov	lr, pc
 	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
+	mov	lr, pc
+	ldr	pc, [r1, #CF_L2CACHE_WBINV_ALL]
 .Lcs_cache_purge_skipped:
 	/* rem: r6 = lock */
 	/* rem: r9 = new PCB */
@@ -360,6 +366,8 @@
 	mov	r1, #4
 	mov	lr, pc
 	ldr	pc, [r2, #CF_DCACHE_WB_RANGE]
+	mov	lr, pc
+	ldr	pc, [r2, #CF_L2CACHE_WB_RANGE]
 
 .Lcs_same_vector:
 #endif /* PMAP_INCLUDE_PTE_SYNC */

