svn commit: r347354 - head/sys/powerpc/booke

Justin Hibbits jhibbits at FreeBSD.org
Wed May 8 16:15:29 UTC 2019


Author: jhibbits
Date: Wed May  8 16:15:28 2019
New Revision: 347354
URL: https://svnweb.freebsd.org/changeset/base/347354

Log:
  powerpc/booke: Rewrite pmap_sync_icache() a bit
  
  * Make mmu_booke_sync_icache() use the DMAP on 64-bit processes, so there
    is no need to map the page into the user's address space.  This removes
    the pvh_global_lock from the equation on 64-bit.
  * Don't map the page with user-readability on 32-bit.  I don't know what
    the chance is of a given user process accessing the NULL page while
    another process's page is mapped there, but it doesn't seem like a good
    idea to map it at NULL with user read permissions.
  * Only sync as much as we need to.  There are only two significant places
    where pmap_sync_icache() is used: proc_rwmem(), and the SIGILL
    second-chance for powerpc.  The SIGILL second chance is likely the most
    common, and only syncs 4 bytes, so this avoids the other 127 loop
    iterations (4096 bytes / 32-byte cacheline) in __syncicache() (see the
    sketch below).
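  A minimal sketch of the new per-page clamping arithmetic, written as a
  standalone userland program rather than the committed kernel code:
  PAGE_SIZE, the example addresses, and the printf() standing in for the
  real __syncicache() call are all assumptions for illustration.

	#include <stdio.h>

	#define	PAGE_SIZE	4096UL
	#define	PAGE_MASK	(PAGE_SIZE - 1)

	static unsigned long
	min_ul(unsigned long a, unsigned long b)
	{
		return (a < b ? a : b);
	}

	int
	main(void)
	{
		/* Hypothetical request: 8 bytes straddling a page boundary. */
		unsigned long va = 0x10007ffcUL;
		unsigned long sz = 8;

		while (sz > 0) {
			/*
			 * Clamp to the remainder of the current page, then
			 * to the remaining request; a 4-byte SIGILL
			 * second-chance sync thus touches one cacheline
			 * instead of a whole page.
			 */
			unsigned long sync_sz = PAGE_SIZE - (va & PAGE_MASK);
			sync_sz = min_ul(sync_sz, sz);
			printf("sync va=%#lx len=%lu\n", va, sync_sz);
			va += sync_sz;
			sz -= sync_sz;
		}
		return (0);
	}

  Run against this example it prints two syncs (4 bytes at the end of the
  first page, 4 at the start of the next), matching the loop in the diff
  below.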

Modified:
  head/sys/powerpc/booke/pmap.c

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c	Wed May  8 16:07:43 2019	(r347353)
+++ head/sys/powerpc/booke/pmap.c	Wed May  8 16:15:28 2019	(r347354)
@@ -2871,18 +2871,20 @@ static void
 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
 	pte_t *pte;
+	vm_paddr_t pa = 0;
+	int sync_sz, valid;
+#ifndef __powerpc64__
 	pmap_t pmap;
 	vm_page_t m;
 	vm_offset_t addr;
-	vm_paddr_t pa = 0;
-	int active, valid;
+	int active;
+#endif
  
-	va = trunc_page(va);
-	sz = round_page(sz);
-
+#ifndef __powerpc64__
 	rw_wlock(&pvh_global_lock);
 	pmap = PCPU_GET(curpmap);
 	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
+#endif
 	while (sz > 0) {
 		PMAP_LOCK(pm);
 		pte = pte_find(mmu, pm, va);
@@ -2890,24 +2892,34 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_
 		if (valid)
 			pa = PTE_PA(pte);
 		PMAP_UNLOCK(pm);
+		sync_sz = PAGE_SIZE - (va & PAGE_MASK);
+		sync_sz = min(sync_sz, sz);
 		if (valid) {
+#ifdef __powerpc64__
+			pa += (va & PAGE_MASK);
+			__syncicache((void *)PHYS_TO_DMAP(pa), sync_sz);
+#else
 			if (!active) {
 				/* Create a mapping in the active pmap. */
 				addr = 0;
 				m = PHYS_TO_VM_PAGE(pa);
 				PMAP_LOCK(pmap);
 				pte_enter(mmu, pmap, m, addr,
-				    PTE_SR | PTE_VALID | PTE_UR, FALSE);
-				__syncicache((void *)addr, PAGE_SIZE);
+				    PTE_SR | PTE_VALID, FALSE);
+				addr += (va & PAGE_MASK);
+				__syncicache((void *)addr, sync_sz);
 				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
 				PMAP_UNLOCK(pmap);
 			} else
-				__syncicache((void *)va, PAGE_SIZE);
+				__syncicache((void *)va, sync_sz);
+#endif
 		}
-		va += PAGE_SIZE;
-		sz -= PAGE_SIZE;
+		va += sync_sz;
+		sz -= sync_sz;
 	}
+#ifndef __powerpc64__
 	rw_wunlock(&pvh_global_lock);
+#endif
 }
 
 /*

