svn commit: r264994 - in head/sys/arm: arm include

Ian Lepore ian at FreeBSD.org
Sun Apr 27 00:46:02 UTC 2014


Author: ian
Date: Sun Apr 27 00:46:01 2014
New Revision: 264994
URL: http://svnweb.freebsd.org/changeset/base/264994

Log:
  Provide a proper armv7 implementation of icache_sync_all rather than
  using armv7_idcache_wbinv_all, because wbinv_all doesn't broadcast the
  operation to other cores.  In elf_cpu_load_file() use icache_sync_all()
  and explain why it's needed (and why other sync operations aren't).
  
  As part of doing this, all callers of cpu_icache_sync_all() were
  inspected to ensure they weren't relying on the old side effect of
  doing a wbinv_all along with the icache work.
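
For context on the "doesn't broadcast" point in the log: set/way D-cache
maintenance and the plain ICIALLU I-cache invalidate are architecturally
local to the core that executes them, while ICIALLUIS (the encoding the new
routine below uses) is broadcast to every core in the Inner Shareable
domain.  A rough standalone illustration of the two encodings, assuming
FreeBSD's __asm/__volatile from <sys/cdefs.h>; the helper names are made up
and this is not code from the tree:

/* Invalidate the I-cache of the executing core only (ICIALLU). */
static __inline void
icache_inv_local(void)
{
	__asm __volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (0) : "memory");
	__asm __volatile("dsb");
	__asm __volatile("isb");
}

/* Invalidate the I-cache on all cores in the Inner Shareable domain (ICIALLUIS). */
static __inline void
icache_inv_all_is(void)
{
	__asm __volatile("mcr p15, 0, %0, c7, c1, 0" : : "r" (0) : "memory");
	__asm __volatile("dsb");
	__asm __volatile("isb");
}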

Modified:
  head/sys/arm/arm/cpufunc.c
  head/sys/arm/arm/cpufunc_asm_armv7.S
  head/sys/arm/arm/elf_machdep.c
  head/sys/arm/include/cpufunc.h

Modified: head/sys/arm/arm/cpufunc.c
==============================================================================
--- head/sys/arm/arm/cpufunc.c	Sun Apr 27 00:45:08 2014	(r264993)
+++ head/sys/arm/arm/cpufunc.c	Sun Apr 27 00:46:01 2014	(r264994)
@@ -769,7 +769,7 @@ struct cpu_functions cortexa_cpufuncs = 
 	
 	/* Cache operations */
 	
-	armv7_idcache_wbinv_all,         /* icache_sync_all      */
+	armv7_icache_sync_all, 	        /* icache_sync_all      */
 	armv7_icache_sync_range,        /* icache_sync_range    */
 	
 	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
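
For readers without the rest of cpufunc.c at hand: cortexa_cpufuncs is one of
several per-CPU function tables in this file, and at boot the matching table
is copied into the global cpufuncs that the cpu_*() wrapper macros call
through, so this one-line change redirects every cpu_icache_sync_all() call
on Cortex-A systems.  A minimal sketch of that installation step, assuming
the conventional names from <machine/cpufunc.h> (the real selection logic
keys off the CPU ID registers and is not shown here):

extern struct cpu_functions cortexa_cpufuncs;
struct cpu_functions cpufuncs;		/* the table the wrappers dispatch through */

static void
install_cortexa_cpufuncs(void)		/* hypothetical helper, for illustration only */
{
	cpufuncs = cortexa_cpufuncs;	/* cf_icache_sync_all is now armv7_icache_sync_all */
}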

Modified: head/sys/arm/arm/cpufunc_asm_armv7.S
==============================================================================
--- head/sys/arm/arm/cpufunc_asm_armv7.S	Sun Apr 27 00:45:08 2014	(r264993)
+++ head/sys/arm/arm/cpufunc_asm_armv7.S	Sun Apr 27 00:46:01 2014	(r264994)
@@ -250,6 +250,13 @@ ENTRY(armv7_idcache_wbinv_range)
 	RET
 END(armv7_idcache_wbinv_range)
 
+ENTRY_NP(armv7_icache_sync_all)
+	mcr	p15, 0, r0, c7, c1, 0	/* Invalidate all I cache to PoU Inner Shareable */
+	isb				/* instruction synchronization barrier */
+	dsb				/* data synchronization barrier */
+	RET
+END(armv7_icache_sync_all)
+
 ENTRY_NP(armv7_icache_sync_range)
 	ldr	ip, .Larmv7_line_size
 .Larmv7_sync_next:
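
The context lines above also show the top of armv7_icache_sync_range.  The
useful contrast is that a by-range sync still has to clean the D-cache to the
point of unification before invalidating the I-side, because its callers have
just written instructions through the data side, whereas the new sync_all is
invalidate-only; by-VA maintenance is also broadcast by the multiprocessing
extensions, which is why only the whole-cache variant needed the IS encoding.
A hypothetical C rendering of a by-range sync, assuming a fixed 32-byte line
(the assembly reads the real line size via .Larmv7_line_size) and the usual
kernel headers for vm_offset_t/vm_size_t; this is an illustration, not the
routine from the tree:

#define	LINE_SIZE	32

static void
icache_sync_range_sketch(vm_offset_t va, vm_size_t len)
{
	vm_offset_t eva;

	eva = va + len;
	for (va &= ~(LINE_SIZE - 1); va < eva; va += LINE_SIZE) {
		__asm __volatile("mcr p15, 0, %0, c7, c11, 1" : : "r" (va));	/* DCCMVAU: clean D line to PoU */
		__asm __volatile("mcr p15, 0, %0, c7, c5, 1" : : "r" (va));	/* ICIMVAU: invalidate I line */
	}
	__asm __volatile("dsb");
	__asm __volatile("isb");
}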

Modified: head/sys/arm/arm/elf_machdep.c
==============================================================================
--- head/sys/arm/arm/elf_machdep.c	Sun Apr 27 00:45:08 2014	(r264993)
+++ head/sys/arm/arm/elf_machdep.c	Sun Apr 27 00:46:01 2014	(r264994)
@@ -220,9 +220,19 @@ int
 elf_cpu_load_file(linker_file_t lf __unused)
 {
 
-	cpu_idcache_wbinv_all();
-	cpu_l2cache_wbinv_all();
-	cpu_tlb_flushID();
+	/*
+	 * The pmap code does not do an icache sync upon establishing executable
+	 * mappings in the kernel pmap.  It's an optimization based on the fact
+	 * that kernel memory allocations always have EXECUTABLE protection even
+	 * when the memory isn't going to hold executable code.  The only time
+	 * kernel memory holding instructions does need a sync is after loading
+	 * a kernel module, and that's when this function gets called.  Normal
+	 * data cache maintenance has already been done by the IO code, and TLB
+	 * maintenance has been done by the pmap code, so all we have to do here
+	 * is invalidate the instruction cache (which also invalidates the
+	 * branch predictor cache on platforms that have one).
+	 */
+	cpu_icache_sync_all();
 	return (0);
 }
 

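The inspection of callers mentioned in the log matters here: after this change
cpu_icache_sync_all() no longer writes dirty data back, so any caller that has
itself just written instructions through the D-side should be using the ranged
sync on the range it wrote.  A hypothetical example of such a caller (made-up
names, assuming <sys/systm.h> and <machine/cpufunc.h>); the ranged sync cleans
the written lines to the point of unification before invalidating the I-side,
so it never depended on the old wbinv_all side effect:

static void
install_trampoline(vm_offset_t dst, const void *insns, size_t len)
{
	memcpy((void *)dst, insns, len);	/* new instructions go through the D-cache */
	cpu_icache_sync_range(dst, len);	/* clean D to PoU, then invalidate I, for this range */
}
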
Modified: head/sys/arm/include/cpufunc.h
==============================================================================
--- head/sys/arm/include/cpufunc.h	Sun Apr 27 00:45:08 2014	(r264993)
+++ head/sys/arm/include/cpufunc.h	Sun Apr 27 00:46:01 2014	(r264994)
@@ -411,6 +411,7 @@ void	armv6_idcache_wbinv_range	(vm_offse
 void	armv7_setttb			(u_int);
 void	armv7_tlb_flushID		(void);
 void	armv7_tlb_flushID_SE		(u_int);
+void	armv7_icache_sync_all		();
 void	armv7_icache_sync_range		(vm_offset_t, vm_size_t);
 void	armv7_idcache_wbinv_range	(vm_offset_t, vm_size_t);
 void	armv7_idcache_inv_all		(void);
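
Elsewhere in cpufunc.h, outside this hunk, the prototype added above is
reached through the cpufuncs wrapper macros that MI code such as
elf_cpu_load_file() uses.  Roughly, assuming the conventional cf_* member
names (a sketch, not a quote of the header):

#define	cpu_icache_sync_all()		cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s)	cpufuncs.cf_icache_sync_range((a), (s))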

