svn commit: r277414 - in head/sys/mips: include mips

Ruslan Bukin br at FreeBSD.org
Tue Jan 20 11:10:26 UTC 2015


Author: br
Date: Tue Jan 20 11:10:25 2015
New Revision: 277414
URL: https://svnweb.freebsd.org/changeset/base/277414

Log:
  Add 128-byte cache flushing routines.
  
  Leave CNMIPS untouched as these functions depend on the config2
  register.

Modified:
  head/sys/mips/include/cache_mipsNN.h
  head/sys/mips/mips/cache.c
  head/sys/mips/mips/cache_mipsNN.c

Modified: head/sys/mips/include/cache_mipsNN.h
==============================================================================
--- head/sys/mips/include/cache_mipsNN.h	Tue Jan 20 09:07:28 2015	(r277413)
+++ head/sys/mips/include/cache_mipsNN.h	Tue Jan 20 11:10:25 2015	(r277414)
@@ -57,7 +57,6 @@ void	mipsNN_pdcache_inv_range_16(vm_offs
 void	mipsNN_pdcache_inv_range_32(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_16(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_32(vm_offset_t, vm_size_t);
-#ifdef CPU_CNMIPS
 void	mipsNN_icache_sync_all_128(void);
 void	mipsNN_icache_sync_range_128(vm_offset_t, vm_size_t);
 void	mipsNN_icache_sync_range_index_128(vm_offset_t, vm_size_t);
@@ -66,7 +65,6 @@ void	mipsNN_pdcache_wbinv_range_128(vm_o
 void	mipsNN_pdcache_wbinv_range_index_128(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_inv_range_128(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_128(vm_offset_t, vm_size_t);
-#endif
 void	mipsNN_sdcache_wbinv_all_32(void);
 void	mipsNN_sdcache_wbinv_range_32(vm_offset_t, vm_size_t);
 void	mipsNN_sdcache_wbinv_range_index_32(vm_offset_t, vm_size_t);

Modified: head/sys/mips/mips/cache.c
==============================================================================
--- head/sys/mips/mips/cache.c	Tue Jan 20 09:07:28 2015	(r277413)
+++ head/sys/mips/mips/cache.c	Tue Jan 20 11:10:25 2015	(r277414)
@@ -104,7 +104,6 @@ mips_config_cache(struct mips_cpuinfo * 
 		mips_cache_ops.mco_icache_sync_range_index =
 		    mipsNN_icache_sync_range_index_32;
 		break;
-#ifdef CPU_CNMIPS
 	case 128:
 		mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_128;
 		mips_cache_ops.mco_icache_sync_range =
@@ -112,7 +111,6 @@ mips_config_cache(struct mips_cpuinfo * 
 		mips_cache_ops.mco_icache_sync_range_index =
 		    mipsNN_icache_sync_range_index_128;
 		break;
-#endif
 
 #ifdef MIPS_DISABLE_L1_CACHE
 	case 0:
@@ -172,7 +170,6 @@ mips_config_cache(struct mips_cpuinfo * 
 		    mipsNN_pdcache_wb_range_32;
 #endif
 		break;
-#ifdef CPU_CNMIPS
 	case 128:
 		mips_cache_ops.mco_pdcache_wbinv_all =
 		    mips_cache_ops.mco_intern_pdcache_wbinv_all =
@@ -188,7 +185,6 @@ mips_config_cache(struct mips_cpuinfo * 
 		    mips_cache_ops.mco_intern_pdcache_wb_range =
 		    mipsNN_pdcache_wb_range_128;
 		break;
-#endif		
 #ifdef MIPS_DISABLE_L1_CACHE
 	case 0:
 		mips_cache_ops.mco_pdcache_wbinv_all =

Modified: head/sys/mips/mips/cache_mipsNN.c
==============================================================================
--- head/sys/mips/mips/cache_mipsNN.c	Tue Jan 20 09:07:28 2015	(r277413)
+++ head/sys/mips/mips/cache_mipsNN.c	Tue Jan 20 11:10:25 2015	(r277414)
@@ -647,6 +647,225 @@ mipsNN_pdcache_wb_range_128(vm_offset_t 
 	SYNC;
 }
 
+#else
+
+void
+mipsNN_icache_sync_all_128(void)
+{
+	vm_offset_t va, eva;
+
+	va = MIPS_PHYS_TO_KSEG0(0);
+	eva = va + picache_size;
+
+	/*
+	 * Since we're hitting the whole thing, we don't have to
+	 * worry about the N different "ways".
+	 */
+
+	mips_intern_dcache_wbinv_all();
+
+	while (va < eva) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += (32 * 128);
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_icache_sync_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	mips_intern_dcache_wb_range(va, (eva - va));
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_icache_sync_range_index_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva, tmpva;
+	int i, stride, loopcount;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given.  So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	/*
+	 * GCC generates better code in the loops if we reference local
+	 * copies of these global variables.
+	 */
+	stride = picache_stride;
+	loopcount = picache_loopcount;
+
+	mips_intern_dcache_wbinv_range_index(va, (eva - va));
+
+	while ((eva - va) >= (32 * 128)) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_r4k_op_32lines_128(tmpva,
+			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += 32 * 128;
+	}
+
+	while (va < eva) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_op_r4k_line(tmpva,
+			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += 128;
+	}
+}
+
+void
+mipsNN_pdcache_wbinv_all_128(void)
+{
+	vm_offset_t va, eva;
+
+	va = MIPS_PHYS_TO_KSEG0(0);
+	eva = va + pdcache_size;
+
+	/*
+	 * Since we're hitting the whole thing, we don't have to
+	 * worry about the N different "ways".
+	 */
+
+	while (va < eva) {
+		cache_r4k_op_32lines_128(va,
+		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += (32 * 128);
+	}
+
+	SYNC;
+}
+
+
+void
+mipsNN_pdcache_wbinv_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va,
+		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_pdcache_wbinv_range_index_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva, tmpva;
+	int i, stride, loopcount;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given.  So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	/*
+	 * GCC generates better code in the loops if we reference local
+	 * copies of these global variables.
+	 */
+	stride = pdcache_stride;
+	loopcount = pdcache_loopcount;
+
+	while ((eva - va) >= (32 * 128)) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_r4k_op_32lines_128(tmpva,
+			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += 32 * 128;
+	}
+
+	while (va < eva) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_op_r4k_line(tmpva,
+			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += 128;
+	}
+}
+
+void
+mipsNN_pdcache_inv_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_pdcache_wb_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+		va += 128;
+	}
+
+	SYNC;
+}
+
 #endif
 
 void


More information about the svn-src-head mailing list