PERFORCE change 71401 for review

John-Mark Gurney jmg at FreeBSD.org
Sun Feb 20 12:35:01 PST 2005


http://perforce.freebsd.org/chv.cgi?CH=71401

Change 71401 by jmg at jmg_carbon on 2005/02/20 20:34:56

	update the ARM cpufunc cache handling: replace the old ARM9
	flush-everything routines with NetBSD's set/index and ranged
	cache operations, and rework the CPU control register setup.
	
	Submitted by:	cognet
	Obtained from:	NetBSD

Affected files ...

.. //depot/projects/arm/src/sys/arm/arm/cpufunc.c#2 edit
.. //depot/projects/arm/src/sys/arm/arm/cpufunc_asm.S#2 edit
.. //depot/projects/arm/src/sys/arm/arm/cpufunc_asm_arm9.S#2 edit
.. //depot/projects/arm/src/sys/arm/include/cpufunc.h#2 edit

Differences ...

==== //depot/projects/arm/src/sys/arm/arm/cpufunc.c#2 (text+ko) ====

@@ -1,5 +1,6 @@
 /*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
 
+#define ARM9_CACHE_WRITE_THROUGH 1
 /*-
  * arm7tdmi support code Copyright (c) 2001 John Fremlin
  * arm8 support code Copyright (c) 1997 ARM Limited
@@ -426,17 +427,16 @@
 
 	/* Cache operations */
 
-	arm9_cache_syncI,		/* icache_sync_all	*/
-	arm9_cache_syncI_rng,		/* icache_sync_range	*/
+	arm9_icache_sync_all,		/* icache_sync_all	*/
+	arm9_icache_sync_range,		/* icache_sync_range	*/
 
-		/* ...cache in write-though mode... */
-	arm9_cache_flushD,		/* dcache_wbinv_all	*/
-	arm9_cache_flushD_rng,		/* dcache_wbinv_range	*/
-	arm9_cache_flushD_rng,		/* dcache_inv_range	*/
-	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
+	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
+	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
+/*XXX*/	arm9_dcache_wbinv_range,	/* dcache_inv_range	*/
+	arm9_dcache_wb_range,		/* dcache_wb_range	*/
 
-	arm9_cache_flushID,		/* idcache_wbinv_all	*/
-	arm9_cache_flushID_rng,		/* idcache_wbinv_range	*/
+	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
+	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
 
 	/* Other functions */
 
@@ -973,6 +973,11 @@
 		cpufuncs = arm9_cpufuncs;
 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
 		get_cachetype_cp15();
+		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
+		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
+		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
+		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
+		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
 #ifdef ARM9_CACHE_WRITE_THROUGH
 		pmap_pte_init_arm9();
 #else
@@ -1844,14 +1849,14 @@
 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
-	    | CPU_CONTROL_WBUF_ENABLE;
+	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
-		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
-		 | CPU_CONTROL_CPCLK;
+		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
+		 | CPU_CONTROL_ROUNDROBIN;
 
 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
@@ -1868,8 +1873,10 @@
 	/* Clear out the cache */
 	cpu_idcache_wbinv_all();
 
+	printf("Old value: %x, new value : %x\n", cpu_control(0, 0), cpuctrl);
+
 	/* Set the control register */
-	cpu_control(0xffffffff, cpuctrl);
+	cpu_control(cpuctrlmask, cpuctrl);
 	ctrl = cpuctrl;
 
 }
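
For reference, the four arm9_dcache_* values computed above parameterize
the set/index loops added to cpufunc_asm_arm9.S below.  A minimal
standalone sketch of the arithmetic, assuming the arm_dcache_l2_*
variables hold log2 quantities (as get_cachetype_cp15() derives them
from the CP15 cache type register) and using a hypothetical
ARM920T-like geometry (16KB D-cache, 64-way, 32-byte lines):

	#include <stdio.h>

	int
	main(void)
	{
		/* log2 values: 32-byte lines, 8 sets, 64 ways */
		unsigned linesize = 5, nsets = 3, assoc = 6;

		/* the set number sits just above the line offset ... */
		unsigned sets_inc  = 1U << linesize;             /* 0x20 */
		unsigned sets_max  = (1U << (linesize + nsets))
		    - sets_inc;                                  /* 0xe0 */

		/* ... and the way number occupies the top bits */
		unsigned index_inc = 1U << (32 - assoc);   /* 0x04000000 */
		unsigned index_max = 0U - index_inc;       /* 0xfc000000 */

		printf("sets 0..%#x step %#x, ways %#x down to 0 step %#x\n",
		    sets_max, sets_inc, index_max, index_inc);
		return (0);
	}

Each loop iteration ORs one value from each sequence together and hands
the result to an MCR c7 set/index operation, touching every cache line
exactly once.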

==== //depot/projects/arm/src/sys/arm/arm/cpufunc_asm.S#2 (text+ko) ====

@@ -121,9 +121,7 @@
 
 	teq	r2, r3			/* Only write if there is a change */
 	mcrne	p15, 0, r2, c1, c0, 0	/* Write new control register */
-	#if 0
 	mov	r0, r3			/* Return old value */
-	#endif
 
 	RET
 .Lglou:
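
With the #if 0 removed, cpufunc_control() always hands back the previous
control register value, which is what lets the new debugging printf in
cpufunc.c read the register via cpu_control(0, 0).  A C model of the
contract (a sketch only; the real implementation is the assembly above,
and it assumes the usual NetBSD mrc/bic/eor preamble that precedes the
context shown):

	typedef unsigned int u_int;

	static u_int ctrl;		/* stands in for CP15 c1 */

	u_int
	model_cpu_control(u_int clear, u_int eor)
	{
		u_int old = ctrl;
		u_int new = (old & ~clear) ^ eor;	/* bic; eor */

		if (new != old)		/* teq r2, r3; mcrne ... */
			ctrl = new;
		return (old);		/* mov r0, r3, now compiled in */
	}

So cpu_control(0, 0) is a pure read, and cpu_control(cpuctrlmask,
cpuctrl) clears every bit the mask covers before XORing the desired
configuration back in.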

==== //depot/projects/arm/src/sys/arm/arm/cpufunc_asm_arm9.S#2 (text+ko) ====

@@ -1,7 +1,7 @@
-/*	$NetBSD: cpufunc_asm_arm9.S,v 1.2 2002/01/29 15:27:29 rearnsha Exp $	*/
+/*	$NetBSD: cpufunc_asm_arm9.S,v 1.3 2004/01/26 15:54:16 rearnsha Exp $	*/
 
-/*-
- * Copyright (c) 2001 ARM Limited
+/*
+ * Copyright (c) 2001, 2004 ARM Limited
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -29,11 +29,9 @@
  * SUCH DAMAGE.
  *
  * ARM9 assembly functions for CPU / MMU / TLB specific operations
- *
  */
  
 #include <machine/asm.h>
-__FBSDID("$FreeBSD: src/sys/arm/arm/cpufunc_asm_arm9.S,v 1.3 2005/01/05 21:58:47 imp Exp $");
 
 /*
  * Functions to set the MMU Translation Table Base register
@@ -42,17 +40,14 @@
  * addresses that are about to change.
  */
 ENTRY(arm9_setttb)
-	/*
-	 * Since we use the caches in write-through mode, we only have to
-	 * drain the write buffers and flush the caches.
-	 */
-	mcr	p15, 0, r0, c7, c7, 0	/* flush I+D caches */
-	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
+	stmfd	sp!, {r0, lr}
+	bl	_C_LABEL(arm9_idcache_wbinv_all)
+	ldmfd	sp!, {r0, lr}
 
 	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */
 
 	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
-	RET
+	mov	pc, lr
 
 /*
  * TLB functions
@@ -60,57 +55,159 @@
 ENTRY(arm9_tlb_flushID_SE)
 	mcr	p15, 0, r0, c8, c6, 1	/* flush D tlb single entry */
 	mcr	p15, 0, r0, c8, c5, 1	/* flush I tlb single entry */
-	RET
+	mov	pc, lr
 
 /*
- * Cache functions
+ * Cache operations.  For the entire cache we use the set/index
+ * operations.
  */
-ENTRY(arm9_cache_flushID)
-	mcr	p15, 0, r0, c7, c7, 0	/* flush I+D cache */
-	RET
+	s_max	.req r0
+	i_max	.req r1
+	s_inc	.req r2
+	i_inc	.req r3
 
-ENTRY(arm9_cache_flushID_SE)
-	mcr	p15, 0, r0, c7, c5, 1	/* flush one entry from I cache */
-	mcr	p15, 0, r0, c7, c6, 1	/* flush one entry from D cache */
-	RET
+ENTRY_NP(arm9_icache_sync_range)
+	ldr	ip, .Larm9_line_size
+	cmp	r1, #0x4000
+	bcs	.Larm9_icache_sync_all
+	ldr	ip, [ip]
+	sub	r3, ip, #1
+	and	r2, r0, r3
+	add	r1, r1, r2
+	bic	r0, r0, r3
+.Larm9_sync_next:
+	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
+	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bpl	.Larm9_sync_next
+	mov	pc, lr
 
-ENTRY(arm9_cache_flushI)
-	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache */
-	RET
+ENTRY_NP(arm9_icache_sync_all)
+.Larm9_icache_sync_all:
+	/*
+	 * We assume that the code here can never be out of sync with the
+	 * dcache, so that we can safely flush the Icache and fall through
+	 * into the Dcache cleaning code.
+	 */
+	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
+	/* Fall through to clean Dcache. */
 
-ENTRY(arm9_cache_flushI_SE)
-	mcr	p15, 0, r0, c7, c5, 1	/* flush one entry from I cache */
-	RET
+.Larm9_dcache_wb:
+	ldr	ip, .Larm9_cache_data
+	ldmia	ip, {s_max, i_max, s_inc, i_inc}
+.Lnext_set:
+	orr	ip, s_max, i_max
+.Lnext_index:
+	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
+	sub	ip, ip, i_inc
+	tst	ip, i_max		/* Index 0 is last one */
+	bne	.Lnext_index		/* Next index */
+	mcr	p15, 0, ip, c7, c10, 2	/* Clean D cache SE with Set/Index */
+	subs	s_max, s_max, s_inc
+	bpl	.Lnext_set		/* Next set */
+	mov	pc, lr
 
-ENTRY(arm9_cache_flushD)
-	mcr	p15, 0, r0, c7, c6, 0	/* flush D cache */
-	RET
+.Larm9_line_size:
+	.word	_C_LABEL(arm_pdcache_line_size)
 
-ENTRY(arm9_cache_flushD_SE)
-	mcr	p15, 0, r0, c7, c6, 1	/* flush one entry from D cache */
-	RET
-
-ENTRY(arm9_cache_cleanID)
-	mcr	p15, 0, r0, c7, c10, 4
-	RET
-
+ENTRY(arm9_dcache_wb_range)
+	ldr	ip, .Larm9_line_size
+	cmp	r1, #0x4000
+	bcs	.Larm9_dcache_wb
+	ldr	ip, [ip]
+	sub	r3, ip, #1
+	and	r2, r0, r3
+	add	r1, r1, r2
+	bic	r0, r0, r3
+.Larm9_wb_next:
+	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bpl	.Larm9_wb_next
+	mov	pc, lr
+	
+ENTRY(arm9_dcache_wbinv_range)
+	ldr	ip, .Larm9_line_size
+	cmp	r1, #0x4000
+	bcs	.Larm9_dcache_wbinv_all
+	ldr	ip, [ip]
+	sub	r3, ip, #1
+	and	r2, r0, r3
+	add	r1, r1, r2
+	bic	r0, r0, r3
+.Larm9_wbinv_next:
+	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bpl	.Larm9_wbinv_next
+	mov	pc, lr
+	
 /*
- * Soft functions
+ * Note, we must not invalidate everything.  If the range is too big we
+ * must use wb-inv of the entire cache.
  */
-ENTRY(arm9_cache_syncI) 
-	mcr	p15, 0, r0, c7, c7, 0	/* flush I+D caches */
-	RET
+ENTRY(arm9_dcache_inv_range)
+	ldr	ip, .Larm9_line_size
+	cmp	r1, #0x4000
+	bcs	.Larm9_dcache_wbinv_all
+	ldr	ip, [ip]
+	sub	r3, ip, #1
+	and	r2, r0, r3
+	add	r1, r1, r2
+	bic	r0, r0, r3
+.Larm9_inv_next:
+	mcr	p15, 0, r0, c7, c6, 1	/* Invalidate D cache SE with VA */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bpl	.Larm9_inv_next
+	mov	pc, lr
+
+ENTRY(arm9_idcache_wbinv_range)
+	ldr	ip, .Larm9_line_size
+	cmp	r1, #0x4000
+	bcs	.Larm9_idcache_wbinv_all
+	ldr	ip, [ip]
+	sub	r3, ip, #1
+	and	r2, r0, r3
+	add	r1, r1, r2
+	bic	r0, r0, r3
+.Larm9_id_wbinv_next:
+	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
+	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
+	add	r0, r0, ip
+	subs	r1, r1, ip
+	bpl	.Larm9_id_wbinv_next
+	mov	pc, lr
 
-ENTRY_NP(arm9_cache_flushID_rng)
-	b	_C_LABEL(arm9_cache_flushID)
+ENTRY_NP(arm9_idcache_wbinv_all)
+.Larm9_idcache_wbinv_all:
+	/*
+	 * We assume that the code here can never be out of sync with the
+	 * dcache, so that we can safely flush the Icache and fall through
+	 * into the Dcache purging code.
+	 */
+	mcr	p15, 0, r0, c7, c5, 0	/* Flush I&D cache */
+	/* Fall through */
 
-ENTRY_NP(arm9_cache_flushD_rng)
-	/* Same as above, but D cache only */
-	b	_C_LABEL(arm9_cache_flushD)
+ENTRY(arm9_dcache_wbinv_all)
+.Larm9_dcache_wbinv_all:
+	ldr	ip, .Larm9_cache_data
+	ldmia	ip, {s_max, i_max, s_inc, i_inc}
+.Lnext_set_inv:
+	orr	ip, s_max, i_max
+.Lnext_index_inv:
+	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
+	sub	ip, ip, i_inc
+	tst	ip, i_max		/* Index 0 is last one */
+	bne	.Lnext_index_inv		/* Next index */
+	mcr	p15, 0, ip, c7, c14, 2	/* Purge D cache SE with Set/Index */
+	subs	s_max, s_max, s_inc
+	bpl	.Lnext_set_inv		/* Next set */
+	mov	pc, lr
 
-ENTRY_NP(arm9_cache_syncI_rng)
-	/* Similarly, for I cache sync  */
-	b	 _C_LABEL(arm9_cache_syncI)
+.Larm9_cache_data:
+	.word	_C_LABEL(arm9_dcache_sets_max)
 
 /*
  * Context switch.
@@ -134,4 +231,25 @@
 	nop
 	nop
 	nop
-	RET
+	mov	pc, lr
+
+	.bss
+
+/* XXX The following macros should probably be moved to asm.h */
+#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
+#define C_OBJECT(x)	_DATA_OBJECT(_C_LABEL(x))
+
+/*
+ * Parameters for the cache cleaning code.  Note that the order of these
+ * four variables is assumed in the code above.  Hence the reason for 
+ * declaring them in the assembler file.
+ */
+	.align 0
+C_OBJECT(arm9_dcache_sets_max)
+	.space	4
+C_OBJECT(arm9_dcache_index_max)
+	.space	4
+C_OBJECT(arm9_dcache_sets_inc)
+	.space	4
+C_OBJECT(arm9_dcache_index_inc)
+	.space	4
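
The two loop shapes above repeat throughout the new code, so a
hypothetical C rendering may help when reading the assembly.
clean_line_idx() and clean_line_va() are made-up stand-ins for the
"mcr p15, 0, rX, c7, c10, 2" and "mcr p15, 0, rX, c7, c10, 1"
operations; the variables are the ones declared in the .bss block
above.

	typedef unsigned int u_int;

	extern void clean_line_idx(u_int setway);	/* c7, c10, 2 */
	extern void clean_line_va(u_int va);		/* c7, c10, 1 */
	extern u_int arm9_dcache_sets_max, arm9_dcache_sets_inc;
	extern u_int arm9_dcache_index_max, arm9_dcache_index_inc;
	extern u_int arm_pdcache_line_size;

	/* Whole-cache clean: every set, every way (.Larm9_dcache_wb). */
	static void
	dcache_wb_all(void)
	{
		u_int set = arm9_dcache_sets_max;

		do {
			u_int idx = arm9_dcache_index_max;

			while (idx & arm9_dcache_index_max) {
				clean_line_idx(set | idx);
				idx -= arm9_dcache_index_inc;
			}
			clean_line_idx(set);	/* way 0 is the last one */
		} while ((int)(set -= arm9_dcache_sets_inc) >= 0);
	}

	/* Ranged clean (arm9_dcache_wb_range): align the start down to
	 * a line boundary, extend the length to match, and punt to the
	 * whole-cache version once looping per line would cost more
	 * than cleaning everything (the 0x4000, i.e. 16KB, cutoff). */
	static void
	dcache_wb_range(u_int va, u_int len)
	{
		u_int line = arm_pdcache_line_size;

		if (len >= 0x4000) {
			dcache_wb_all();
			return;
		}
		len += va & (line - 1);
		va &= ~(line - 1);
		do {		/* subs/bpl: loop while remainder >= 0 */
			clean_line_va(va);
			va += line;
		} while ((int)(len -= line) >= 0);
	}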

==== //depot/projects/arm/src/sys/arm/include/cpufunc.h#2 (text+ko) ====

@@ -322,23 +322,25 @@
 
 void	arm9_tlb_flushID_SE	(u_int va);
 
-void	arm9_cache_flushID	(void);
-void	arm9_cache_flushID_SE	(u_int);
-void	arm9_cache_flushI	(void);
-void	arm9_cache_flushI_SE	(u_int);
-void	arm9_cache_flushD	(void);
-void	arm9_cache_flushD_SE	(u_int);
+void	arm9_icache_sync_all	__P((void));
+void	arm9_icache_sync_range	__P((vm_offset_t, vm_size_t));
 
-void	arm9_cache_cleanID	(void);
+void	arm9_dcache_wbinv_all	__P((void));
+void	arm9_dcache_wbinv_range __P((vm_offset_t, vm_size_t));
+void	arm9_dcache_inv_range	__P((vm_offset_t, vm_size_t));
+void	arm9_dcache_wb_range	__P((vm_offset_t, vm_size_t));
 
-void	arm9_cache_syncI	(void);
-void	arm9_cache_flushID_rng	(vm_offset_t, vm_size_t);
-void	arm9_cache_flushD_rng	(vm_offset_t, vm_size_t);
-void	arm9_cache_syncI_rng	(vm_offset_t, vm_size_t);
+void	arm9_idcache_wbinv_all	__P((void));
+void	arm9_idcache_wbinv_range __P((vm_offset_t, vm_size_t));
 
 void	arm9_context_switch	(void);
 
 void	arm9_setup		(char *string);
+
+extern unsigned arm9_dcache_sets_max;
+extern unsigned arm9_dcache_sets_inc;
+extern unsigned arm9_dcache_index_max;
+extern unsigned arm9_dcache_index_inc;
 #endif
 
 #ifdef CPU_ARM10
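
One cosmetic byproduct of the NetBSD import shows in the prototypes
above: they come wrapped in __P(()), the old BSD K&R-compatibility
macro from <sys/cdefs.h>.  Under an ANSI compiler it is a no-op:

	#define	__P(protos)	protos		/* ANSI case */

	void	arm9_dcache_wb_range	__P((vm_offset_t, vm_size_t));
	/* expands to:
	   void	arm9_dcache_wb_range	(vm_offset_t, vm_size_t); */

while a pre-ANSI compiler gets a definition of __P that drops the
argument list entirely.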

