svn commit: r368153 - head/sys/arm/arm

Michal Meloun mmel at FreeBSD.org
Sun Nov 29 15:04:41 UTC 2020


Author: mmel
Date: Sun Nov 29 15:04:39 2020
New Revision: 368153
URL: https://svnweb.freebsd.org/changeset/base/368153

Log:
  Remove remaining support for big-endian byte order.
  Big-endian support effectively ended with the removal of the ARMv4 sub-architecture.
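
The deleted #ifdef __ARMEB__ blocks below all follow the same idiom: when
copying through a source that is offset from word alignment, two adjacent
aligned words are spliced together with a pair of shifts whose directions
depend on byte order. A minimal C sketch of the little-endian case that
remains (function and variable names are illustrative, not from the tree):

    #include <stdint.h>

    /*
     * Copy words from a source that starts one byte past a word boundary.
     * "cur" holds the aligned word covering the first three source bytes.
     * On little-endian ARM the three high bytes of the current word become
     * the low bytes of the output, and the low byte of the next word fills
     * the top; this mirrors "mov r4, ip, lsr #8" / "orr r4, r4, r5, lsl #24".
     */
    static void
    copy_src_off1_le(uint32_t *dst, const uint32_t *src_word, int nwords)
    {
            uint32_t cur = *src_word++;

            while (nwords-- > 0) {
                    uint32_t next = *src_word++;

                    *dst++ = (cur >> 8) | (next << 24);
                    cur = next;
            }
    }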

Modified:
  head/sys/arm/arm/bcopyinout_xscale.S
  head/sys/arm/arm/cpufunc.c
  head/sys/arm/arm/fusu.S
  head/sys/arm/arm/in_cksum_arm.S
  head/sys/arm/arm/support.S
  head/sys/arm/arm/vm_machdep.c

Modified: head/sys/arm/arm/bcopyinout_xscale.S
==============================================================================
--- head/sys/arm/arm/bcopyinout_xscale.S	Sun Nov 29 14:21:16 2020	(r368152)
+++ head/sys/arm/arm/bcopyinout_xscale.S	Sun Nov 29 15:04:39 2020	(r368153)
@@ -298,25 +298,12 @@ ENTRY(copyin)
 	b	.Lcopyin_bad1
 
 .Lcopyin_bad1_loop16:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #8
-#else
 	mov	r4, ip, lsr #8
-#endif
 	ldrt	r5, [r0], #0x04
 	pld	[r0, #0x018]
 	ldrt	r6, [r0], #0x04
 	ldrt	r7, [r0], #0x04
 	ldrt	ip, [r0], #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, r5, lsr #24
-	mov	r5, r5, lsl #8
-	orr	r5, r5, r6, lsr #24
-	mov	r6, r6, lsl #8
-	orr	r6, r6, r7, lsr #24
-	mov	r7, r7, lsl #8
-	orr	r7, r7, ip, lsr #24
-#else
 	orr	r4, r4, r5, lsl #24
 	mov	r5, r5, lsr #8
 	orr	r5, r5, r6, lsl #24
@@ -324,7 +311,6 @@ ENTRY(copyin)
 	orr	r6, r6, r7, lsl #24
 	mov	r7, r7, lsr #8
 	orr	r7, r7, ip, lsl #24
-#endif
 	str	r4, [r1], #0x04
 	str	r5, [r1], #0x04
 	str	r6, [r1], #0x04
@@ -341,43 +327,22 @@ ENTRY(copyin)
 	blt	.Lcopyin_l4
 
 .Lcopyin_bad1_loop4:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #8
-#else
 	mov	r4, ip, lsr #8
-#endif
 	ldrt	ip, [r0], #0x04
 	subs	r2, r2, #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, ip, lsr #24
-#else
 	orr	r4, r4, ip, lsl #24
-#endif
 	str	r4, [r1], #0x04
 	bge	.Lcopyin_bad1_loop4
 	sub	r0, r0, #0x03
 	b	.Lcopyin_l4
 
 .Lcopyin_bad2_loop16:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #16
-#else
 	mov	r4, ip, lsr #16
-#endif
 	ldrt	r5, [r0], #0x04
 	pld	[r0, #0x018]
 	ldrt	r6, [r0], #0x04
 	ldrt	r7, [r0], #0x04
 	ldrt	ip, [r0], #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, r5, lsr #16
-	mov	r5, r5, lsl #16
-	orr	r5, r5, r6, lsr #16
-	mov	r6, r6, lsl #16
-	orr	r6, r6, r7, lsr #16
-	mov	r7, r7, lsl #16
-	orr	r7, r7, ip, lsr #16
-#else
 	orr	r4, r4, r5, lsl #16
 	mov	r5, r5, lsr #16
 	orr	r5, r5, r6, lsl #16
@@ -385,7 +350,6 @@ ENTRY(copyin)
 	orr	r6, r6, r7, lsl #16
 	mov	r7, r7, lsr #16
 	orr	r7, r7, ip, lsl #16
-#endif
 	str	r4, [r1], #0x04
 	str	r5, [r1], #0x04
 	str	r6, [r1], #0x04
@@ -402,43 +366,22 @@ ENTRY(copyin)
 	blt	.Lcopyin_l4
 
 .Lcopyin_bad2_loop4:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #16
-#else
 	mov	r4, ip, lsr #16
-#endif
 	ldrt	ip, [r0], #0x04
 	subs	r2, r2, #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, ip, lsr #16
-#else
 	orr	r4, r4, ip, lsl #16
-#endif
 	str	r4, [r1], #0x04
 	bge	.Lcopyin_bad2_loop4
 	sub	r0, r0, #0x02
 	b	.Lcopyin_l4
 
 .Lcopyin_bad3_loop16:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #24
-#else
 	mov	r4, ip, lsr #24
-#endif
 	ldrt	r5, [r0], #0x04
 	pld	[r0, #0x018]
 	ldrt	r6, [r0], #0x04
 	ldrt	r7, [r0], #0x04
 	ldrt	ip, [r0], #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, r5, lsr #8
-	mov	r5, r5, lsl #24
-	orr	r5, r5, r6, lsr #8
-	mov	r6, r6, lsl #24
-	orr	r6, r6, r7, lsr #8
-	mov	r7, r7, lsl #24
-	orr	r7, r7, ip, lsr #8
-#else
 	orr	r4, r4, r5, lsl #8
 	mov	r5, r5, lsr #24
 	orr	r5, r5, r6, lsl #8
@@ -446,7 +389,6 @@ ENTRY(copyin)
 	orr	r6, r6, r7, lsl #8
 	mov	r7, r7, lsr #24
 	orr	r7, r7, ip, lsl #8
-#endif
 	str	r4, [r1], #0x04
 	str	r5, [r1], #0x04
 	str	r6, [r1], #0x04
@@ -463,18 +405,10 @@ ENTRY(copyin)
 	blt	.Lcopyin_l4
 
 .Lcopyin_bad3_loop4:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #24
-#else
 	mov	r4, ip, lsr #24
-#endif
 	ldrt	ip, [r0], #0x04
 	subs	r2, r2, #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, ip, lsr #8
-#else
 	orr	r4, r4, ip, lsl #8
-#endif
 	str	r4, [r1], #0x04
 	bge	.Lcopyin_bad3_loop4
 	sub	r0, r0, #0x01
@@ -750,25 +684,12 @@ ENTRY(copyout)
 	b	.Lcopyout_bad1
 
 .Lcopyout_bad1_loop16:
-#ifdef	__ARMEB__
-	mov	r4, ip, lsl #8
-#else
 	mov	r4, ip, lsr #8
-#endif
 	ldr	r5, [r0], #0x04
 	pld	[r0, #0x018]
 	ldr	r6, [r0], #0x04
 	ldr	r7, [r0], #0x04
 	ldr	ip, [r0], #0x04
-#ifdef	__ARMEB__
-	orr	r4, r4, r5, lsr #24
-	mov	r5, r5, lsl #8
-	orr	r5, r5, r6, lsr #24
-	mov	r6, r6, lsl #8
-	orr	r6, r6, r7, lsr #24
-	mov	r7, r7, lsl #8
-	orr	r7, r7, ip, lsr #24
-#else
 	orr	r4, r4, r5, lsl #24
 	mov	r5, r5, lsr #8
 	orr	r5, r5, r6, lsl #24
@@ -776,7 +697,6 @@ ENTRY(copyout)
 	orr	r6, r6, r7, lsl #24
 	mov	r7, r7, lsr #8
 	orr	r7, r7, ip, lsl #24
-#endif
 	strt	r4, [r1], #0x04
 	strt	r5, [r1], #0x04
 	strt	r6, [r1], #0x04
@@ -793,43 +713,22 @@ ENTRY(copyout)
 	blt	.Lcopyout_l4
 
 .Lcopyout_bad1_loop4:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #8
-#else
 	mov	r4, ip, lsr #8
-#endif
 	ldr	ip, [r0], #0x04
 	subs	r2, r2, #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, ip, lsr #24
-#else
 	orr	r4, r4, ip, lsl #24
-#endif
 	strt	r4, [r1], #0x04
 	bge	.Lcopyout_bad1_loop4
 	sub	r0, r0, #0x03
 	b	.Lcopyout_l4
 
 .Lcopyout_bad2_loop16:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #16
-#else
 	mov	r4, ip, lsr #16
-#endif
 	ldr	r5, [r0], #0x04
 	pld	[r0, #0x018]
 	ldr	r6, [r0], #0x04
 	ldr	r7, [r0], #0x04
 	ldr	ip, [r0], #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, r5, lsr #16
-	mov	r5, r5, lsl #16
-	orr	r5, r5, r6, lsr #16
-	mov	r6, r6, lsl #16
-	orr	r6, r6, r7, lsr #16
-	mov	r7, r7, lsl #16
-	orr	r7, r7, ip, lsr #16
-#else
 	orr	r4, r4, r5, lsl #16
 	mov	r5, r5, lsr #16
 	orr	r5, r5, r6, lsl #16
@@ -837,7 +736,6 @@ ENTRY(copyout)
 	orr	r6, r6, r7, lsl #16
 	mov	r7, r7, lsr #16
 	orr	r7, r7, ip, lsl #16
-#endif
 	strt	r4, [r1], #0x04
 	strt	r5, [r1], #0x04
 	strt	r6, [r1], #0x04
@@ -854,43 +752,22 @@ ENTRY(copyout)
 	blt	.Lcopyout_l4
 
 .Lcopyout_bad2_loop4:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #16
-#else
 	mov	r4, ip, lsr #16
-#endif
 	ldr	ip, [r0], #0x04
 	subs	r2, r2, #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, ip, lsr #16
-#else
 	orr	r4, r4, ip, lsl #16
-#endif
 	strt	r4, [r1], #0x04
 	bge	.Lcopyout_bad2_loop4
 	sub	r0, r0, #0x02
 	b	.Lcopyout_l4
 
 .Lcopyout_bad3_loop16:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #24
-#else
 	mov	r4, ip, lsr #24
-#endif
 	ldr	r5, [r0], #0x04
 	pld	[r0, #0x018]
 	ldr	r6, [r0], #0x04
 	ldr	r7, [r0], #0x04
 	ldr	ip, [r0], #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, r5, lsr #8
-	mov	r5, r5, lsl #24
-	orr	r5, r5, r6, lsr #8
-	mov	r6, r6, lsl #24
-	orr	r6, r6, r7, lsr #8
-	mov	r7, r7, lsl #24
-	orr	r7, r7, ip, lsr #8
-#else
 	orr	r4, r4, r5, lsl #8
 	mov	r5, r5, lsr #24
 	orr	r5, r5, r6, lsl #8
@@ -898,7 +775,6 @@ ENTRY(copyout)
 	orr	r6, r6, r7, lsl #8
 	mov	r7, r7, lsr #24
 	orr	r7, r7, ip, lsl #8
-#endif
 	strt	r4, [r1], #0x04
 	strt	r5, [r1], #0x04
 	strt	r6, [r1], #0x04
@@ -915,18 +791,10 @@ ENTRY(copyout)
 	blt	.Lcopyout_l4
 
 .Lcopyout_bad3_loop4:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #24
-#else
 	mov	r4, ip, lsr #24
-#endif
 	ldr	ip, [r0], #0x04
 	subs	r2, r2, #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, ip, lsr #8
-#else
 	orr	r4, r4, ip, lsl #8
-#endif
 	strt	r4, [r1], #0x04
 	bge	.Lcopyout_bad3_loop4
 	sub	r0, r0, #0x01

Modified: head/sys/arm/arm/cpufunc.c
==============================================================================
--- head/sys/arm/arm/cpufunc.c	Sun Nov 29 14:21:16 2020	(r368152)
+++ head/sys/arm/arm/cpufunc.c	Sun Nov 29 15:04:39 2020	(r368153)
@@ -486,9 +486,6 @@ arm10_setup(void)
 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 #endif
 
-#ifdef __ARMEB__
-	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
-#endif
 
 	/* Clear out the cache */
 	cpu_idcache_wbinv_all();

Modified: head/sys/arm/arm/fusu.S
==============================================================================
--- head/sys/arm/arm/fusu.S	Sun Nov 29 14:21:16 2020	(r368152)
+++ head/sys/arm/arm/fusu.S	Sun Nov 29 15:04:39 2020	(r368153)
@@ -151,11 +151,7 @@ ENTRY(fusword)
 
 	ldrbt	r3, [r0], #1
 	ldrbt	ip, [r0]
-#ifdef __ARMEB__
-	orr	r0, ip, r3, asl #8
-#else
 	orr	r0, r3, ip, asl #8
-#endif
 	mov	r1, #0x00000000
 	str	r1, [r2, #PCB_ONFAULT]
 	RET
@@ -269,13 +265,8 @@ ENTRY(susword)
 	adr	r3, .Lfusufault
 	str	r3, [r2, #PCB_ONFAULT]
 
-#ifdef __ARMEB__
-	mov	ip, r1, lsr #8
-	strbt	ip, [r0], #1
-#else
 	strbt	r1, [r0], #1
 	mov	r1, r1, lsr #8
-#endif
 	strbt	r1, [r0]
 
 	mov	r0, #0x00000000

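The surviving little-endian paths in fusword() and susword() above assemble
or split a 16-bit user halfword one byte at a time, low byte first. A hedged
C sketch of that byte order (names are illustrative, not kernel interfaces):

    #include <stdint.h>

    /* fusword(): two user bytes combined as "orr r0, r3, ip, asl #8". */
    static uint16_t
    fetch_halfword_le(const uint8_t *p)
    {
            return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
    }

    /* susword(): low byte stored first, then the value shifted down by 8. */
    static void
    store_halfword_le(uint8_t *p, uint16_t v)
    {
            p[0] = (uint8_t)(v & 0xff);
            p[1] = (uint8_t)(v >> 8);
    }
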
Modified: head/sys/arm/arm/in_cksum_arm.S
==============================================================================
--- head/sys/arm/arm/in_cksum_arm.S	Sun Nov 29 14:21:16 2020	(r368152)
+++ head/sys/arm/arm/in_cksum_arm.S	Sun Nov 29 15:04:39 2020	(r368153)
@@ -133,20 +133,14 @@ ASENTRY_NP(L_cksumdata)
 	movlt	r5, #0x00
 	ldrbgt	r6, [r0], #0x01		/* Fetch 3rd byte */
 	movle	r6, #0x00
+
 	/* Combine the three bytes depending on endianness and alignment */
-#ifdef __ARMEB__
-	orreq	r2, r5, r4, lsl #8
-	orreq	r2, r2, r6, lsl #24
-	orrne	r2, r4, r5, lsl #8
-	orrne	r2, r2, r6, lsl #16
-#else
 	orreq	r2, r4, r5, lsl #8
 	orreq	r2, r2, r6, lsl #16
 	orrne	r2, r5, r4, lsl #8
 	orrne	r2, r2, r6, lsl #24
-#endif
 	subs	r1, r1, r7		/* Update length */
-	RETeq			/* All done? */
+	RETeq				/* All done? */
 
 	/* Buffer is now word aligned */
 .Lcksumdata_wordaligned:
@@ -326,17 +320,10 @@ ASENTRY_NP(L_cksumdata)
 	movle	r5, #0x00
 	/* Combine the three bytes depending on endianness and alignment */
 	tst	r0, #0x01
-#ifdef __ARMEB__
-	orreq	r3, r4, r3, lsl #8
-	orreq	r3, r3, r5, lsl #24
-	orrne	r3, r3, r4, lsl #8
-	orrne	r3, r3, r5, lsl #16
-#else
 	orreq	r3, r3, r4, lsl #8
 	orreq	r3, r3, r5, lsl #16
 	orrne	r3, r4, r3, lsl #8
 	orrne	r3, r3, r5, lsl #24
-#endif
 	adds	r2, r2, r3
 	adc	r2, r2, #0x00
 	RET

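The kept little-endian code above folds stray leading or trailing bytes into
the running Internet checksum, with their placement chosen by the buffer
address parity. A rough C sketch of why parity matters when the sum is
accumulated in native little-endian order (illustrative only, not the kernel
routine):

    #include <stddef.h>
    #include <stdint.h>

    static uint16_t
    cksum16_le(const uint8_t *buf, size_t len, size_t start_off)
    {
            uint32_t sum = 0;

            for (size_t i = 0; i < len; i++) {
                    /*
                     * Even offsets land in the low byte of a native
                     * 16-bit word, odd offsets in the high byte.
                     */
                    if ((start_off + i) & 1)
                            sum += (uint32_t)buf[i] << 8;
                    else
                            sum += buf[i];
            }
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }
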
Modified: head/sys/arm/arm/support.S
==============================================================================
--- head/sys/arm/arm/support.S	Sun Nov 29 14:21:16 2020	(r368152)
+++ head/sys/arm/arm/support.S	Sun Nov 29 15:04:39 2020	(r368153)
@@ -512,21 +512,8 @@ EENTRY(memmove)
 	stmdb	sp!, {r4, r5}
 
 .Lmemmove_fsrcul1loop16:
-#ifdef __ARMEB__
-	mov	r3, lr, lsl #8
-#else
 	mov	r3, lr, lsr #8
-#endif
 	ldmia	r1!, {r4, r5, r12, lr}
-#ifdef __ARMEB__
-	orr	r3, r3, r4, lsr #24
-	mov	r4, r4, lsl #8
-	orr	r4, r4, r5, lsr #24
-	mov	r5, r5, lsl #8
-	orr	r5, r5, r12, lsr #24
-	mov	r12, r12, lsl #8
-	orr	r12, r12, lr, lsr #24
-#else
 	orr	r3, r3, r4, lsl #24
 	mov	r4, r4, lsr #8
 	orr	r4, r4, r5, lsl #24
@@ -534,7 +521,6 @@ EENTRY(memmove)
 	orr	r5, r5, r12, lsl #24
 	mov	r12, r12, lsr #8
 	orr	r12, r12, lr, lsl #24
-#endif
 	stmia	r0!, {r3-r5, r12}
 	subs	r2, r2, #0x10
 	bge	.Lmemmove_fsrcul1loop16
@@ -543,17 +529,9 @@ EENTRY(memmove)
 	blt	.Lmemmove_fsrcul1l4
 
 .Lmemmove_fsrcul1loop4:
-#ifdef __ARMEB__
-	mov	r12, lr, lsl #8
-#else
 	mov	r12, lr, lsr #8
-#endif
 	ldr	lr, [r1], #4
-#ifdef __ARMEB__
-	orr	r12, r12, lr, lsr #24
-#else
 	orr	r12, r12, lr, lsl #24
-#endif
 	str	r12, [r0], #4
 	subs	r2, r2, #4
 	bge	.Lmemmove_fsrcul1loop4
@@ -569,21 +547,8 @@ EENTRY(memmove)
 	stmdb	sp!, {r4, r5}
 
 .Lmemmove_fsrcul2loop16:
-#ifdef __ARMEB__
-	mov	r3, lr, lsl #16
-#else
 	mov	r3, lr, lsr #16
-#endif
 	ldmia	r1!, {r4, r5, r12, lr}
-#ifdef __ARMEB__
-	orr	r3, r3, r4, lsr #16
-	mov	r4, r4, lsl #16
-	orr	r4, r4, r5, lsr #16
-	mov	r5, r5, lsl #16
-	orr	r5, r5, r12, lsr #16
-	mov	r12, r12, lsl #16
-	orr	r12, r12, lr, lsr #16
-#else
 	orr	r3, r3, r4, lsl #16
 	mov	r4, r4, lsr #16
 	orr	r4, r4, r5, lsl #16
@@ -591,7 +556,6 @@ EENTRY(memmove)
 	orr	r5, r5, r12, lsl #16
 	mov	r12, r12, lsr #16
 	orr	r12, r12, lr, lsl #16
-#endif
 	stmia	r0!, {r3-r5, r12}
 	subs	r2, r2, #0x10
 	bge	.Lmemmove_fsrcul2loop16
@@ -600,17 +564,9 @@ EENTRY(memmove)
 	blt	.Lmemmove_fsrcul2l4
 
 .Lmemmove_fsrcul2loop4:
-#ifdef __ARMEB__
-	mov	r12, lr, lsl #16
-#else
 	mov	r12, lr, lsr #16
-#endif
 	ldr	lr, [r1], #4
-#ifdef __ARMEB__
-	orr	r12, r12, lr, lsr #16
-#else
 	orr	r12, r12, lr, lsl #16
-#endif
 	str	r12, [r0], #4
 	subs	r2, r2, #4
 	bge	.Lmemmove_fsrcul2loop4
@@ -626,21 +582,8 @@ EENTRY(memmove)
 	stmdb	sp!, {r4, r5}
 
 .Lmemmove_fsrcul3loop16:
-#ifdef __ARMEB__
-	mov	r3, lr, lsl #24
-#else
 	mov	r3, lr, lsr #24
-#endif
 	ldmia	r1!, {r4, r5, r12, lr}
-#ifdef __ARMEB__
-	orr	r3, r3, r4, lsr #8
-	mov	r4, r4, lsl #24
-	orr	r4, r4, r5, lsr #8
-	mov	r5, r5, lsl #24
-	orr	r5, r5, r12, lsr #8
-	mov	r12, r12, lsl #24
-	orr	r12, r12, lr, lsr #8
-#else
 	orr	r3, r3, r4, lsl #8
 	mov	r4, r4, lsr #24
 	orr	r4, r4, r5, lsl #8
@@ -648,7 +591,6 @@ EENTRY(memmove)
 	orr	r5, r5, r12, lsl #8
 	mov	r12, r12, lsr #24
 	orr	r12, r12, lr, lsl #8
-#endif
 	stmia	r0!, {r3-r5, r12}
 	subs	r2, r2, #0x10
 	bge	.Lmemmove_fsrcul3loop16
@@ -657,17 +599,9 @@ EENTRY(memmove)
 	blt	.Lmemmove_fsrcul3l4
 
 .Lmemmove_fsrcul3loop4:
-#ifdef __ARMEB__
-	mov	r12, lr, lsl #24
-#else
 	mov	r12, lr, lsr #24
-#endif
 	ldr	lr, [r1], #4
-#ifdef __ARMEB__
-	orr	r12, r12, lr, lsr #8
-#else
 	orr	r12, r12, lr, lsl #8
-#endif
 	str	r12, [r0], #4
 	subs	r2, r2, #4
 	bge	.Lmemmove_fsrcul3loop4
@@ -770,21 +704,8 @@ EENTRY(memmove)
 	stmdb	sp!, {r4, r5, lr}
 
 .Lmemmove_bsrcul3loop16:
-#ifdef __ARMEB__
-	mov	lr, r3, lsr #8
-#else
 	mov	lr, r3, lsl #8
-#endif
 	ldmdb	r1!, {r3-r5, r12}
-#ifdef __ARMEB__
-	orr	lr, lr, r12, lsl #24
-	mov	r12, r12, lsr #8
-	orr	r12, r12, r5, lsl #24
-	mov	r5, r5, lsr #8
-	orr	r5, r5, r4, lsl #24
-	mov	r4, r4, lsr #8
-	orr	r4, r4, r3, lsl #24
-#else
 	orr	lr, lr, r12, lsr #24
 	mov	r12, r12, lsl #8
 	orr	r12, r12, r5, lsr #24
@@ -792,7 +713,6 @@ EENTRY(memmove)
 	orr	r5, r5, r4, lsr #24
 	mov	r4, r4, lsl #8
 	orr	r4, r4, r3, lsr #24
-#endif
 	stmdb	r0!, {r4, r5, r12, lr}
 	subs	r2, r2, #0x10
 	bge	.Lmemmove_bsrcul3loop16
@@ -801,17 +721,9 @@ EENTRY(memmove)
 	blt	.Lmemmove_bsrcul3l4
 
 .Lmemmove_bsrcul3loop4:
-#ifdef __ARMEB__
-	mov	r12, r3, lsr #8
-#else
 	mov	r12, r3, lsl #8
-#endif
 	ldr	r3, [r1, #-4]!
-#ifdef __ARMEB__
-	orr	r12, r12, r3, lsl #24
-#else
 	orr	r12, r12, r3, lsr #24
-#endif
 	str	r12, [r0, #-4]!
 	subs	r2, r2, #4
 	bge	.Lmemmove_bsrcul3loop4
@@ -827,21 +739,8 @@ EENTRY(memmove)
 	stmdb	sp!, {r4, r5, lr}
 
 .Lmemmove_bsrcul2loop16:
-#ifdef __ARMEB__
-	mov	lr, r3, lsr #16
-#else
 	mov	lr, r3, lsl #16
-#endif
 	ldmdb	r1!, {r3-r5, r12}
-#ifdef __ARMEB__
-	orr	lr, lr, r12, lsl #16
-	mov	r12, r12, lsr #16
-	orr	r12, r12, r5, lsl #16
-	mov	r5, r5, lsr #16
-	orr	r5, r5, r4, lsl #16
-	mov	r4, r4, lsr #16
-	orr	r4, r4, r3, lsl #16
-#else
 	orr	lr, lr, r12, lsr #16
 	mov	r12, r12, lsl #16
 	orr	r12, r12, r5, lsr #16
@@ -849,7 +748,6 @@ EENTRY(memmove)
 	orr	r5, r5, r4, lsr #16
 	mov	r4, r4, lsl #16
 	orr	r4, r4, r3, lsr #16
-#endif
 	stmdb	r0!, {r4, r5, r12, lr}
 	subs	r2, r2, #0x10
 	bge	.Lmemmove_bsrcul2loop16
@@ -858,17 +756,9 @@ EENTRY(memmove)
 	blt	.Lmemmove_bsrcul2l4
 
 .Lmemmove_bsrcul2loop4:
-#ifdef __ARMEB__
-	mov	r12, r3, lsr #16
-#else
 	mov	r12, r3, lsl #16
-#endif
 	ldr	r3, [r1, #-4]!
-#ifdef __ARMEB__
-	orr	r12, r12, r3, lsl #16
-#else
 	orr	r12, r12, r3, lsr #16
-#endif
 	str	r12, [r0, #-4]!
 	subs	r2, r2, #4
 	bge	.Lmemmove_bsrcul2loop4
@@ -884,21 +774,8 @@ EENTRY(memmove)
 	stmdb	sp!, {r4, r5, lr}
 
 .Lmemmove_bsrcul1loop32:
-#ifdef __ARMEB__
-	mov	lr, r3, lsr #24
-#else
 	mov	lr, r3, lsl #24
-#endif
 	ldmdb	r1!, {r3-r5, r12}
-#ifdef __ARMEB__
-	orr	lr, lr, r12, lsl #8
-	mov	r12, r12, lsr #24
-	orr	r12, r12, r5, lsl #8
-	mov	r5, r5, lsr #24
-	orr	r5, r5, r4, lsl #8
-	mov	r4, r4, lsr #24
-	orr	r4, r4, r3, lsl #8
-#else
 	orr	lr, lr, r12, lsr #8
 	mov	r12, r12, lsl #24
 	orr	r12, r12, r5, lsr #8
@@ -906,7 +783,6 @@ EENTRY(memmove)
 	orr	r5, r5, r4, lsr #8
 	mov	r4, r4, lsl #24
 	orr	r4, r4, r3, lsr #8
-#endif
 	stmdb	r0!, {r4, r5, r12, lr}
 	subs	r2, r2, #0x10
 	bge	.Lmemmove_bsrcul1loop32
@@ -915,17 +791,9 @@ EENTRY(memmove)
 	blt	.Lmemmove_bsrcul1l4
 
 .Lmemmove_bsrcul1loop4:
-#ifdef __ARMEB__
-	mov	r12, r3, lsr #24
-#else
 	mov	r12, r3, lsl #24
-#endif
 	ldr	r3, [r1, #-4]!
-#ifdef __ARMEB__
-	orr	r12, r12, r3, lsl #8
-#else
 	orr	r12, r12, r3, lsr #8
-#endif
 	str	r12, [r0, #-4]!
 	subs	r2, r2, #4
 	bge	.Lmemmove_bsrcul1loop4
@@ -1382,25 +1250,12 @@ ENTRY(memcpy)
 	b	.Lmemcpy_bad1
 
 .Lmemcpy_bad1_loop16:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #8
-#else
 	mov	r4, ip, lsr #8
-#endif
 	ldr	r5, [r1], #0x04
 	pld	[r1, #0x018]
 	ldr	r6, [r1], #0x04
 	ldr	r7, [r1], #0x04
 	ldr	ip, [r1], #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, r5, lsr #24
-	mov	r5, r5, lsl #8
-	orr	r5, r5, r6, lsr #24
-	mov	r6, r6, lsl #8
-	orr	r6, r6, r7, lsr #24
-	mov	r7, r7, lsl #8
-	orr	r7, r7, ip, lsr #24
-#else
 	orr	r4, r4, r5, lsl #24
 	mov	r5, r5, lsr #8
 	orr	r5, r5, r6, lsl #24
@@ -1408,7 +1263,6 @@ ENTRY(memcpy)
 	orr	r6, r6, r7, lsl #24
 	mov	r7, r7, lsr #8
 	orr	r7, r7, ip, lsl #24
-#endif
 	str	r4, [r3], #0x04
 	str	r5, [r3], #0x04
 	str	r6, [r3], #0x04
@@ -1425,43 +1279,22 @@ ENTRY(memcpy)
 	blt	.Lmemcpy_bad_done
 
 .Lmemcpy_bad1_loop4:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #8
-#else
 	mov	r4, ip, lsr #8
-#endif
 	ldr	ip, [r1], #0x04
 	subs	r2, r2, #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, ip, lsr #24
-#else
 	orr	r4, r4, ip, lsl #24
-#endif
 	str	r4, [r3], #0x04
 	bge	.Lmemcpy_bad1_loop4
 	sub	r1, r1, #0x03
 	b	.Lmemcpy_bad_done
 
 .Lmemcpy_bad2_loop16:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #16
-#else
 	mov	r4, ip, lsr #16
-#endif
 	ldr	r5, [r1], #0x04
 	pld	[r1, #0x018]
 	ldr	r6, [r1], #0x04
 	ldr	r7, [r1], #0x04
 	ldr	ip, [r1], #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, r5, lsr #16
-	mov	r5, r5, lsl #16
-	orr	r5, r5, r6, lsr #16
-	mov	r6, r6, lsl #16
-	orr	r6, r6, r7, lsr #16
-	mov	r7, r7, lsl #16
-	orr	r7, r7, ip, lsr #16
-#else
 	orr	r4, r4, r5, lsl #16
 	mov	r5, r5, lsr #16
 	orr	r5, r5, r6, lsl #16
@@ -1469,7 +1302,6 @@ ENTRY(memcpy)
 	orr	r6, r6, r7, lsl #16
 	mov	r7, r7, lsr #16
 	orr	r7, r7, ip, lsl #16
-#endif
 	str	r4, [r3], #0x04
 	str	r5, [r3], #0x04
 	str	r6, [r3], #0x04
@@ -1486,43 +1318,22 @@ ENTRY(memcpy)
 	blt	.Lmemcpy_bad_done
 
 .Lmemcpy_bad2_loop4:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #16
-#else
 	mov	r4, ip, lsr #16
-#endif
 	ldr	ip, [r1], #0x04
 	subs	r2, r2, #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, ip, lsr #16
-#else
 	orr	r4, r4, ip, lsl #16
-#endif
 	str	r4, [r3], #0x04
 	bge	.Lmemcpy_bad2_loop4
 	sub	r1, r1, #0x02
 	b	.Lmemcpy_bad_done
 
 .Lmemcpy_bad3_loop16:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #24
-#else
 	mov	r4, ip, lsr #24
-#endif
 	ldr	r5, [r1], #0x04
 	pld	[r1, #0x018]
 	ldr	r6, [r1], #0x04
 	ldr	r7, [r1], #0x04
 	ldr	ip, [r1], #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, r5, lsr #8
-	mov	r5, r5, lsl #24
-	orr	r5, r5, r6, lsr #8
-	mov	r6, r6, lsl #24
-	orr	r6, r6, r7, lsr #8
-	mov	r7, r7, lsl #24
-	orr	r7, r7, ip, lsr #8
-#else
 	orr	r4, r4, r5, lsl #8
 	mov	r5, r5, lsr #24
 	orr	r5, r5, r6, lsl #8
@@ -1530,7 +1341,6 @@ ENTRY(memcpy)
 	orr	r6, r6, r7, lsl #8
 	mov	r7, r7, lsr #24
 	orr	r7, r7, ip, lsl #8
-#endif
 	str	r4, [r3], #0x04
 	str	r5, [r3], #0x04
 	str	r6, [r3], #0x04
@@ -1547,18 +1357,10 @@ ENTRY(memcpy)
 	blt	.Lmemcpy_bad_done
 
 .Lmemcpy_bad3_loop4:
-#ifdef __ARMEB__
-	mov	r4, ip, lsl #24
-#else
 	mov	r4, ip, lsr #24
-#endif
 	ldr	ip, [r1], #0x04
 	subs	r2, r2, #0x04
-#ifdef __ARMEB__
-	orr	r4, r4, ip, lsr #8
-#else
 	orr	r4, r4, ip, lsl #8
-#endif
 	str	r4, [r3], #0x04
 	bge	.Lmemcpy_bad3_loop4
 	sub	r1, r1, #0x01
@@ -1633,13 +1435,8 @@ ENTRY(memcpy)
  */
 	ldr	r3, [r1, #-1]		/* BE:r3 = x012  LE:r3 = 210x */
 	ldr	r2, [r1, #3]		/* BE:r2 = 3xxx  LE:r2 = xxx3 */
-#ifdef __ARMEB__
-	mov	r3, r3, lsl #8		/* r3 = 012. */
-	orr	r3, r3, r2, lsr #24	/* r3 = 0123 */
-#else
 	mov	r3, r3, lsr #8		/* r3 = .210 */
 	orr	r3, r3, r2, lsl #24	/* r3 = 3210 */
-#endif
 	str	r3, [r0]
 	RET
 	LMEMCPY_4_PAD
@@ -1647,13 +1444,8 @@ ENTRY(memcpy)
 /*
  * 0010: dst is 32-bit aligned, src is 16-bit aligned
  */
-#ifdef __ARMEB__
-	ldrh	r3, [r1]
-	ldrh	r2, [r1, #0x02]
-#else
 	ldrh	r3, [r1, #0x02]
 	ldrh	r2, [r1]
-#endif
 	orr	r3, r2, r3, lsl #16
 	str	r3, [r0]
 	RET
@@ -1664,13 +1456,8 @@ ENTRY(memcpy)
  */
 	ldr	r3, [r1, #-3]		/* BE:r3 = xxx0  LE:r3 = 0xxx */
 	ldr	r2, [r1, #1]		/* BE:r2 = 123x  LE:r2 = x321 */
-#ifdef __ARMEB__
-	mov	r3, r3, lsl #24		/* r3 = 0... */
-	orr	r3, r3, r2, lsr #8	/* r3 = 0123 */
-#else
 	mov	r3, r3, lsr #24		/* r3 = ...0 */
 	orr	r3, r3, r2, lsl #8	/* r3 = 3210 */
-#endif
 	str	r3, [r0]
 	RET
 	LMEMCPY_4_PAD
@@ -1679,17 +1466,10 @@ ENTRY(memcpy)
  * 0100: dst is 8-bit aligned, src is 32-bit aligned
  */
 	ldr	r2, [r1]
-#ifdef __ARMEB__
-	strb	r2, [r0, #0x03]
-	mov	r3, r2, lsr #8
-	mov	r1, r2, lsr #24
-	strb	r1, [r0]
-#else
 	strb	r2, [r0]
 	mov	r3, r2, lsr #8
 	mov	r1, r2, lsr #24
 	strb	r1, [r0, #0x03]
-#endif
 	strh	r3, [r0, #0x01]
 	RET
 	LMEMCPY_4_PAD
@@ -1711,17 +1491,10 @@ ENTRY(memcpy)
  */
 	ldrh	r2, [r1]		/* BE:r2 = ..01  LE:r2 = ..10 */
 	ldrh	r3, [r1, #0x02]		/* LE:r3 = ..23  LE:r3 = ..32 */
-#ifdef __ARMEB__
 	mov	r1, r2, lsr #8		/* r1 = ...0 */
 	strb	r1, [r0]
 	mov	r2, r2, lsl #8		/* r2 = .01. */
 	orr	r2, r2, r3, lsr #8	/* r2 = .012 */
-#else
-	strb	r2, [r0]
-	mov	r2, r2, lsr #8		/* r2 = ...1 */
-	orr	r2, r2, r3, lsl #8	/* r2 = .321 */
-	mov	r3, r3, lsr #8		/* r3 = ...3 */
-#endif
 	strh	r2, [r0, #0x01]
 	strb	r3, [r0, #0x03]
 	RET
@@ -1743,15 +1516,9 @@ ENTRY(memcpy)
  * 1000: dst is 16-bit aligned, src is 32-bit aligned
  */
 	ldr	r2, [r1]
-#ifdef __ARMEB__
-	strh	r2, [r0, #0x02]
-	mov	r3, r2, lsr #16
-	strh	r3, [r0]
-#else
 	strh	r2, [r0]
 	mov	r3, r2, lsr #16
 	strh	r3, [r0, #0x02]
-#endif
 	RET
 	LMEMCPY_4_PAD
 
@@ -1762,13 +1529,8 @@ ENTRY(memcpy)
 	ldr	r3, [r1, #3]		/* BE:r3 = 3xxx  LE:r3 = xxx3 */
 	mov	r1, r2, lsr #8		/* BE:r1 = .x01  LE:r1 = .210 */
 	strh	r1, [r0]
-#ifdef __ARMEB__
-	mov	r2, r2, lsl #8		/* r2 = 012. */
-	orr	r2, r2, r3, lsr #24	/* r2 = 0123 */
-#else
 	mov	r2, r2, lsr #24		/* r2 = ...2 */
 	orr	r2, r2, r3, lsl #8	/* r2 = xx32 */
-#endif
 	strh	r2, [r0, #0x02]
 	RET
 	LMEMCPY_4_PAD
@@ -1790,13 +1552,8 @@ ENTRY(memcpy)
 	ldr	r2, [r1, #-3]		/* BE:r2 = xxx0  LE:r2 = 0xxx */
 	mov	r1, r3, lsr #8		/* BE:r1 = .123  LE:r1 = .x32 */
 	strh	r1, [r0, #0x02]
-#ifdef __ARMEB__
-	mov	r3, r3, lsr #24		/* r3 = ...1 */
-	orr	r3, r3, r2, lsl #8	/* r3 = xx01 */
-#else
 	mov	r3, r3, lsl #8		/* r3 = 321. */
 	orr	r3, r3, r2, lsr #24	/* r3 = 3210 */
-#endif
 	strh	r3, [r0]
 	RET
 	LMEMCPY_4_PAD
@@ -1805,19 +1562,11 @@ ENTRY(memcpy)
  * 1100: dst is 8-bit aligned, src is 32-bit aligned
  */
 	ldr	r2, [r1]		/* BE:r2 = 0123  LE:r2 = 3210 */
-#ifdef __ARMEB__
-	strb	r2, [r0, #0x03]
-	mov	r3, r2, lsr #8
-	mov	r1, r2, lsr #24
-	strh	r3, [r0, #0x01]
-	strb	r1, [r0]
-#else
 	strb	r2, [r0]
 	mov	r3, r2, lsr #8
 	mov	r1, r2, lsr #24
 	strh	r3, [r0, #0x01]
 	strb	r1, [r0, #0x03]
-#endif
 	RET
 	LMEMCPY_4_PAD
 
@@ -1836,17 +1585,7 @@ ENTRY(memcpy)
 /*
  * 1110: dst is 8-bit aligned, src is 16-bit aligned
  */
-#ifdef __ARMEB__
-	ldrh	r3, [r1, #0x02]		/* BE:r3 = ..23  LE:r3 = ..32 */
 	ldrh	r2, [r1]		/* BE:r2 = ..01  LE:r2 = ..10 */
-	strb	r3, [r0, #0x03]
-	mov	r3, r3, lsr #8		/* r3 = ...2 */
-	orr	r3, r3, r2, lsl #8	/* r3 = ..12 */
-	strh	r3, [r0, #0x01]
-	mov	r2, r2, lsr #8		/* r2 = ...0 */

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***