svn commit: r282816 - in head/lib/libc/arm: . aeabi gen

Andrew Turner andrew at FreeBSD.org
Tue May 12 10:03:16 UTC 2015


Author: andrew
Date: Tue May 12 10:03:14 2015
New Revision: 282816
URL: https://svnweb.freebsd.org/changeset/base/282816

Log:
  Teach bits of libc about Thumb. This adds the if-then (IT) instructions
  needed to handle ARM conditional execution when assembling for Thumb-2.
  
  While here, fix a bug found by this in the hard-float code: cc is the
  opposite of cs. The former is used for 'less than' in floating-point code
  and is executed when the C (carry) bit is clear; the latter is used for
  'greater than, equal, or unordered' and is executed when the C bit is set.
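
For illustration only (not part of the committed diff): in Thumb-2, an ite
instruction predicates the next two instructions with a condition and its
inverse, while in ARM mode the assembler only checks it and emits nothing.
With the cs/cc fix, the tail of __aeabi_dcmplt from the patch below behaves
roughly as sketched here (RET is libc's return macro):

	vcmp.f64 d0, d1             @ compare the two doubles
	vmrs     APSR_nzcv, fpscr   @ copy the FP flags into the APSR
	ite      cs                 @ next insn if C set, the one after if C clear
	movcs    r0, #0             @ C set: greater than, equal, or unordered
	movcc    r0, #1             @ C clear: less than
	RET                         @ return 0 or 1 to the caller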

Modified:
  head/lib/libc/arm/SYS.h
  head/lib/libc/arm/aeabi/aeabi_vfp_double.S
  head/lib/libc/arm/aeabi/aeabi_vfp_float.S
  head/lib/libc/arm/gen/_setjmp.S
  head/lib/libc/arm/gen/setjmp.S

Modified: head/lib/libc/arm/SYS.h
==============================================================================
--- head/lib/libc/arm/SYS.h	Tue May 12 09:35:50 2015	(r282815)
+++ head/lib/libc/arm/SYS.h	Tue May 12 10:03:14 2015	(r282816)
@@ -62,6 +62,7 @@
 
 #define _SYSCALL(x)							\
 	_SYSCALL_NOERROR(x);						\
+	it	cs;							\
 	bcs PIC_SYM(CERROR, PLT)
 
 #define SYSCALL(x)							\
@@ -72,6 +73,7 @@
 	.weak _C_LABEL(__CONCAT(_,x));					\
 	.set _C_LABEL(__CONCAT(_,x)),_C_LABEL(__CONCAT(__sys_,x));	\
 	SYSTRAP(x);							\
+	it	cs;							\
 	bcs PIC_SYM(CERROR, PLT);					\
 	RET
 

Modified: head/lib/libc/arm/aeabi/aeabi_vfp_double.S
==============================================================================
--- head/lib/libc/arm/aeabi/aeabi_vfp_double.S	Tue May 12 09:35:50 2015	(r282815)
+++ head/lib/libc/arm/aeabi/aeabi_vfp_double.S	Tue May 12 10:03:14 2015	(r282816)
@@ -66,6 +66,7 @@ AEABI_ENTRY(dcmpeq)
 	LOAD_DREG(d1, r2, r3)
 	vcmp.f64 d0, d1
 	vmrs     APSR_nzcv, fpscr
+	ite      ne
 	movne    r0, #0
 	moveq    r0, #1
 	RET
@@ -77,8 +78,9 @@ AEABI_ENTRY(dcmplt)
 	LOAD_DREG(d1, r2, r3)
 	vcmp.f64 d0, d1
 	vmrs     APSR_nzcv, fpscr
+	ite      cs
 	movcs    r0, #0
-	movlt    r0, #1
+	movcc    r0, #1
 	RET
 AEABI_END(dcmplt)
 
@@ -88,6 +90,7 @@ AEABI_ENTRY(dcmple)
 	LOAD_DREG(d1, r2, r3)
 	vcmp.f64 d0, d1
 	vmrs     APSR_nzcv, fpscr
+	ite      hi
 	movhi    r0, #0
 	movls    r0, #1
 	RET
@@ -99,6 +102,7 @@ AEABI_ENTRY(dcmpge)
 	LOAD_DREG(d1, r2, r3)
 	vcmp.f64 d0, d1
 	vmrs     APSR_nzcv, fpscr
+	ite      lt
 	movlt    r0, #0
 	movge    r0, #1
 	RET
@@ -110,6 +114,7 @@ AEABI_ENTRY(dcmpgt)
 	LOAD_DREG(d1, r2, r3)
 	vcmp.f64 d0, d1
 	vmrs     APSR_nzcv, fpscr
+	ite      le
 	movle    r0, #0
 	movgt    r0, #1
 	RET
@@ -121,6 +126,7 @@ AEABI_ENTRY(dcmpun)
 	LOAD_DREG(d1, r2, r3)
 	vcmp.f64 d0, d1
 	vmrs     APSR_nzcv, fpscr
+	ite      vc
 	movvc    r0, #0
 	movvs    r0, #1
 	RET

Modified: head/lib/libc/arm/aeabi/aeabi_vfp_float.S
==============================================================================
--- head/lib/libc/arm/aeabi/aeabi_vfp_float.S	Tue May 12 09:35:50 2015	(r282815)
+++ head/lib/libc/arm/aeabi/aeabi_vfp_float.S	Tue May 12 10:03:14 2015	(r282816)
@@ -62,6 +62,7 @@ AEABI_ENTRY(fcmpeq)
 	LOAD_SREGS(s0, s1, r0, r1)
 	vcmp.f32 s0, s1
 	vmrs     APSR_nzcv, fpscr
+	ite      ne
 	movne    r0, #0
 	moveq    r0, #1
 	RET
@@ -72,8 +73,9 @@ AEABI_ENTRY(fcmplt)
 	LOAD_SREGS(s0, s1, r0, r1)
 	vcmp.f32 s0, s1
 	vmrs     APSR_nzcv, fpscr
+	ite      cs
 	movcs    r0, #0
-	movlt    r0, #1
+	movcc    r0, #1
 	RET
 AEABI_END(fcmplt)
 
@@ -82,6 +84,7 @@ AEABI_ENTRY(fcmple)
 	LOAD_SREGS(s0, s1, r0, r1)
 	vcmp.f32 s0, s1
 	vmrs     APSR_nzcv, fpscr
+	ite      hi
 	movhi    r0, #0
 	movls    r0, #1
 	RET
@@ -92,6 +95,7 @@ AEABI_ENTRY(fcmpge)
 	LOAD_SREGS(s0, s1, r0, r1)
 	vcmp.f32 s0, s1
 	vmrs     APSR_nzcv, fpscr
+	ite      lt
 	movlt    r0, #0
 	movge    r0, #1
 	RET
@@ -102,6 +106,7 @@ AEABI_ENTRY(fcmpgt)
 	LOAD_SREGS(s0, s1, r0, r1)
 	vcmp.f32 s0, s1
 	vmrs     APSR_nzcv, fpscr
+	ite      le
 	movle    r0, #0
 	movgt    r0, #1
 	RET
@@ -112,6 +117,7 @@ AEABI_ENTRY(fcmpun)
 	LOAD_SREGS(s0, s1, r0, r1)
 	vcmp.f32 s0, s1
 	vmrs     APSR_nzcv, fpscr
+	ite      vc
 	movvc    r0, #0
 	movvs    r0, #1
 	RET

Modified: head/lib/libc/arm/gen/_setjmp.S
==============================================================================
--- head/lib/libc/arm/gen/_setjmp.S	Tue May 12 09:35:50 2015	(r282815)
+++ head/lib/libc/arm/gen/_setjmp.S	Tue May 12 10:03:14 2015	(r282816)
@@ -85,7 +85,13 @@ ENTRY(_setjmp)
 
 	add	r0, r0, #(_JB_REG_R4 * 4)
 	/* Store integer registers */
+#ifndef __thumb__
         stmia	r0, {r4-r14}
+#else
+	stmia	r0, {r4-r12}
+	str	r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
+	str	r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
+#endif
 
         mov	r0, #0x00000000
 	RET
@@ -120,15 +126,24 @@ ENTRY(_longjmp)
 
 	add	r0, r0, #(_JB_REG_R4 * 4)
        	/* Restore integer registers */
+#ifndef __thumb__
         ldmia	r0, {r4-r14}
+#else
+	ldmia	r0, {r4-r12}
+	ldr	r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
+	ldr	r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
+#endif
 
 	/* Validate sp and r14 */
 	teq	sp, #0
+	it	ne
 	teqne	r14, #0
+	it	eq
 	beq	botch
 
 	/* Set return value */
 	movs	r0, r1
+	it	eq
 	moveq	r0, #0x00000001
 	RET
 
@@ -137,7 +152,7 @@ botch:
 #if !defined(_STANDALONE)
 	bl	PIC_SYM(_C_LABEL(longjmperror), PLT)
 	bl	PIC_SYM(_C_LABEL(abort), PLT)
-	b	. - 8		/* Cannot get here */
+1:	b	1b		/* Cannot get here */
 #else
 	b	.
 #endif

Modified: head/lib/libc/arm/gen/setjmp.S
==============================================================================
--- head/lib/libc/arm/gen/setjmp.S	Tue May 12 09:35:50 2015	(r282815)
+++ head/lib/libc/arm/gen/setjmp.S	Tue May 12 10:03:14 2015	(r282816)
@@ -90,7 +90,13 @@ ENTRY(setjmp)
 
 	/* Store integer registers */
 	add	r0, r0, #(_JB_REG_R4 * 4)
+#ifndef __thumb__
         stmia	r0, {r4-r14}
+#else
+	stmia	r0, {r4-r12}
+	str	r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
+	str	r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
+#endif
         mov	r0, #0x00000000
 	RET
 
@@ -133,15 +139,24 @@ ENTRY(__longjmp)
 
 	add	r0, r0, #(_JB_REG_R4 * 4)
 	/* Restore integer registers */
+#ifndef __thumb__
         ldmia	r0, {r4-r14}
+#else
+        ldmia	r0, {r4-r12}
+	ldr	r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
+	ldr	r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
+#endif
 
 	/* Validate sp and r14 */
 	teq	sp, #0
+	it	ne
 	teqne	r14, #0
+	it	eq
 	beq	.Lbotch
 
 	/* Set return value */
 	movs	r0, r1
+	it	eq
 	moveq	r0, #0x00000001
 	RET
 
@@ -149,5 +164,5 @@ ENTRY(__longjmp)
 .Lbotch:
 	bl	PIC_SYM(_C_LABEL(longjmperror), PLT)
 	bl	PIC_SYM(_C_LABEL(abort), PLT)
-	b	. - 8		/* Cannot get here */
+1:	b	1b		/* Cannot get here */
 END(__longjmp)

