git: 2f561284033c - main - [PowerPC64] Enforce natural alignment in bcopy

Leandro Lupori luporl at FreeBSD.org
Thu Mar 25 16:11:23 UTC 2021


The branch main has been updated by luporl:

URL: https://cgit.FreeBSD.org/src/commit/?id=2f561284033c0f53d0911baf9056078e6026a278

commit 2f561284033c0f53d0911baf9056078e6026a278
Author:     Leandro Lupori <luporl at FreeBSD.org>
AuthorDate: 2021-03-25 14:54:06 +0000
Commit:     Leandro Lupori <luporl at FreeBSD.org>
CommitDate: 2021-03-25 16:07:01 +0000

    [PowerPC64] Enforce natural alignment in bcopy
    
    POWER architecture CPUs (Book-S) require natural alignment for
    cache-inhibited storage accesses. Since we can't know the caching model
    for a page ahead of time, always enforce natural alignment in bcopy.
    This fixes a SIGBUS when calling the function with misaligned pointers
    on POWER7.
    
    Submitted by:           Bruno Larsen <bruno.larsen at eldorado.org.br>
    Reviewed by:            luporl, bdragon (IRC)
    MFC after:              1 week
    Sponsored by:           Eldorado Research Institute (eldorado.org.br)
    Differential Revision:  https://reviews.freebsd.org/D28776
---
 lib/libc/powerpc64/string/bcopy.S | 34 ++++++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/lib/libc/powerpc64/string/bcopy.S b/lib/libc/powerpc64/string/bcopy.S
index bb860c098feb..4dc80c264362 100644
--- a/lib/libc/powerpc64/string/bcopy.S
+++ b/lib/libc/powerpc64/string/bcopy.S
@@ -34,6 +34,11 @@ __FBSDID("$FreeBSD$");
 #define BLOCK_SIZE			(1 << BLOCK_SIZE_BITS)
 #define BLOCK_SIZE_MASK			(BLOCK_SIZE - 1)
 
+/* Minimum 8-byte alignment, to avoid cache-inhibited alignment faults. */
+#ifndef ALIGN_MASK
+#define ALIGN_MASK			0x7
+#endif
+
 #define MULTI_PHASE_THRESHOLD		512
 
 #ifndef FN_NAME
@@ -66,9 +71,38 @@ ENTRY(FN_NAME)
 	mr	%r4, %r0
 #endif
 
+	/* First check for relative alignment; if unaligned, copy one byte at a time */
+	andi.	%r8, %r3, ALIGN_MASK
+	andi.	%r7, %r4, ALIGN_MASK
+	cmpd	%r7, %r8
+	bne 	.Lunaligned
+
+
 	cmpldi	%r5, MULTI_PHASE_THRESHOLD
 	bge	.Lmulti_phase
+	b	.Lfast_copy
+
+.Lunaligned:
+	/* forward or backward copy? */
+	cmpd	%r4, %r3
+	blt	.Lbackward_unaligned
+
+	/* Just need to setup increment and jump to copy */
+	li	%r0, 1
+	mtctr	%r5
+	b	.Lsingle_1_loop
+
+.Lbackward_unaligned:
+	/* advance src and dst to last byte, set decrement and jump to copy */
+	add	%r3, %r3, %r5
+	addi	%r3, %r3, -1
+	add	%r4, %r4, %r5
+	addi	%r4, %r4, -1
+	li	%r0, -1
+	mtctr	%r5
+	b 	.Lsingle_1_loop
 
+.Lfast_copy:
 	/* align src */
 	cmpd	%r4, %r3		/* forward or backward copy? */
 	blt	.Lbackward_align


More information about the dev-commits-src-all mailing list