git: fb07770acdf1 - stable/12 - amd64: Stop using REP MOVSB for backward memmove()s.

From: Alexander Motin <mav@FreeBSD.org>
Date: Thu, 30 Jun 2022 01:16:32 UTC
The branch stable/12 has been updated by mav:

URL: https://cgit.FreeBSD.org/src/commit/?id=fb07770acdf179a609261e09bb895c52522c9d7b

commit fb07770acdf179a609261e09bb895c52522c9d7b
Author:     Alexander Motin <mav@FreeBSD.org>
AuthorDate: 2022-06-16 17:01:12 +0000
Commit:     Alexander Motin <mav@FreeBSD.org>
CommitDate: 2022-06-30 01:16:03 +0000

    amd64: Stop using REP MOVSB for backward memmove()s.
    
    The Enhanced REP MOVSB (ERMS) feature of Intel CPUs starting with
    Ivy Bridge makes REP MOVSB the fastest way to copy memory in most
    cases.  However, the Intel Optimization Reference Manual says:
    "setting the DF to force REP MOVSB to copy bytes from high towards
    low addresses will experience significant performance degradation".
    Measurements on Intel Cascade Lake and Alder Lake, as well as on
    AMD Zen3, show that it can drop throughput to as low as
    2.5-3.5 GB/s, compared to the ~10-30 GB/s of the REP MOVSQ or
    hand-rolled loop used for non-ERMS CPUs.
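
    As an illustration only (this is not the kernel code), the two
    REP MOVSB directions compare like this in C with GCC/Clang x86-64
    inline assembly; the function names here are hypothetical:

        #include <stddef.h>

        /* DF = 0: the fast ERMS case, copying low to high. */
        static void
        copy_fwd_erms(void *dst, const void *src, size_t len)
        {
                __asm__ volatile("rep movsb"
                    : "+D" (dst), "+S" (src), "+c" (len)
                    :: "memory");
        }

        /* DF = 1: the case the manual warns about, high to low. */
        static void
        copy_bwd_erms(void *dst, const void *src, size_t len)
        {
                char *d;
                const char *s;

                if (len == 0)
                        return;
                /* Point at the last byte, as the deleted leaq's do. */
                d = (char *)dst + len - 1;
                s = (const char *)src + len - 1;
                __asm__ volatile("std; rep movsb; cld"
                    : "+D" (d), "+S" (s), "+c" (len)
                    :: "memory");
        }

    Timing both over a large buffer on an ERMS-capable CPU reproduces
    the gap measured above: the forward copy runs near memory
    bandwidth, while the backward one falls to a few GB/s.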
    
    This patch keeps using ERMS for forward-ordered memory copies, but
    removes it for backward overlapped moves, where it performs poorly.
    
    Reviewed by:    mjg
    MFC after:      2 weeks
    
    (cherry picked from commit 6210ac95a19416832601b571409a3e08b76d107f)
---
 sys/amd64/amd64/support.S | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
index f2bc07701f3c..5e1b980c0ea2 100644
--- a/sys/amd64/amd64/support.S
+++ b/sys/amd64/amd64/support.S
@@ -531,13 +531,6 @@ END(memcmp)
 	ALIGN_TEXT
 2256:
 	std
-.if \erms == 1
-	leaq	-1(%rdi,%rcx),%rdi
-	leaq	-1(%rsi,%rcx),%rsi
-	rep
-	movsb
-	cld
-.else
 	leaq	-8(%rdi,%rcx),%rdi
 	leaq	-8(%rsi,%rcx),%rsi
 	shrq	$3,%rcx
@@ -547,7 +540,6 @@ END(memcmp)
 	movq	%rdx,%rcx
 	andb	$7,%cl
 	jne	2004b
-.endif
 	\end
 	ret
 .endif
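
For reference, a C rendering of the retained backward path and of the
usual memmove() direction dispatch might look as follows.  This is an
illustrative sketch, not the kernel's code: copy_fwd_erms() refers to
the snippet in the commit message above, and all names are hypothetical.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /*
     * The non-ERMS backward path: whole quadwords from the top down
     * (std; rep movsq), then the 0-7 leftover bytes (andb $7,%cl).
     */
    static void
    copy_bwd_words(void *dst, const void *src, size_t len)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;
            size_t r = len & 7;     /* leftover bytes at the bottom */
            size_t i = len;

            while (i >= r + 8) {
                    uint64_t w;

                    i -= 8;
                    /* Load the word before storing it, as MOVSQ does. */
                    memcpy(&w, &s[i], sizeof(w));
                    memcpy(&d[i], &w, sizeof(w));
            }
            while (i > 0) {
                    i--;
                    d[i] = s[i];
            }
    }

    /* Copy forward unless dst overlaps the tail of src. */
    void *
    my_memmove(void *dst, const void *src, size_t len)
    {
            if ((uintptr_t)dst - (uintptr_t)src >= len)
                    copy_fwd_erms(dst, src, len);
            else
                    copy_bwd_words(dst, src, len);
            return (dst);
    }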