git: 81f07332c03f - main - arm64: tidy up Top-Byte-Ignore (TBI) in the kernel

From: Andrew Turner <andrew_at_FreeBSD.org>
Date: Wed, 06 Aug 2025 17:38:00 UTC
The branch main has been updated by andrew:

URL: https://cgit.FreeBSD.org/src/commit/?id=81f07332c03fd2ac6efa8e15b1659a573d250329

commit 81f07332c03fd2ac6efa8e15b1659a573d250329
Author:     Harry Moulton <harry.moulton@arm.com>
AuthorDate: 2025-07-31 14:10:57 +0000
Commit:     Andrew Turner <andrew@FreeBSD.org>
CommitDate: 2025-07-31 14:27:06 +0000

    arm64: tidy up Top-Byte-Ignore (TBI) in the kernel
    
    In preparation for enabling TBI for processes from 15.0, we need to
    clean up the routines that copy data between userspace and the kernel.
    These functions check that the address is within the valid userspace
    range; however, as the userspace and kernel ranges may overlap when
    TBI is enabled, we need to mask off the top 8 bits before that check.
    
    Processes not using TBI are unaffected, as the hardware will still
    check all bits in the address; however, this check will happen at the
    first load/store instruction.
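    
    As a minimal sketch of the overlap (USER_LIMIT and the example pointer
    below are illustrative stand-ins, not the real VM_MAXUSER_ADDRESS), a
    TBI-tagged pointer only passes a plain limit check once the top byte
    is cleared:
    
        #include <stdint.h>
        #include <stdio.h>
    
        #define TBI_ADDR_MASK   0xff00000000000000UL    /* top-byte tag bits */
        #define USER_LIMIT      0x0001000000000000UL    /* stand-in VA limit */
    
        int
        main(void)
        {
                uint64_t tagged = 0x5600000000001000UL;    /* tag 0x56 in bits 63:56 */
                uint64_t masked = tagged & ~TBI_ADDR_MASK; /* clear the tag */
    
                printf("tagged below limit: %d\n", tagged < USER_LIMIT); /* 0 */
                printf("masked below limit: %d\n", masked < USER_LIMIT); /* 1 */
                return (0);
        }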
    
    Reviewed by:    andrew
    Sponsored by:   Arm Ltd
    Differential Revision:  https://reviews.freebsd.org/D49119
---
 sys/arm64/arm64/copyinout.S | 18 ++++++++++++++++--
 sys/arm64/arm64/support.S   |  9 ++++++++-
 sys/arm64/include/vmparam.h |  3 +++
 3 files changed, 27 insertions(+), 3 deletions(-)
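
A rough C model of the new check_user_access flow (USER_LIMIT and the helper
name are stand-ins for this sketch; the authoritative logic is the assembly in
the hunks below): mask the tag byte, add the size, then reject on carry or
when the end falls at or beyond the limit.

    #include <stdbool.h>
    #include <stdint.h>

    #define TBI_ADDR_MASK   0xff00000000000000UL
    #define USER_LIMIT      0x0001000000000000UL    /* stand-in limit */

    static bool
    user_range_ok(uint64_t uaddr, uint64_t size)
    {
            uint64_t end;

            uaddr &= ~TBI_ADDR_MASK;        /* and  x6, xN, #(~TBI_ADDR_MASK) */
            if (__builtin_add_overflow(uaddr, size, &end))
                    return (false);         /* adds ...; b.cs bad_access_func */
            return (end < USER_LIMIT);      /* cmp  x6, x7; b.cs bad_access_func */
    }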

diff --git a/sys/arm64/arm64/copyinout.S b/sys/arm64/arm64/copyinout.S
index 26dd0b4cf14f..e41c4b5f6734 100644
--- a/sys/arm64/arm64/copyinout.S
+++ b/sys/arm64/arm64/copyinout.S
@@ -37,7 +37,14 @@
 #include "assym.inc"
 
 .macro check_user_access user_arg, size_arg, bad_access_func
-	adds	x6, x\user_arg, x\size_arg
+	/*
+	 * TBI is enabled from 15.0. Clear the top byte of the userspace
+	 * address before checking whether it's within the given limit.
+	 * The later load/store instructions will fault if TBI is disabled
+	 * for the current process.
+	 */
+	and	x6, x\user_arg, #(~TBI_ADDR_MASK)
+	adds	x6, x6, x\size_arg
 	b.cs	\bad_access_func
 	ldr	x7, =VM_MAXUSER_ADDRESS
 	cmp	x6, x7
@@ -100,13 +107,20 @@ ENTRY(copyinstr)
 	adr	x6, copyio_fault /* Get the handler address */
 	SET_FAULT_HANDLER(x6, x7) /* Set the handler */
 
+	/*
+	 * As in check_user_access, mask off the TBI bits for the cmp
+	 * instruction. The load will fault if TBI is disabled, but we
+	 * still need to check that the address didn't wrap.
+	 */
+	and	x6, x0, #(~TBI_ADDR_MASK)
 	ldr	x7, =VM_MAXUSER_ADDRESS
-1:	cmp	x0, x7
+1:	cmp	x6, x7
 	b.cs	copyio_fault
 	ldtrb	w4, [x0]	/* Load from uaddr */
 	add	x0, x0, #1	/* Next char */
 	strb	w4, [x1], #1	/* Store in kaddr */
 	add	x5, x5, #1	/* count++ */
+	add	x6, x6, #1	/* Increment masked address */
 	cbz	w4, 2f		/* Break when NUL-terminated */
 	sub	x2, x2, #1	/* len-- */
 	cbnz	x2, 1b
diff --git a/sys/arm64/arm64/support.S b/sys/arm64/arm64/support.S
index 2d067c7f7730..bf6fc931e4b0 100644
--- a/sys/arm64/arm64/support.S
+++ b/sys/arm64/arm64/support.S
@@ -39,8 +39,15 @@
 #include "assym.inc"
 
 .macro check_user_access user_arg, limit, bad_addr_func
+	/*
+	 * TBI is enabled from 15.0. Clear the top byte of the userspace
+	 * address before checking whether it's within the given limit.
+	 * The later load/store instructions will fault if TBI is disabled
+	 * for the current process.
+	 */
+	and	x6, x\user_arg, #(~TBI_ADDR_MASK)
 	ldr	x7, =(\limit)
-	cmp	x\user_arg, x7
+	cmp	x6, x7
 	b.cs	\bad_addr_func
 .endm
 
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
index db3af1881282..c30ca1b2bff4 100644
--- a/sys/arm64/include/vmparam.h
+++ b/sys/arm64/include/vmparam.h
@@ -211,6 +211,9 @@
 /* The address bits that hold a pointer authentication code */
 #define	PAC_ADDR_MASK		(0xff7f000000000000UL)
 
+/* The top-byte ignore address bits */
+#define	TBI_ADDR_MASK		0xff00000000000000UL
+
 /* If true addr is in the kernel address space */
 #define	ADDR_IS_KERNEL(addr)	(((addr) & (1ul << 55)) == (1ul << 55))
 /* If true addr is in the user address space */