git: c05d7bdaf63d - main - arm64: Make KMSAN aware of exceptions
Date: Thu, 08 Feb 2024 16:36:32 UTC
The branch main has been updated by markj:
URL: https://cgit.FreeBSD.org/src/commit/?id=c05d7bdaf63dff2dede5aee742aeb8b1455e40d5
commit c05d7bdaf63dff2dede5aee742aeb8b1455e40d5
Author: Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2024-02-08 15:57:36 +0000
Commit: Mark Johnston <markj@FreeBSD.org>
CommitDate: 2024-02-08 16:35:11 +0000
arm64: Make KMSAN aware of exceptions
- Call kmsan_intr_enter() when an exception occurs. This ensures that
code running in the exception context does not clobber thread-local
KMSAN state.
- Ensure that stack memory containing trap frames is treated as
initialized.
Co-authored-by: Alexander Stetsenko <alex.stetsenko@klarasystems.com>
Reviewed by: imp
MFC after: 2 weeks
Sponsored by: Klara, Inc.
Sponsored by: Juniper Networks, Inc.
Differential Revision: https://reviews.freebsd.org/D43155
---
 sys/arm64/arm64/exception.S | 34 ++++++++++++++++++++++++++++++++++
 sys/arm64/arm64/trap.c      |  9 +++++++++
 sys/kern/subr_intr.c        |  2 ++
 3 files changed, 45 insertions(+)
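
As context for the diff below, here is a rough C-level sketch of the pattern this change adds: kmsan_intr_enter() and kmsan_intr_leave() bracket each handler so KMSAN's thread-local state is pushed and popped around the trap, and kmsan_mark(..., KMSAN_STATE_INITED) tells KMSAN that the stack memory holding the trap frame is initialized, since it was written by uninstrumented assembly. The handle_exception() wrapper and dispatch_exception() call are hypothetical; in the actual commit the enter/leave calls are made from the assembly entry points and the kmsan_mark() calls from the C handlers.

    #include <sys/param.h>
    #include <sys/msan.h>
    #include <machine/frame.h>

    static void dispatch_exception(struct trapframe *);	/* hypothetical */

    /*
     * Hypothetical C rendering of the hooks added in this commit; the real
     * enter/leave calls live in exception.S and the marking in trap.c.
     */
    static void
    handle_exception(struct trapframe *frame)
    {
            kmsan_intr_enter();             /* push a fresh KMSAN TLS block */

            /*
             * The trap frame was stored by uninstrumented assembly, so its
             * shadow state would otherwise read as uninitialized.
             */
            kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);

            dispatch_exception(frame);      /* hypothetical handler body */

            kmsan_intr_leave();             /* restore the previous block */
    }

With KMSAN disabled, sys/msan.h reduces the kmsan_* calls to no-ops, which is why the C-side kmsan_mark() calls in the diff are made unconditionally while the assembly uses the #ifdef KMSAN macros.
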
diff --git a/sys/arm64/arm64/exception.S b/sys/arm64/arm64/exception.S
index b1990fc71281..bab71fed4453 100644
--- a/sys/arm64/arm64/exception.S
+++ b/sys/arm64/arm64/exception.S
@@ -193,55 +193,89 @@
2:
.endm
+#ifdef KMSAN
+/*
+ * The KMSAN runtime relies on a TLS block to track initialization and origin
+ * state for function parameters and return values. To keep this state
+ * consistent in the face of asynchronous kernel-mode traps, the runtime
+ * maintains a stack of blocks: when handling an exception or interrupt,
+ * kmsan_intr_enter() pushes the new block to be used until the handler is
+ * complete, at which point kmsan_intr_leave() restores the previous block.
+ *
+ * Thus, KMSAN_ENTER/LEAVE hooks are required only in handlers for events that
+ * may have happened while in kernel mode. In particular, they are not required
+ * around amd64_syscall() or ast() calls. Otherwise, kmsan_intr_enter() can be
+ * called unconditionally, without distinguishing between entry from user mode
+ * or kernel mode.
+ */
+#define KMSAN_ENTER bl kmsan_intr_enter
+#define KMSAN_LEAVE bl kmsan_intr_leave
+#else
+#define KMSAN_ENTER
+#define KMSAN_LEAVE
+#endif
+
ENTRY(handle_el1h_sync)
save_registers 1
+ KMSAN_ENTER
ldr x0, [x18, #PC_CURTHREAD]
mov x1, sp
bl do_el1h_sync
+ KMSAN_LEAVE
restore_registers 1
ERET
END(handle_el1h_sync)
ENTRY(handle_el1h_irq)
save_registers 1
+ KMSAN_ENTER
mov x0, sp
bl intr_irq_handler
+ KMSAN_LEAVE
restore_registers 1
ERET
END(handle_el1h_irq)
ENTRY(handle_el0_sync)
save_registers 0
+ KMSAN_ENTER
ldr x0, [x18, #PC_CURTHREAD]
mov x1, sp
str x1, [x0, #TD_FRAME]
bl do_el0_sync
do_ast
+ KMSAN_LEAVE
restore_registers 0
ERET
END(handle_el0_sync)
ENTRY(handle_el0_irq)
save_registers 0
+ KMSAN_ENTER
mov x0, sp
bl intr_irq_handler
do_ast
+ KMSAN_LEAVE
restore_registers 0
ERET
END(handle_el0_irq)
ENTRY(handle_serror)
save_registers 0
+ KMSAN_ENTER
mov x0, sp
1: bl do_serror
b 1b
+ KMSAN_LEAVE
END(handle_serror)
ENTRY(handle_empty_exception)
save_registers 0
+ KMSAN_ENTER
mov x0, sp
1: bl unhandled_exception
b 1b
+ KMSAN_LEAVE
END(handle_empty_exception)
.macro vector name, el
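
The block comment in the hunk above describes the stack of thread-local state blocks only in prose. As an illustration of that push/pop discipline (the type and field names below are hypothetical, not the actual KMSAN runtime):

    #include <stdint.h>
    #include <string.h>

    /*
     * Hypothetical sketch of the per-thread block stack described above;
     * the real KMSAN runtime uses its own structures.
     */
    struct kmsan_block {
            uint64_t param_shadow[8];       /* shadow for function parameters */
            uint64_t retval_shadow;         /* shadow for return values */
    };

    struct kmsan_tls {
            struct kmsan_block blocks[4];   /* interrupted context + nested traps */
            int depth;
    };

    static void
    sketch_intr_enter(struct kmsan_tls *tls)
    {
            /*
             * Push: give the handler a clean block so it cannot clobber the
             * interrupted code's parameter/return-value state.
             */
            tls->depth++;
            memset(&tls->blocks[tls->depth], 0, sizeof(struct kmsan_block));
    }

    static void
    sketch_intr_leave(struct kmsan_tls *tls)
    {
            /* Pop: the interrupted code resumes with its block intact. */
            tls->depth--;
    }
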
diff --git a/sys/arm64/arm64/trap.c b/sys/arm64/arm64/trap.c
index 03cac1e6450e..c0066ae7dcad 100644
--- a/sys/arm64/arm64/trap.c
+++ b/sys/arm64/arm64/trap.c
@@ -33,6 +33,7 @@
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
+#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
@@ -478,6 +479,8 @@ do_el1h_sync(struct thread *td, struct trapframe *frame)
int dfsc;
kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
+ kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);
+
far = frame->tf_far;
/* Read the esr register to get the exception details */
esr = frame->tf_esr;
@@ -591,6 +594,8 @@ do_el0_sync(struct thread *td, struct trapframe *frame)
get_pcpu(), READ_SPECIALREG(tpidr_el1)));
kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
+ kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);
+
far = frame->tf_far;
esr = frame->tf_esr;
exception = ESR_ELx_EXCEPTION(esr);
@@ -737,6 +742,8 @@ do_serror(struct trapframe *frame)
uint64_t esr, far;
kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
+ kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);
+
far = frame->tf_far;
esr = frame->tf_esr;
@@ -752,6 +759,8 @@ unhandled_exception(struct trapframe *frame)
uint64_t esr, far;
kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
+ kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);
+
far = frame->tf_far;
esr = frame->tf_esr;
diff --git a/sys/kern/subr_intr.c b/sys/kern/subr_intr.c
index a03607c781b2..5958f9025b27 100644
--- a/sys/kern/subr_intr.c
+++ b/sys/kern/subr_intr.c
@@ -54,6 +54,7 @@
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
+#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
@@ -334,6 +335,7 @@ intr_irq_handler(struct trapframe *tf)
KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));
kasan_mark(tf, sizeof(*tf), sizeof(*tf), 0);
+ kmsan_mark(tf, sizeof(*tf), KMSAN_STATE_INITED);
VM_CNT_INC(v_intr);
critical_enter();