git: 03a07de0d5ee - main - arm: Handle VFP exceptions from the kernel
Date: Mon, 11 Dec 2023 14:16:37 UTC
The branch main has been updated by markj:
URL: https://cgit.FreeBSD.org/src/commit/?id=03a07de0d5ee7d58069152070c42d55f7ec32b7c
commit 03a07de0d5ee7d58069152070c42d55f7ec32b7c
Author: Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2023-12-11 14:08:34 +0000
Commit: Mark Johnston <markj@FreeBSD.org>
CommitDate: 2023-12-11 14:08:34 +0000
arm: Handle VFP exceptions from the kernel
vfp_bounce() is called when handling an undefined instruction exception
to see whether the VFP needs to be enabled. Previously it would
unconditionally panic if the exception came from the kernel, which was
simply wrong and did not permit lazy initialization of VFP state in the
kernel. However, this functionality can be useful and is supported by
arm's fpu_kern_enter() implementation. Thus, relax the assertions and
consume the exception if the thread was in an FPU section.
Based on a patch from Stormshield.
Reviewed by: andrew
MFC after: 2 weeks
Sponsored by: Klara, Inc.
Sponsored by: Stormshield
Differential Revision: https://reviews.freebsd.org/D42971
---
sys/arm/arm/vfp.c | 19 ++++++++++++-------
1 file changed, 12 insertions(+), 7 deletions(-)
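
As the commit message notes, kernel code can use the VFP by bracketing
floating-point work with arm's fpu_kern_enter() and fpu_kern_leave(). Below is
a minimal sketch of such a consumer; the do_neon_work() helper is hypothetical,
and the exact header, flag, and helper names are assumptions to be checked
against the arm machine-dependent headers rather than a verbatim API reference.

#include <sys/param.h>
#include <sys/proc.h>

#include <machine/vfp.h>	/* fpu_kern_*() declarations; header name may differ */

/* Hypothetical VFP/NEON-using routine standing in for real kernel FP code. */
static void do_neon_work(void);

static void
kernel_vfp_example(void)
{
	struct fpu_kern_ctx *ctx;

	/* Allocate a save area for any FP state we might interrupt. */
	ctx = fpu_kern_alloc_ctx(0);

	/*
	 * Mark this thread as being in an FPU section.  With this change,
	 * the first VFP instruction below may trap into vfp_bounce(), which
	 * now enables the unit lazily instead of panicking.
	 */
	fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
	do_neon_work();
	fpu_kern_leave(curthread, ctx);

	fpu_kern_free_ctx(ctx);
}
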
diff --git a/sys/arm/arm/vfp.c b/sys/arm/arm/vfp.c
index f2979d4a2b27..bbcb468391b6 100644
--- a/sys/arm/arm/vfp.c
+++ b/sys/arm/arm/vfp.c
@@ -196,8 +196,9 @@ vfp_init(void)
SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);
-/* start VFP unit, restore the vfp registers from the PCB and retry
- * the instruction
+/*
+ * Start the VFP unit, restore the VFP registers from the PCB and retry
+ * the instruction.
*/
static int
vfp_bounce(u_int addr, u_int insn, struct trapframe *frame, int code)
@@ -206,9 +207,6 @@ vfp_bounce(u_int addr, u_int insn, struct trapframe *frame, int code)
struct pcb *curpcb;
ksiginfo_t ksi;
- if ((code & FAULT_USER) == 0)
- panic("undefined floating point instruction in supervisor mode");
-
critical_enter();
/*
@@ -242,13 +240,19 @@ vfp_bounce(u_int addr, u_int insn, struct trapframe *frame, int code)
return 1;
}
+ curpcb = curthread->td_pcb;
+ if ((code & FAULT_USER) == 0 &&
+ (curpcb->pcb_fpflags & PCB_FP_KERN) == 0) {
+ critical_exit();
+ return (1);
+ }
+
/*
* If the last time this thread used the VFP it was on this core, and
* the last thread to use the VFP on this core was this thread, then the
* VFP state is valid, otherwise restore this thread's state to the VFP.
*/
fmxr(fpexc, fpexc | VFPEXC_EN);
- curpcb = curthread->td_pcb;
cpu = PCPU_GET(cpuid);
if (curpcb->pcb_vfpcpu != cpu || curthread != PCPU_GET(fpcurthread)) {
vfp_restore(curpcb->pcb_vfpsaved);
@@ -258,7 +262,8 @@ vfp_bounce(u_int addr, u_int insn, struct trapframe *frame, int code)
critical_exit();
- KASSERT(curpcb->pcb_vfpsaved == &curpcb->pcb_vfpstate,
+ KASSERT((code & FAULT_USER) == 0 ||
+ curpcb->pcb_vfpsaved == &curpcb->pcb_vfpstate,
("Kernel VFP state in use when entering userspace"));
return (0);
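
The new check pairs with what fpu_kern_enter() itself does: it saves any live
VFP state and marks the thread's PCB as being inside a kernel FPU section, and
that PCB_FP_KERN flag is what the relaxed vfp_bounce() test looks for before
consuming a kernel-mode trap. A simplified, paraphrased sketch of that side of
the interface (not the verbatim source; the save/restore details are elided):

void
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
	struct pcb *pcb = td->td_pcb;

	/* ... save the current VFP state into ctx and switch pcb_vfpsaved ... */

	/*
	 * Record that the thread is inside an FPU section; vfp_bounce()
	 * consumes kernel-mode VFP traps only when this flag is set,
	 * enabling the unit lazily on first use.
	 */
	pcb->pcb_fpflags |= PCB_FP_KERN;
}

For kernel-mode traps outside such a section, vfp_bounce() now returns 1, so
the fault is handed back to the generic undefined-instruction handling rather
than being reported by a VFP-specific panic.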