git: fe3001bde484 - stable/15 - x86: change ap_boot_mtx from spinlock mutex to naive lock

From: Konstantin Belousov <kib@FreeBSD.org>
Date: Mon, 12 Jan 2026 04:05:27 UTC
The branch stable/15 has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=fe3001bde4849757064e8bb63fdab8b3b797dd6d

commit fe3001bde4849757064e8bb63fdab8b3b797dd6d
Author:     Konstantin Belousov <kib@FreeBSD.org>
AuthorDate: 2026-01-03 01:09:32 +0000
Commit:     Konstantin Belousov <kib@FreeBSD.org>
CommitDate: 2026-01-12 04:05:08 +0000

    x86: change ap_boot_mtx from spinlock mutex to naive lock
    
    PR:     289297
    
    (cherry picked from commit 55305b590797524dd1cecfc9406869700e925e51)
---
 sys/amd64/amd64/mp_machdep.c |  1 -
 sys/i386/i386/mp_machdep.c   |  2 --
 sys/x86/include/x86_smp.h    |  1 -
 sys/x86/x86/mp_x86.c         | 17 ++++++++++++-----
 4 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 00e99f9df192..9af87787e7cc 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -331,7 +331,6 @@ start_all_aps(void)
 	u_char mpbiosreason;
 
 	amd64_mp_alloc_pcpu();
-	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
 
 	MPASS(bootMP_size <= PAGE_SIZE);
 	m_boottramp = vm_page_alloc_noobj_contig(0, 1, 0,
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index f7d9289b1848..18ec0d83fad3 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -369,8 +369,6 @@ start_all_aps(void)
 	u_int32_t mpbioswarmvec;
 	int apic_id, cpu;
 
-	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
-
 	pmap_remap_lower(true);
 
 	/* install the AP 1st level boot code */
diff --git a/sys/x86/include/x86_smp.h b/sys/x86/include/x86_smp.h
index 8b9eb2ec9b66..5cecfab9d183 100644
--- a/sys/x86/include/x86_smp.h
+++ b/sys/x86/include/x86_smp.h
@@ -35,7 +35,6 @@ extern char *bootSTK;
 extern void *bootstacks[];
 extern unsigned int bootMP_size;
 extern volatile int aps_ready;
-extern struct mtx ap_boot_mtx;
 extern int cpu_logical;
 extern int cpu_cores;
 extern volatile uint32_t smp_tlb_generation;
diff --git a/sys/x86/x86/mp_x86.c b/sys/x86/x86/mp_x86.c
index c0da41a4d222..8345117f5b6f 100644
--- a/sys/x86/x86/mp_x86.c
+++ b/sys/x86/x86/mp_x86.c
@@ -124,7 +124,7 @@ volatile cpuset_t resuming_cpus;
 volatile cpuset_t toresume_cpus;
 
 /* used to hold the AP's until we are ready to release them */
-struct mtx ap_boot_mtx;
+static int ap_boot_lock;
 
 /* Set to 1 once we're ready to let the APs out of the pen. */
 volatile int aps_ready = 0;
@@ -1086,8 +1086,6 @@ init_secondary_tail(void)
 	PCPU_SET(curthread, PCPU_GET(idlethread));
 	schedinit_ap();
 
-	mtx_lock_spin(&ap_boot_mtx);
-
 	mca_init();
 
 	/* Init local apic for irq's */
@@ -1096,6 +1094,15 @@ init_secondary_tail(void)
 	/* Set memory range attributes for this CPU to match the BSP */
 	mem_range_AP_init();
 
+	/*
+	 * Use naive spinning lock instead of the real spinlock, since
+	 * printfs() below might take a very long time and trigger
+	 * spinlock timeout panics.  This is the only use of the
+	 * ap_boot_lock anyway.
+	 */
+	while (atomic_cmpset_acq_int(&ap_boot_lock, 0, 1) == 0)
+		ia32_pause();
+
 	smp_cpus++;
 
 	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
@@ -1117,6 +1124,8 @@ init_secondary_tail(void)
 		atomic_store_rel_int(&smp_started, 1);
 	}
 
+	atomic_store_rel_int(&ap_boot_lock, 0);
+
 #ifdef __amd64__
 	if (pmap_pcid_enabled)
 		load_cr4(rcr4() | CR4_PCIDE);
@@ -1125,8 +1134,6 @@ init_secondary_tail(void)
 	load_fs(_ufssel);
 #endif
 
-	mtx_unlock_spin(&ap_boot_mtx);
-
 	/* Wait until all the AP's are up. */
 	while (atomic_load_acq_int(&smp_started) == 0)
 		ia32_pause();
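
The hunks above replace the MTX_SPIN mutex with a hand-rolled spinning lock built
directly on atomic_cmpset_acq_int()/atomic_store_rel_int() plus ia32_pause(). For
illustration only, here is a minimal userland sketch of the same acquire/release
pattern using C11 atomics rather than the kernel's atomic(9) API; the names
naive_lock()/naive_unlock() and the use of _mm_pause() are hypothetical and not
part of the commit:

	/*
	 * Illustrative userland sketch (not kernel code) of the "naive lock"
	 * pattern adopted by the change: acquire with a compare-and-swap that
	 * has acquire ordering, spin with a CPU pause, release with a plain
	 * release store.
	 */
	#include <stdatomic.h>
	#include <immintrin.h>		/* _mm_pause(), x86 only */

	static atomic_int boot_lock = 0;

	static void
	naive_lock(void)
	{
		int expected;

		for (;;) {
			expected = 0;
			/* Analogue of atomic_cmpset_acq_int(&ap_boot_lock, 0, 1). */
			if (atomic_compare_exchange_weak_explicit(&boot_lock,
			    &expected, 1, memory_order_acquire,
			    memory_order_relaxed))
				return;
			_mm_pause();	/* analogue of ia32_pause() */
		}
	}

	static void
	naive_unlock(void)
	{
		/* Analogue of atomic_store_rel_int(&ap_boot_lock, 0). */
		atomic_store_explicit(&boot_lock, 0, memory_order_release);
	}

Unlike a MTX_SPIN mutex, such a lock carries no spinlock-timeout diagnostics, so
the lengthy console output printed while APs announce themselves cannot trip a
spinlock timeout panic, which matches the rationale in the comment added by the
diff.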