git: 32bb6a6925c0 - main - arm64: Call pmap_bootstrap_dmap from initarm
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Tue, 22 Apr 2025 16:09:43 UTC
The branch main has been updated by andrew:
URL: https://cgit.FreeBSD.org/src/commit/?id=32bb6a6925c02e26cbb113789280ebb4bff54dd2
commit 32bb6a6925c02e26cbb113789280ebb4bff54dd2
Author: Andrew Turner <andrew@FreeBSD.org>
AuthorDate: 2025-04-22 15:57:29 +0000
Commit: Andrew Turner <andrew@FreeBSD.org>
CommitDate: 2025-04-22 16:08:57 +0000
arm64: Call pmap_bootstrap_dmap from initarm
Make pmap_bootstrap_dmap self-contained and call it from initarm. This
will allow us to have a better view of excluded physical address space
when calling pmap_bootstrap.
Reviewed by: imp, markj
Sponsored by: Arm Ltd
Differential Revision: https://reviews.freebsd.org/D49853
---
sys/arm64/arm64/machdep.c | 21 +++++++++++++++++---
sys/arm64/arm64/pmap.c | 49 ++++++++++++++++++++++++++---------------------
sys/arm64/include/pmap.h | 3 ++-
3 files changed, 47 insertions(+), 26 deletions(-)
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
index d3955d8c9863..ca393e11cef9 100644
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -815,14 +815,29 @@ initarm(struct arm64_bootparams *abp)
cache_setup();
- /* Bootstrap enough of pmap to enter the kernel proper */
- pmap_bootstrap(lastaddr - KERNBASE);
- /* Exclude entries needed in the DMAP region, but not phys_avail */
+ /*
+ * Perform a staged bootstrap of virtual memory.
+ *
+ * - First we create the DMAP region. This allows it to be used in
+ * later bootstrapping.
+ * - Next exclude memory that is needed in the DMAP region, but must
+ * not be used by FreeBSD.
+ * - Lastly complete the bootstrapping. It may use the physical
+ * memory map so any excluded memory must be marked as such before
+ * pmap_bootstrap() is called.
+ */
+ pmap_bootstrap_dmap(lastaddr - KERNBASE);
+ /*
+ * Exclude EFI entries needed in the DMAP, e.g. EFI_MD_TYPE_RECLAIM
+ * may contain the ACPI tables but shouldn't be used by the kernel
+ */
if (efihdr != NULL)
efi_map_exclude_entries(efihdr);
/* Do the same for reserve entries in the EFI MEMRESERVE table */
if (efi_systbl_phys != 0)
exclude_efi_memreserve(efi_systbl_phys);
+ /* Continue bootstrapping pmap */
+ pmap_bootstrap();
/*
* We carefully bootstrap the sanitizer map after we've excluded
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index e3b8e3bf01ee..30efd40573d2 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -1211,11 +1211,28 @@ pmap_bootstrap_l3_page(struct pmap_bootstrap_state *state, int i)
MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
}
-static void
-pmap_bootstrap_dmap(void)
+void
+pmap_bootstrap_dmap(vm_size_t kernlen)
{
+ vm_paddr_t start_pa, pa;
+ uint64_t tcr;
int i;
+ tcr = READ_SPECIALREG(tcr_el1);
+
+ /* Verify that the ASID is set through TTBR0. */
+ KASSERT((tcr & TCR_A1) == 0, ("pmap_bootstrap: TCR_EL1.A1 != 0"));
+
+ if ((tcr & TCR_DS) != 0)
+ pmap_lpa_enabled = true;
+
+ pmap_l1_supported = L1_BLOCKS_SUPPORTED;
+
+ start_pa = pmap_early_vtophys(KERNBASE);
+
+ bs_state.freemempos = KERNBASE + kernlen;
+ bs_state.freemempos = roundup2(bs_state.freemempos, PAGE_SIZE);
+
/* Fill in physmap array. */
physmap_idx = physmem_avail(physmap, nitems(physmap));
@@ -1275,6 +1292,12 @@ pmap_bootstrap_dmap(void)
}
cpu_tlb_flushID();
+
+ bs_state.dmap_valid = true;
+
+ /* Exclude the kernel and DMAP region */
+ pa = pmap_early_vtophys(bs_state.freemempos);
+ physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
}
static void
@@ -1305,21 +1328,10 @@ pmap_bootstrap_l3(vm_offset_t va)
* Bootstrap the system enough to run with virtual memory.
*/
void
-pmap_bootstrap(vm_size_t kernlen)
+pmap_bootstrap(void)
{
vm_offset_t dpcpu, msgbufpv;
vm_paddr_t start_pa, pa;
- uint64_t tcr;
-
- tcr = READ_SPECIALREG(tcr_el1);
-
- /* Verify that the ASID is set through TTBR0. */
- KASSERT((tcr & TCR_A1) == 0, ("pmap_bootstrap: TCR_EL1.A1 != 0"));
-
- if ((tcr & TCR_DS) != 0)
- pmap_lpa_enabled = true;
-
- pmap_l1_supported = L1_BLOCKS_SUPPORTED;
/* Set this early so we can use the pagetable walking functions */
kernel_pmap_store.pm_l0 = pagetable_l0_ttbr1;
@@ -1334,20 +1346,13 @@ pmap_bootstrap(vm_size_t kernlen)
kernel_pmap->pm_ttbr = kernel_pmap->pm_l0_paddr;
kernel_pmap->pm_asid_set = &asids;
- bs_state.freemempos = KERNBASE + kernlen;
- bs_state.freemempos = roundup2(bs_state.freemempos, PAGE_SIZE);
-
- /* Create a direct map region early so we can use it for pa -> va */
- pmap_bootstrap_dmap();
- bs_state.dmap_valid = true;
-
/*
* We only use PXN when we know nothing will be executed from it, e.g.
* the DMAP region.
*/
bs_state.table_attrs &= ~TATTR_PXN_TABLE;
- start_pa = pa = pmap_early_vtophys(KERNBASE);
+ start_pa = pmap_early_vtophys(bs_state.freemempos);
/*
* Create the l2 tables up to VM_MAX_KERNEL_ADDRESS. We assume that the
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
index 2503f1df8404..0f23f200f0f6 100644
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -141,7 +141,8 @@ extern pt_entry_t pmap_sh_attr;
#define pmap_vm_page_alloc_check(m)
void pmap_activate_vm(pmap_t);
-void pmap_bootstrap(vm_size_t);
+void pmap_bootstrap_dmap(vm_size_t);
+void pmap_bootstrap(void);
int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
int pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot);
void pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode);