git: df24ac386cb2 - stable/13 - Remove l1pt from pmap_early_vtophys on arm64
Date: Mon, 04 Apr 2022 11:06:03 UTC
The branch stable/13 has been updated by andrew:
URL: https://cgit.FreeBSD.org/src/commit/?id=df24ac386cb237d94d9b2635dbca45a6653ee54e
commit df24ac386cb237d94d9b2635dbca45a6653ee54e
Author: Andrew Turner <andrew@FreeBSD.org>
AuthorDate: 2022-03-10 18:10:40 +0000
Commit: Andrew Turner <andrew@FreeBSD.org>
CommitDate: 2022-04-04 09:37:06 +0000
Remove l1pt from pmap_early_vtophys on arm64
The first argument was unused as we use an address translation
instruction to get the physical address.
Sponsored by: The FreeBSD Foundation
(cherry picked from commit 854d5a4f7277fcd32b8c92d552cd93d208f5fc64)
---
sys/arm64/arm64/pmap.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 0183dde11847..a7e5cb9186ca 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -753,7 +753,7 @@ pmap_resident_count_dec(pmap_t pmap, int count)
}
static vm_paddr_t
-pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
+pmap_early_vtophys(vm_offset_t va)
{
vm_paddr_t pa_page;
@@ -790,8 +790,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
if (l1_slot != prev_l1_slot) {
prev_l1_slot = l1_slot;
l2 = (pt_entry_t *)freemempos;
- l2_pa = pmap_early_vtophys(kern_l1,
- (vm_offset_t)l2);
+ l2_pa = pmap_early_vtophys((vm_offset_t)l2);
freemempos += PAGE_SIZE;
pmap_store(&pagetable_dmap[l1_slot],
@@ -838,8 +837,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
if (l1_slot != prev_l1_slot) {
prev_l1_slot = l1_slot;
l2 = (pt_entry_t *)freemempos;
- l2_pa = pmap_early_vtophys(kern_l1,
- (vm_offset_t)l2);
+ l2_pa = pmap_early_vtophys((vm_offset_t)l2);
freemempos += PAGE_SIZE;
pmap_store(&pagetable_dmap[l1_slot],
@@ -888,7 +886,7 @@ pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) {
KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
- pa = pmap_early_vtophys(l1pt, l2pt);
+ pa = pmap_early_vtophys(l2pt);
pmap_store(&l1[l1_slot],
(pa & ~Ln_TABLE_MASK) | L1_TABLE);
l2pt += PAGE_SIZE;
@@ -918,7 +916,7 @@ pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
- pa = pmap_early_vtophys(l1pt, l3pt);
+ pa = pmap_early_vtophys(l3pt);
pmap_store(&l2[l2_slot],
(pa & ~Ln_TABLE_MASK) | ATTR_S1_UXN | L2_TABLE);
l3pt += PAGE_SIZE;
@@ -1021,7 +1019,7 @@ pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
virtual_end = VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE);
kernel_vm_end = virtual_avail;
- pa = pmap_early_vtophys(l1pt, freemempos);
+ pa = pmap_early_vtophys(freemempos);
physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
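For context, here is a minimal, hypothetical sketch of the technique the commit
message refers to: the AArch64 "AT S1E1R" instruction asks the MMU itself to
translate a virtual address and report the result in PAR_EL1, so no software
walk of the L1 page table (and hence no l1pt argument) is needed. This is not
the FreeBSD pmap_early_vtophys() body; the helper name, mask macros, and exact
PAR_EL1 handling below are illustrative assumptions, and the code only makes
sense in a kernel (EL1) context with the MMU enabled.

#include <stdint.h>

#define PAR_F           (1ul << 0)              /* PAR_EL1 bit 0: translation faulted */
#define PAR_PA_MASK     0x0000fffffffff000ul    /* PAR_EL1 physical address bits [47:12] */

/* Illustrative only: translate 'va' by asking the MMU, not by walking tables. */
static uint64_t
early_vtophys_sketch(uintptr_t va)
{
        uint64_t par;

        __asm__ __volatile__(
            "at  s1e1r, %1      \n"     /* stage-1 EL1 read translation of va */
            "isb                \n"     /* ensure PAR_EL1 is updated before the read */
            "mrs %0, par_el1"
            : "=r" (par) : "r" (va) : "memory");

        if (par & PAR_F)
                return (~0ul);          /* translation failed */

        /* Page frame from PAR_EL1 combined with the page offset from va. */
        return ((par & PAR_PA_MASK) | (va & 0xffful));
}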