git: b6cd84ca2d08 - main - powerpc: replace tailq pointers with iterators
Date: Mon, 21 Apr 2025 04:13:02 UTC
The branch main has been updated by dougm:
URL: https://cgit.FreeBSD.org/src/commit/?id=b6cd84ca2d08b39e6a51a782ddf2b58293be6cba
commit b6cd84ca2d08b39e6a51a782ddf2b58293be6cba
Author: Doug Moore <dougm@FreeBSD.org>
AuthorDate: 2025-04-21 04:04:43 +0000
Commit: Doug Moore <dougm@FreeBSD.org>
CommitDate: 2025-04-21 04:04:43 +0000
powerpc: replace tailq pointers with iterators
Change the powerpc architecture-specific pmap code to walk the pages of a
VM object with pctrie iterators rather than tailq pointers.
Reviewed by: kib
Differential Revision: https://reviews.freebsd.org/D49927
---
sys/powerpc/aim/mmu_oea.c | 16 ++++++++++------
sys/powerpc/aim/mmu_oea64.c | 17 ++++++++++-------
sys/powerpc/aim/mmu_radix.c | 31 +++++++++++++++++--------------
sys/powerpc/booke/pmap.c | 15 +++++++++------
4 files changed, 46 insertions(+), 33 deletions(-)
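
All four enter_object conversions below share one shape: the old code followed
the object's page list with TAILQ_NEXT and bounded the walk with an explicit
pindex check, while the new code folds the bound into the iterator, so the loop
condition reduces to m != NULL. A minimal sketch of the pattern, using only the
iterator calls that appear in the hunks (vm_page_iter_limit_init,
vm_radix_iter_lookup, vm_radix_iter_step, from vm/vm_radix.h); enter_one() is a
hypothetical stand-in for the per-page enter function (moea_enter_locked and
friends), and the locking is elided:

        struct pctrie_iter pages;
        vm_page_t m;
        vm_pindex_t diff, psize;

        /* Before: walk the object's page list, enforcing the bound by hand. */
        psize = atop(end - start);
        m = m_start;
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                enter_one(pm, start + ptoa(diff), m);
                m = TAILQ_NEXT(m, listq);
        }

        /* After: the limit lives in the iterator, so the test is just m != NULL. */
        vm_page_iter_limit_init(&pages, m_start->object,
            m_start->pindex + atop(end - start));
        m = vm_radix_iter_lookup(&pages, m_start->pindex);
        while (m != NULL) {
                enter_one(pm, start + ptoa(m->pindex - m_start->pindex), m);
                m = vm_radix_iter_step(&pages); /* next resident page, or NULL */
        }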
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 7f1f64a51db5..7746b668265d 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -135,6 +135,7 @@
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pageout.h>
+#include <vm/vm_radix.h>
#include <vm/uma.h>
#include <machine/cpu.h>
@@ -1235,20 +1236,23 @@ void
moea_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
vm_page_t m_start, vm_prot_t prot)
{
+ struct pctrie_iter pages;
+ vm_offset_t va;
vm_page_t m;
- vm_pindex_t diff, psize;
VM_OBJECT_ASSERT_LOCKED(m_start->object);
- psize = atop(end - start);
- m = m_start;
+ vm_page_iter_limit_init(&pages, m_start->object,
+ m_start->pindex + atop(end - start));
+ m = vm_radix_iter_lookup(&pages, m_start->pindex);
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pm);
- while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
- moea_enter_locked(pm, start + ptoa(diff), m, prot &
+ while (m != NULL) {
+ va = start + ptoa(m->pindex - m_start->pindex);
+ moea_enter_locked(pm, va, m, prot &
(VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_QUICK_LOCKED,
0);
- m = TAILQ_NEXT(m, listq);
+ m = vm_radix_iter_step(&pages);
}
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pm);
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 7e23d73557a5..e24f591498dd 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -81,6 +81,7 @@
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_dumpset.h>
+#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>
@@ -1826,17 +1827,18 @@ void
moea64_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
vm_page_t m_start, vm_prot_t prot)
{
+ struct pctrie_iter pages;
vm_page_t m;
- vm_pindex_t diff, psize;
vm_offset_t va;
int8_t psind;
VM_OBJECT_ASSERT_LOCKED(m_start->object);
- psize = atop(end - start);
- m = m_start;
- while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
- va = start + ptoa(diff);
+ vm_page_iter_limit_init(&pages, m_start->object,
+ m_start->pindex + atop(end - start));
+ m = vm_radix_iter_lookup(&pages, m_start->pindex);
+ while (m != NULL) {
+ va = start + ptoa(m->pindex - m_start->pindex);
if ((va & HPT_SP_MASK) == 0 && va + HPT_SP_SIZE <= end &&
m->psind == 1 && moea64_ps_enabled(pm))
psind = 1;
@@ -1846,8 +1848,9 @@ moea64_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
(VM_PROT_READ | VM_PROT_EXECUTE),
PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, psind);
if (psind == 1)
- m = &m[HPT_SP_SIZE / PAGE_SIZE - 1];
- m = TAILQ_NEXT(m, listq);
+ m = vm_radix_iter_jump(&pages, HPT_SP_SIZE / PAGE_SIZE);
+ else
+ m = vm_radix_iter_step(&pages);
}
}
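
This file and mmu_radix.c below also convert the superpage path. The old code
hopped the page pointer arithmetically across the superpage (m = &m[N - 1]) and
then took TAILQ_NEXT; the new code asks the iterator to jump. A minimal sketch
of the stepping policy, assuming vm_radix_iter_jump(&pages, n) advances the
iterator n pindexes and returns the page there (or NULL), as the hunks suggest;
sp_pages, superpage_fits(), and enter_superpage() are hypothetical stand-ins
for the alignment test and the psind == 1 enter calls:

        while (m != NULL) {
                va = start + ptoa(m->pindex - m_start->pindex);
                if (superpage_fits(va, end, m) && enter_superpage(pm, va, m))
                        /* Skip every page the superpage mapping just covered. */
                        m = vm_radix_iter_jump(&pages, sp_pages);
                else
                        m = vm_radix_iter_step(&pages);
        }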
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
index 388ab386b35a..cda8dd1c0946 100644
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -3334,33 +3334,34 @@ void
mmu_radix_enter_object(pmap_t pmap, vm_offset_t start,
vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
-
+ struct pctrie_iter pages;
struct rwlock *lock;
vm_offset_t va;
vm_page_t m, mpte;
- vm_pindex_t diff, psize;
bool invalidate;
+
VM_OBJECT_ASSERT_LOCKED(m_start->object);
CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
end, m_start, prot);
-
invalidate = false;
- psize = atop(end - start);
mpte = NULL;
- m = m_start;
+ vm_page_iter_limit_init(&pages, m_start->object,
+ m_start->pindex + atop(end - start));
+ m = vm_radix_iter_lookup(&pages, m_start->pindex);
lock = NULL;
PMAP_LOCK(pmap);
- while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
- va = start + ptoa(diff);
+ while (m != NULL) {
+ va = start + ptoa(m->pindex - m_start->pindex);
if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end &&
m->psind == 1 && mmu_radix_ps_enabled(pmap) &&
- pmap_enter_2mpage(pmap, va, m, prot, &lock))
- m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1];
- else
+ pmap_enter_2mpage(pmap, va, m, prot, &lock)) {
+ m = vm_radix_iter_jump(&pages, L3_PAGE_SIZE / PAGE_SIZE);
+ } else {
mpte = mmu_radix_enter_quick_locked(pmap, va, m, prot,
mpte, &lock, &invalidate);
- m = TAILQ_NEXT(m, listq);
+ m = vm_radix_iter_step(&pages);
+ }
}
ptesync();
if (lock != NULL)
@@ -4043,6 +4044,7 @@ void
mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
+ struct pctrie_iter pages;
pml3_entry_t *l3e;
vm_paddr_t pa, ptepa;
vm_page_t p, pdpg;
@@ -4059,7 +4061,9 @@ mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr,
return;
if (!vm_object_populate(object, pindex, pindex + atop(size)))
return;
- p = vm_page_lookup(object, pindex);
+ vm_page_iter_init(&pages, object);
+ p = vm_radix_iter_lookup(&pages, pindex);
+
KASSERT(p->valid == VM_PAGE_BITS_ALL,
("pmap_object_init_pt: invalid page %p", p));
ma = p->md.mdpg_cache_attrs;
@@ -4077,15 +4081,14 @@ mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr,
* the pages are not physically contiguous or have differing
* memory attributes.
*/
- p = TAILQ_NEXT(p, listq);
for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
pa += PAGE_SIZE) {
+ p = vm_radix_iter_next(&pages);
KASSERT(p->valid == VM_PAGE_BITS_ALL,
("pmap_object_init_pt: invalid page %p", p));
if (pa != VM_PAGE_TO_PHYS(p) ||
ma != p->md.mdpg_cache_attrs)
return;
- p = TAILQ_NEXT(p, listq);
}
PMAP_LOCK(pmap);
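
The object_init_pt hunk above converts a physical-contiguity check rather than
a mapping loop, and it uses vm_radix_iter_next rather than vm_radix_iter_step,
presumably because the check needs the page at the immediately following
pindex, not merely the next resident page. A minimal sketch of the converted
scan (KASSERTs and the mapping itself elided; after vm_object_populate()
succeeds, every pindex in the range is resident, which is why the hunk does
not test for NULL):

        vm_page_iter_init(&pages, object);
        p = vm_radix_iter_lookup(&pages, pindex);
        ptepa = VM_PAGE_TO_PHYS(p);
        for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; pa += PAGE_SIZE) {
                p = vm_radix_iter_next(&pages); /* page at the next pindex */
                if (pa != VM_PAGE_TO_PHYS(p) || ma != p->md.mdpg_cache_attrs)
                        return; /* not contiguous, or attributes differ */
        }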
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 9f96255ea00e..62fd21d4f073 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -1457,20 +1457,23 @@ static void
mmu_booke_enter_object(pmap_t pmap, vm_offset_t start,
vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
+ struct pctrie_iter pages;
+ vm_offset_t va;
vm_page_t m;
- vm_pindex_t diff, psize;
VM_OBJECT_ASSERT_LOCKED(m_start->object);
- psize = atop(end - start);
- m = m_start;
+ vm_page_iter_limit_init(&pages, m_start->object,
+ m_start->pindex + atop(end - start));
+ m = vm_radix_iter_lookup(&pages, m_start->pindex);
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
- while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
- mmu_booke_enter_locked(pmap, start + ptoa(diff), m,
+ while (m != NULL) {
+ va = start + ptoa(m->pindex - m_start->pindex);
+ mmu_booke_enter_locked(pmap, va, m,
prot & (VM_PROT_READ | VM_PROT_EXECUTE),
PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
- m = TAILQ_NEXT(m, listq);
+ m = vm_radix_iter_step(&pages);
}
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);