git: 0e9a3bdeb737 - stable/14 - pmap: Degrade pmap_page_set_memattr*() into a no-op on same attribute
Date: Mon, 28 Jul 2025 13:31:49 UTC
The branch stable/14 has been updated by olce:
URL: https://cgit.FreeBSD.org/src/commit/?id=0e9a3bdeb7376bf04cf21830c4e33aa7efe13961
commit 0e9a3bdeb7376bf04cf21830c4e33aa7efe13961
Author: Olivier Certner <olce@FreeBSD.org>
AuthorDate: 2025-07-08 16:17:30 +0000
Commit: Olivier Certner <olce@FreeBSD.org>
CommitDate: 2025-07-28 13:28:51 +0000
pmap: Degrade pmap_page_set_memattr*() into a no-op on same attribute
For 32-bit arm, move the no-op test that was already in place to the
start of the function so that it stays first even if the '#if 0' block
around the call to sf_buf_invalidate_cache() is uncommented at some
point (if ever).
Reviewed by: jeffpc_josefsipek.net, kib
MFC after: 10 days
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D51253
(cherry picked from commit 9d1f3ce79d85855399663e3977766ec46f28cadd)
Forgotten on commit to main/-CURRENT:
PR: 277476
---
sys/amd64/amd64/pmap.c | 5 +++++
sys/arm/arm/pmap-v6.c | 32 +++++++++++++++-----------------
sys/arm64/arm64/pmap.c | 2 ++
sys/i386/i386/pmap.c | 2 ++
sys/powerpc/aim/mmu_oea.c | 3 +++
sys/powerpc/aim/mmu_oea64.c | 3 +++
sys/powerpc/aim/mmu_radix.c | 4 ++++
sys/riscv/riscv/pmap.c | 2 ++
8 files changed, 36 insertions(+), 17 deletions(-)
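[Editor's note, before the per-architecture hunks: each pmap_page_set_memattr*()
variant gains the same shape, an early return when the requested attribute
already matches the one cached in the page's machine-dependent data, so
whatever per-architecture mapping update and cache/TLB work follows is skipped.
A minimal stand-alone C sketch of that pattern; the struct, field and helper
names are invented for illustration and are not the kernel's.]

#include <stdbool.h>
#include <stdio.h>

typedef int vm_memattr_t;		/* placeholder for the kernel type */

struct page_md {
	vm_memattr_t memattr;		/* stands in for md.pat_mode et al. */
};

static bool maintenance_ran;

static void
expensive_cache_maintenance(void)
{
	/* Models the mapping rewrite and cache/TLB work the real code does. */
	maintenance_ran = true;
}

static void
page_set_memattr(struct page_md *md, vm_memattr_t ma)
{
	/* The new no-op test: nothing to do if the attribute is unchanged. */
	if (md->memattr == ma)
		return;
	md->memattr = ma;
	expensive_cache_maintenance();
}

int
main(void)
{
	struct page_md md = { .memattr = 0 };

	page_set_memattr(&md, 1);	/* attribute changes: maintenance runs */
	maintenance_ran = false;
	page_set_memattr(&md, 1);	/* same attribute: now a no-op */
	printf("second call ran maintenance: %s\n",
	    maintenance_ran ? "yes" : "no");
	return (0);
}

[On 32-bit arm the ma == oma test already existed deeper in the function; the
commit only hoists it so it also guards the '#if 0' sf_buf_invalidate_cache()
path, as the commit message above explains.]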
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 34c7c1c94737..b9681da13d3a 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -9620,6 +9620,8 @@ pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va, vm_page_t m)
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
+ if (m->md.pat_mode == ma)
+ return;
m->md.pat_mode = ma;
@@ -9639,6 +9641,9 @@ pmap_page_set_memattr_noflush(vm_page_t m, vm_memattr_t ma)
{
int error;
+ if (m->md.pat_mode == ma)
+ return;
+
m->md.pat_mode = ma;
if ((m->flags & PG_FICTITIOUS) != 0)
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 95aeba201e4a..737e215b44b6 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -5749,7 +5749,7 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
CTR5(KTR_PMAP, "%s: page %p - 0x%08X oma: %d, ma: %d", __func__, m,
VM_PAGE_TO_PHYS(m), oma, ma);
- if ((m->flags & PG_FICTITIOUS) != 0)
+ if (ma == oma || (m->flags & PG_FICTITIOUS) != 0)
return;
#if 0
/*
@@ -5766,22 +5766,20 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
* If page is not mapped by sf buffer, map the page
* transient and do invalidation.
*/
- if (ma != oma) {
- pa = VM_PAGE_TO_PHYS(m);
- sched_pin();
- pc = get_pcpu();
- cmap2_pte2p = pc->pc_cmap2_pte2p;
- mtx_lock(&pc->pc_cmap_lock);
- if (pte2_load(cmap2_pte2p) != 0)
- panic("%s: CMAP2 busy", __func__);
- pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
- vm_memattr_to_pte2(ma)));
- dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE);
- pte2_clear(cmap2_pte2p);
- tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
- sched_unpin();
- mtx_unlock(&pc->pc_cmap_lock);
- }
+ pa = VM_PAGE_TO_PHYS(m);
+ sched_pin();
+ pc = get_pcpu();
+ cmap2_pte2p = pc->pc_cmap2_pte2p;
+ mtx_lock(&pc->pc_cmap_lock);
+ if (pte2_load(cmap2_pte2p) != 0)
+ panic("%s: CMAP2 busy", __func__);
+ pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
+ vm_memattr_to_pte2(ma)));
+ dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE);
+ pte2_clear(cmap2_pte2p);
+ tlb_flush((vm_offset_t)pc->pc_cmap2_addr);
+ sched_unpin();
+ mtx_unlock(&pc->pc_cmap_lock);
}
/*
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index ef33a7339b9e..8e3161736467 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -6678,6 +6678,8 @@ pmap_unmapbios(void *p, vm_size_t size)
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
+ if (m->md.pv_memattr == ma)
+ return;
m->md.pv_memattr = ma;
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 90ea6e3459cf..d9eeeed69bd5 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -5516,6 +5516,8 @@ __CONCAT(PMTYPE, unmapdev)(void *p, vm_size_t size)
static void
__CONCAT(PMTYPE, page_set_memattr)(vm_page_t m, vm_memattr_t ma)
{
+ if (m->md.pat_mode == ma)
+ return;
m->md.pat_mode = ma;
if ((m->flags & PG_FICTITIOUS) != 0)
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 363eb0c4c9a7..662204ec8b33 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1465,6 +1465,9 @@ moea_page_set_memattr(vm_page_t m, vm_memattr_t ma)
pmap_t pmap;
u_int lo;
+ if (m->md.mdpg_cache_attrs == ma)
+ return;
+
if ((m->oflags & VPO_UNMANAGED) != 0) {
m->md.mdpg_cache_attrs = ma;
return;
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 4266047ee3e2..2ded3b6b5195 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -2121,6 +2121,9 @@ moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma)
CTR3(KTR_PMAP, "%s: pa=%#jx, ma=%#x",
__func__, (uintmax_t)VM_PAGE_TO_PHYS(m), ma);
+ if (m->md.mdpg_cache_attrs == ma)
+ return;
+
if ((m->oflags & VPO_UNMANAGED) != 0) {
m->md.mdpg_cache_attrs = ma;
return;
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
index a15f686d88fe..78c57badee55 100644
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -5888,6 +5888,10 @@ mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
+
+ if (m->md.mdpg_cache_attrs == ma)
+ return;
+
m->md.mdpg_cache_attrs = ma;
/*
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 4812f3274e69..74bbddd7b065 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -4589,6 +4589,8 @@ pmap_unmapbios(void *p, vm_size_t size)
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
+ if (m->md.pv_memattr == ma)
+ return;
m->md.pv_memattr = ma;