git: d246ba035960 - stable/14 - riscv: Remove unnecessary invalidations in pmap_enter_quick_locked()
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Sat, 02 Dec 2023 21:56:00 UTC
The branch stable/14 has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=d246ba0359609e9d9f2b54f2ceeaf41152a5c32e

commit d246ba0359609e9d9f2b54f2ceeaf41152a5c32e
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2023-11-02 18:34:26 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2023-12-02 21:54:36 +0000

    riscv: Remove unnecessary invalidations in pmap_enter_quick_locked()

    This function always overwrites an invalid PTE, so if
    pmap_try_insert_pv_entry() fails it is certainly not necessary to
    invalidate anything, because the PTE has not yet been written by that
    point.

    It should also not be necessary to invalidate TLBs after overwriting
    an invalid entry. In principle the TLB could cache negative entries,
    but then the worst case scenario is a spurious fault. Since
    pmap_enter() does not bother issuing an sfence.vma,
    pmap_enter_quick_locked() should behave similarly.

    Reviewed by:    kib
    MFC after:      1 month
    Differential Revision:  https://reviews.freebsd.org/D42291

    (cherry picked from commit 71b77a7172c26783a9d2181d3bed27cf62974200)
---
 sys/riscv/riscv/pmap.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 79248c4c1fcd..7dce0cd9f5be 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -3452,11 +3452,9 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	if (l3 == NULL)
 		panic("pmap_enter_quick_locked: No l3");
 	if (pmap_load(l3) != 0) {
-		if (mpte != NULL) {
+		if (mpte != NULL)
 			mpte->ref_count--;
-			mpte = NULL;
-		}
-		return (mpte);
+		return (NULL);
 	}

 	/*
@@ -3466,13 +3464,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
 		if (mpte != NULL) {
 			SLIST_INIT(&free);
-			if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
-				pmap_invalidate_page(pmap, va);
+			if (pmap_unwire_ptp(pmap, va, mpte, &free))
 				vm_page_free_pages_toq(&free, false);
-			}
-			mpte = NULL;
 		}
-		return (mpte);
+		return (NULL);
 	}

 	/*
@@ -3518,7 +3513,6 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	}
 #endif

-	pmap_invalidate_page(pmap, va);
 	return (mpte);
 }