svn commit: r349963 - head/sys/powerpc/aim
Justin Hibbits
jhibbits at FreeBSD.org
Sat Jul 13 03:02:13 UTC 2019
Author: jhibbits
Date: Sat Jul 13 03:02:11 2019
New Revision: 349963
URL: https://svnweb.freebsd.org/changeset/base/349963
Log:
powerpc64/pmap: Reduce scope of PV_LOCK in remove path
Summary:
Since the 'page pv' lock is one of the most highly contended locks, we
need to try to do as much work outside of the lock as we can. The
moea64_pvo_remove_from_page() path is low-hanging fruit, where we can
do some heavy work (PHYS_TO_VM_PAGE()) outside of the lock if needed.
In one path, moea64_remove_all(), the PV lock is already held and can't
be swizzled, so we provide two ways to perform the locked operation, one
that can call PHYS_TO_VM_PAGE outside the lock, and one that calls with
the lock already held.
Reviewed By: luporl
Differential Revision: https://reviews.freebsd.org/D20694
Modified:
head/sys/powerpc/aim/mmu_oea64.c
Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c Sat Jul 13 00:51:11 2019 (r349962)
+++ head/sys/powerpc/aim/mmu_oea64.c Sat Jul 13 03:02:11 2019 (r349963)
@@ -234,6 +234,8 @@ static int moea64_pvo_enter(mmu_t mmu, struct pvo_entr
struct pvo_head *pvo_head);
static void moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
static void moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
+static void moea64_pvo_remove_from_page_locked(mmu_t mmu,
+ struct pvo_entry *pvo);
static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
/*
@@ -1454,9 +1456,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, v
/* Free any dead pages */
if (oldpvo != NULL) {
- PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
moea64_pvo_remove_from_page(mmu, oldpvo);
- PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
free_pvo_entry(oldpvo);
}
@@ -1877,9 +1877,7 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr
/* Free any dead pages */
if (oldpvo != NULL) {
- PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
moea64_pvo_remove_from_page(mmu, oldpvo);
- PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
free_pvo_entry(oldpvo);
}
@@ -2386,9 +2384,7 @@ moea64_remove_pages(mmu_t mmu, pmap_t pm)
PMAP_UNLOCK(pm);
RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
- PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
moea64_pvo_remove_from_page(mmu, pvo);
- PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
RB_REMOVE(pvo_tree, &tofree, pvo);
free_pvo_entry(pvo);
}
@@ -2429,9 +2425,7 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, v
PMAP_UNLOCK(pm);
RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
- PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
moea64_pvo_remove_from_page(mmu, pvo);
- PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
RB_REMOVE(pvo_tree, &tofree, pvo);
free_pvo_entry(pvo);
}
@@ -2458,7 +2452,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
wasdead = (pvo->pvo_vaddr & PVO_DEAD);
if (!wasdead)
moea64_pvo_remove_from_pmap(mmu, pvo);
- moea64_pvo_remove_from_page(mmu, pvo);
+ moea64_pvo_remove_from_page_locked(mmu, pvo);
if (!wasdead)
LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
PMAP_UNLOCK(pmap);
@@ -2631,10 +2625,10 @@ moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entr
}
}
-static void
-moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
+static inline void
+_moea64_pvo_remove_from_page_locked(mmu_t mmu, struct pvo_entry *pvo,
+ vm_page_t m)
{
- struct vm_page *pg;
KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page"));
@@ -2648,18 +2642,40 @@ moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entr
*/
PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
if (pvo->pvo_vaddr & PVO_MANAGED) {
- pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
-
- if (pg != NULL) {
+ if (m != NULL) {
LIST_REMOVE(pvo, pvo_vlink);
- if (LIST_EMPTY(vm_page_to_pvoh(pg)))
- vm_page_aflag_clear(pg,
+ if (LIST_EMPTY(vm_page_to_pvoh(m)))
+ vm_page_aflag_clear(m,
PGA_WRITEABLE | PGA_EXECUTABLE);
}
}
moea64_pvo_entries--;
moea64_pvo_remove_calls++;
+}
+
+static void
+moea64_pvo_remove_from_page_locked(mmu_t mmu, struct pvo_entry *pvo)
+{
+ vm_page_t pg = NULL;
+
+ if (pvo->pvo_vaddr & PVO_MANAGED)
+ pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
+
+ _moea64_pvo_remove_from_page_locked(mmu, pvo, pg);
+}
+
+static void
+moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
+{
+ vm_page_t pg = NULL;
+
+ if (pvo->pvo_vaddr & PVO_MANAGED)
+ pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
+
+ PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
+ _moea64_pvo_remove_from_page_locked(mmu, pvo, pg);
+ PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
}
static struct pvo_entry *
More information about the svn-src-all
mailing list