svn commit: r278923 - user/nwhitehorn/ppc64-pmap-rework/aim
Nathan Whitehorn
nwhitehorn at FreeBSD.org
Tue Feb 17 21:39:23 UTC 2015
Author: nwhitehorn
Date: Tue Feb 17 21:39:22 2015
New Revision: 278923
URL: https://svnweb.freebsd.org/changeset/base/278923
Log:
Close race in pmap_remove_all() that could result in pmap_page_is_mapped()
briefly returning true after the function returned. This was caused by
pmap_remove_all() skipping mappings that were already marked dead by a
removal in progress elsewhere, which could leave them on the page's PV
list after pmap_remove_all() returned.
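In outline, the fix splits the old moea64_pvo_remove() into a pmap-side
half and a page-side half, and uses a NULL pvo_pmap as a sentinel so
that when pmap_remove_all() and a concurrent removal both reach the
page-side teardown, whichever arrives second backs off. A minimal
sketch of the sentinel idiom, paraphrased from the diff below (the
function name is illustrative; locking, the managed-page check, and
RC-bit handling are elided):

	static void
	pvo_remove_from_page_sketch(struct pvo_entry *pvo)
	{
		/* The second of two racing callers sees NULL and backs off. */
		if (pvo->pvo_pmap == NULL)
			return;
		pvo->pvo_pmap = NULL;

		/*
		 * Exactly one caller reaches this point and delinks the PVO
		 * from the page's PV list while the PV lock is held, so
		 * pmap_page_is_mapped() can no longer see a stale entry.
		 */
		LIST_REMOVE(pvo, pvo_vlink);
	}

In the patch this logic sits at the top of moea64_pvo_remove_from_page(),
and moea64_remove_all() now calls it even for PVOs that are already dead.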
Modified:
user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.c
Modified: user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.c
==============================================================================
--- user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.c Tue Feb 17 21:18:17 2015 (r278922)
+++ user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.c Tue Feb 17 21:39:22 2015 (r278923)
@@ -116,14 +116,16 @@ uintptr_t moea64_get_unique_vsid(void);
*
*/
-static struct mtx_padalign pv_lock[PA_LOCK_COUNT];
+#define PV_LOCK_COUNT PA_LOCK_COUNT*3
+static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
-#define PV_LOCKPTR(pa) ((struct mtx *)(&pv_lock[pa_index(pa) % PA_LOCK_COUNT]))
-#define PV_LOCK(pa) mtx_lock(PV_LOCKPTR(pa))
-#define PV_TRYLOCK(pa) mtx_trylock(PV_LOCKPTR(pa))
-#define PV_UNLOCK(pa) mtx_unlock(PV_LOCKPTR(pa))
-#define PV_PAGE_LOCK(m) PV_LOCK(VM_PAGE_TO_PHYS(m))
+#define PV_LOCKPTR(pa) ((struct mtx *)(&pv_lock[pa_index(pa) % PV_LOCK_COUNT]))
+#define PV_LOCK(pa) mtx_lock(PV_LOCKPTR(pa))
+#define PV_UNLOCK(pa) mtx_unlock(PV_LOCKPTR(pa))
+#define PV_LOCKASSERT(pa) mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
+#define PV_PAGE_LOCK(m) PV_LOCK(VM_PAGE_TO_PHYS(m))
#define PV_PAGE_UNLOCK(m) PV_UNLOCK(VM_PAGE_TO_PHYS(m))
+#define PV_PAGE_LOCKASSERT(m) PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
struct ofw_map {
cell_t om_va;
@@ -215,7 +217,6 @@ int moea64_large_page_shift = 0;
*/
static int moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
struct pvo_head *pvo_head);
-static void moea64_pvo_remove(mmu_t, struct pvo_entry *);
static void moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
static void moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
@@ -797,7 +798,7 @@ moea64_mid_bootstrap(mmu_t mmup, vm_offs
* Initialize SLB table lock and page locks
*/
mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
- for (i = 0; i < PA_LOCK_COUNT; i++)
+ for (i = 0; i < PV_LOCK_COUNT; i++)
mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);
/*
@@ -962,7 +963,7 @@ moea64_late_bootstrap(mmu_t mmup, vm_off
/*
* Allocate some things for page zeroing. We put this directly
- * in the page table, marked with LPTE_LOCKED, to avoid any
+ * in the page table and use MOEA64_PTE_REPLACE to avoid any
* of the PVO book-keeping or other parts of the VM system
* from even knowing that this hack exists.
*/
@@ -1020,8 +1021,8 @@ moea64_unwire(mmu_t mmu, pmap_t pm, vm_o
{
struct pvo_entry key, *pvo;
- PMAP_LOCK(pm);
key.pvo_vaddr = sva;
+ PMAP_LOCK(pm);
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
pvo != NULL && PVO_VADDR(pvo) < eva;
pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
@@ -1738,7 +1739,7 @@ moea64_map(mmu_t mmu, vm_offset_t *virt,
if (hw_direct_map) {
/*
* Check if every page in the region is covered by the direct
- * map. The direct map covers all of physical memory, so use
+ * map. The direct map covers all of physical memory. Use
* moea64_calc_wimg() as a shortcut to see if the page is in
* physical memory as a way to see if the direct map covers it.
*/
@@ -2133,6 +2134,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m
{
struct pvo_entry *pvo, *next_pvo;
struct pvo_head freequeue;
+ int wasdead;
pmap_t pmap;
LIST_INIT(&freequeue);
@@ -2141,11 +2143,14 @@ moea64_remove_all(mmu_t mmu, vm_page_t m
LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
- if (!(pvo->pvo_vaddr & PVO_DEAD)) {
- moea64_pvo_remove(mmu, pvo);
+ wasdead = (pvo->pvo_vaddr & PVO_DEAD);
+ if (!wasdead)
+ moea64_pvo_remove_from_pmap(mmu, pvo);
+ moea64_pvo_remove_from_page(mmu, pvo);
+ if (!wasdead)
LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
- }
PMAP_UNLOCK(pmap);
+
}
if ((m->aflags & PGA_WRITEABLE) &&
(atomic_readandclear_32(&m->md.mdpg_attrs) & LPTE_CHG))
@@ -2153,6 +2158,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m
vm_page_aflag_clear(m, PGA_WRITEABLE);
vm_page_aflag_clear(m, PGA_EXECUTABLE);
+ KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
PV_PAGE_UNLOCK(m);
/* Clean up UMA allocations */
@@ -2264,6 +2270,7 @@ static void
moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
{
+ KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap"));
PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO"));
@@ -2298,12 +2305,18 @@ moea64_pvo_remove_from_page(mmu_t mmu, s
KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page"));
+ /* Use NULL pmaps as a sentinel for races in page deletion */
+ if (pvo->pvo_pmap == NULL)
+ return;
+ pvo->pvo_pmap = NULL;
+
/*
* Update vm about page writeability/executability if managed
*/
pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
+ PV_PAGE_LOCKASSERT(pg);
- if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && pg != NULL) {
+ if ((pvo->pvo_vaddr & PVO_MANAGED) && pg != NULL) {
LIST_REMOVE(pvo, pvo_vlink);
if (pvo->pvo_pte.prot & VM_PROT_WRITE) {
/* If page is managed, send RC bits over the fence */
@@ -2323,14 +2336,6 @@ moea64_pvo_remove_from_page(mmu_t mmu, s
moea64_pvo_remove_calls++;
}
-static void
-moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
-{
-
- moea64_pvo_remove_from_pmap(mmu, pvo);
- moea64_pvo_remove_from_page(mmu, pvo);
-}
-
static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
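One caveat in the locking change above: PV_LOCK_COUNT is defined without
parentheses, so pa_index(pa) % PV_LOCK_COUNT in PV_LOCKPTR() expands to
pa_index(pa) % PA_LOCK_COUNT*3, which C's left-to-right grouping parses
as (pa_index(pa) % PA_LOCK_COUNT) * 3. The index stays within the
enlarged array, but only every third lock is ever selected, so the
effective number of lock stripes is unchanged. A parenthesized
definition (a suggested fix, not part of this commit) would spread pages
across all of the stripes:

	#define PV_LOCK_COUNT	(PA_LOCK_COUNT * 3)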