PERFORCE change 162000 for review
Arnar Mar Sig
antab at FreeBSD.org
Wed May 13 02:35:22 UTC 2009
http://perforce.freebsd.org/chv.cgi?CH=162000
Change 162000 by antab at antab_farm on 2009/05/13 02:34:49
* Lots of pmap changes, most of them copied from i386 and modified.
* Mark pages PTE_ACCESSED when they are inserted into the TLB.
* Route TLB protection faults from user mode to trap_pfault().
* Move tlbehi and tlbear into struct trapframe instead of passing them as parameters to trap(); they are only stored for TLB exceptions (see the sketch below).
* Add a "show tlb" ddb command, which dumps all TLB entries.
* Add a "show sysreg" ddb command, used to view system registers.
fork() seems to work now, but the kernel later panics on execve().
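
As a rough illustration of the trapframe change (a sketch only, not the
committed code: the tf_regs/tf_sr/tf_pc/tf_tlbehi/tf_tlbear field names are
assumptions; the real layout is in sys/avr32/include/frame.h, whose diff is
truncated from this mail):

	/*
	 * Hypothetical struct trapframe after this change.  The TLB
	 * exception entry code latches TLBEHI/TLBEAR into the frame,
	 * so trap() no longer receives them as parameters.
	 */
	struct trapframe {
		unsigned int	tf_regs[16];	/* r0-r15 (assumed layout) */
		unsigned int	tf_sr;		/* saved status register */
		unsigned int	tf_pc;		/* exception return address */
		unsigned int	tf_tlbehi;	/* TLBEHI; valid only for TLB exceptions */
		unsigned int	tf_tlbear;	/* TLBEAR; valid only for TLB exceptions */
	};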
Affected files ...
.. //depot/projects/avr32/src/sys/avr32/avr32/exception.S#12 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/pmap.c#18 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/support.S#12 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/switch.S#13 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/tlb.c#5 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/trap.c#9 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/vm_machdep.c#11 edit
.. //depot/projects/avr32/src/sys/avr32/include/asm.h#5 edit
.. //depot/projects/avr32/src/sys/avr32/include/atomic.h#7 edit
.. //depot/projects/avr32/src/sys/avr32/include/cpu.h#6 edit
.. //depot/projects/avr32/src/sys/avr32/include/frame.h#3 edit
.. //depot/projects/avr32/src/sys/avr32/include/pmap.h#8 edit
.. //depot/projects/avr32/src/sys/avr32/include/pte.h#7 edit
.. //depot/projects/avr32/src/sys/avr32/include/tlb.h#6 edit
.. //depot/projects/avr32/src/sys/avr32/include/trap.h#8 edit
.. //depot/projects/avr32/src/sys/conf/files.avr32#15 edit
Differences ...
==== //depot/projects/avr32/src/sys/avr32/avr32/exception.S#12 (text+ko) ====
@@ -127,14 +127,15 @@
cp.w r2, 0 /* No entry */
breq tlb_miss_long
- /* Mark dirty if write miss */
+	/* Mark accessed; also mark dirty on write miss */
mfsr r3, AT32_SYS_ECR /* Get exception number */
cp.w r3, T_TLB_MISS_WRITE /* Check if Write miss */
- brne tlb_miss_1
+ brne 1f
orl r2, PTE_DIRTY /* Mark page if so */
+1: orl r2, PTE_ACCESSED /* Mark page accessed */
st.w r0, r2 /* Save entry */
-tlb_miss_1:
+
andl r2, lo(~PTE_SOFTWARE_MASK) /* Mask out software */
orl r2, PTE_SIZE_4K /* All pages are 4k */
@@ -171,14 +172,12 @@
*/
tlb_miss_long:
popm r0-r3
- PUSH_TRAPFRAME(EX)
+ PUSH_TLB_TRAPFRAME(EX)
mfsr r12, AT32_SYS_ECR
mov r11, sp
- mfsr r10, AT32_SYS_TLBEAR
- mfsr r9, AT32_SYS_TLBEHI
csrf AT32_SYS_SR_EM /* Enable exceptions */
rcall trap
- POP_TRAPFRAME(EX)
+ POP_TLB_TRAPFRAME(EX)
rete
tlb_at_ptr:
@@ -268,14 +267,12 @@
* Trap running in Exception mode
*/
ENTRY(handle_tlb_exception)
- PUSH_TRAPFRAME(EX)
+ PUSH_TLB_TRAPFRAME(EX)
mfsr r12, AT32_SYS_ECR
mov r11, sp
- mfsr r10, AT32_SYS_TLBEAR
- mfsr r9, AT32_SYS_TLBEHI
call trap
call handle_ast
- POP_TRAPFRAME(EX)
+ POP_TLB_TRAPFRAME(EX)
rete
END(handle_tlb_exception)
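
For readers following the assembly above: after PUSH_TLB_TRAPFRAME the C side
reads the TLB registers out of the frame instead of r10/r9.  A minimal sketch,
assuming the field names from the struct sketched earlier; T_TLB_MISS_WRITE
appears in the diff, while T_TLB_MISS_READ, T_TLB_PROTECTION and the
trap_pfault() signature here are assumptions:

	void
	trap(int ecr, struct trapframe *tf)
	{
		switch (ecr) {
		case T_TLB_MISS_READ:
		case T_TLB_MISS_WRITE:
		case T_TLB_PROTECTION:
			/* Faulting VA was latched by PUSH_TLB_TRAPFRAME. */
			trap_pfault(tf, (vm_offset_t)tf->tf_tlbear, ecr);
			break;
		default:
			panic("trap: unhandled exception %d", ecr);
		}
	}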
==== //depot/projects/avr32/src/sys/avr32/avr32/pmap.c#18 (text+ko) ====
@@ -35,24 +35,33 @@
#include <machine/cache.h>
#include <machine/debug.h>
-// antab: What does this stand for?
+#define PV_ENTRY_ZONE_MIN 2048
+
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
-static int page_is_managed(vm_offset_t pa);
-static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m, boolean_t wired);
+static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va);
static void free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(void);
-static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va);
-static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
-static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
-static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m);
-static __inline int pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m);
+static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
+ vm_page_t *free);
+static void pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free);
+static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
+static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
+static __inline int pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
+static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
+static void pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
static void pmap_invalidate_all(pmap_t pmap);
+static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
+ vm_page_t m, vm_prot_t prot, vm_page_t mpte);
+static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
+ vm_page_t m);
+static __inline void pmap_free_zero_pages(vm_page_t free);
+
struct pmap kernel_pmap_store;
vm_offset_t kernel_vm_end = 0;
@@ -164,6 +173,7 @@
pv_entry_high_water = 9 * (pv_entry_max / 10);
uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+ uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
}
/*
@@ -191,8 +201,9 @@
pmap_pinit(pmap_t pmap)
{
vm_page_t ptdpg;
+ int i;
+
PMAP_LOCK_INIT(pmap);
- int i;
/* allocate the page directory page */
ptdpg = vm_page_alloc(NULL, 512,
@@ -224,6 +235,7 @@
{
pmap_t pmap, oldpmap;
+ critical_enter();
pmap = vmspace_pmap(td->td_proc->p_vmspace);
oldpmap = PCPU_GET(curpmap);
@@ -233,13 +245,34 @@
pmap_asid_alloc(pmap);
/* XXX: Set tlbear here? */
PCPU_SET(curpmap, pmap);
+ critical_exit();
}
boolean_t
pmap_is_modified(vm_page_t m)
{
- avr32_impl();
- return (0);
+ pv_entry_t pv;
+ pt_entry_t *pte;
+ boolean_t rv;
+
+ rv = FALSE;
+ if (m->flags & PG_FICTITIOUS) {
+ return (rv);
+ }
+
+ sched_pin();
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ PMAP_LOCK(pv->pv_pmap);
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ rv = (*pte & PTE_DIRTY) != 0;
+ PMAP_UNLOCK(pv->pv_pmap);
+ if (rv) {
+ break;
+ }
+ }
+ sched_unpin();
+ return (rv);
}
void
@@ -248,27 +281,22 @@
pv_entry_t pv;
pt_entry_t *pte;
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (m->flags & PG_FICTITIOUS) {
return;
}
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- if (m->md.pv_flags & PV_TABLE_MOD) {
- panic("Need to look more into this");
- /*
- * Loop over all current mappings
- */
- TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
- PMAP_LOCK(pv->pv_pmap);
-
- pte = pmap_pte(pv->pv_pmap, pv->pv_va);
- KASSERT((pte != NULL || pte != 0), ("Mapped page not found"));
- *pte &= ~PTE_DIRTY;
-
- PMAP_UNLOCK(pv->pv_pmap);
+ sched_pin();
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ PMAP_LOCK(pv->pv_pmap);
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ if (*pte & PTE_DIRTY) {
+ atomic_clear_32(pte, PTE_DIRTY);
+ pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
}
- m->md.pv_flags &= ~PV_TABLE_MOD;
+ PMAP_UNLOCK(pv->pv_pmap);
}
+ sched_unpin();
}
int
@@ -302,11 +330,13 @@
if (!ent) {
panic("pmap_kenter: not in kernel segment\n");
}
- *ent = PTE_CACHEABLE | PTE_PERM_READ | PTE_PERM_WRITE | PTE_GLOBAL;
- pfn_set(*ent, pa);
+ *ent = pa | PTE_CACHEABLE | PTE_PERM_RW | PTE_GLOBAL;
- /* No need to do any tlb inserts, will just get a miss exception
- * when the page is needed */
+ /*
+ * No need to do any tlb inserts, will just get a miss exception
+ * when the page is needed, but has the page been invalidated
+ * before calling this?
+ */
}
/*
@@ -319,7 +349,6 @@
ent = pmap_pte(kernel_pmap, va);
*ent = 0;
- tlb_remove_entry(kernel_pmap, va);
}
/*
@@ -411,31 +440,61 @@
}
void
-pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
+pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
+ pt_entry_t *pte, oldpte;
+ vm_offset_t va;
int i;
- for (i = 0; i < count; i++) {
- pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
- va += PAGE_SIZE;
+ oldpte = 0;
+ for (i = 0, va = sva; i < count; i++, va += PAGE_SIZE) {
+ pte = pmap_pte(kernel_pmap, va);
+ KASSERT(pte != NULL, ("pmap_qenter: page entry not found\n"));
+
+ oldpte |= *pte;
+ *pte = VM_PAGE_TO_PHYS(m[i]) | PTE_CACHEABLE | PTE_PERM_RW |
+ PTE_GLOBAL;
+ }
+
+ if ((oldpte & PTE_ACCESSED) != 0) {
+ pmap_invalidate_range(kernel_pmap, sva, sva + count *
+ PAGE_SIZE);
}
}
void
-pmap_qremove(vm_offset_t va, int count)
+pmap_qremove(vm_offset_t sva, int count)
{
+ vm_offset_t va;
+
+ va = sva;
while (count-- > 0) {
pmap_kremove(va);
va += PAGE_SIZE;
- }
+ }
+ pmap_invalidate_range(kernel_pmap, sva, va);
+}
+
+/***************************************************
+ * Page table page management routines.....
+ ***************************************************/
+static __inline void
+pmap_free_zero_pages(vm_page_t free)
+{
+ vm_page_t m;
+
+ while (free != NULL) {
+ m = free;
+ free = m->right;
+ /* Preserve the page's PG_ZERO setting. */
+ vm_page_free_toq(m);
+ }
}
void
pmap_page_init(vm_page_t m)
{
TAILQ_INIT(&m->md.pv_list);
- m->md.pv_list_count = 0;
- m->md.pv_flags = 0;
}
/*
@@ -463,7 +522,7 @@
va += PAGE_SIZE;
start += PAGE_SIZE;
}
-
+ pmap_invalidate_range(kernel_pmap, sva, va);
*virt = va;
return (sva);
}
@@ -482,12 +541,21 @@
pt_entry_t *pte;
pt_entry_t origpte, newpte;
vm_page_t mpte, om;
+ boolean_t invalidate;
+
+ mpte = NULL;
+ va &= ~PAGE_MASK;
+ KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+
vm_page_lock_queues();
PMAP_LOCK(pmap);
+ sched_pin();
- va &= ~PAGE_MASK;
- mpte = NULL;
+ /*
+ * In the case that a page table page is not
+ * resident, we are creating it here.
+ */
if (va < VM_MAXUSER_ADDRESS) {
mpte = pmap_allocpte(pmap, va, M_WAITOK);
}
@@ -496,10 +564,11 @@
if (pte == NULL) {
panic("pmap_enter: Invalid page directory, va=0x%08X\n", va);
}
+
pa = VM_PAGE_TO_PHYS(m);
om = NULL;
origpte = *pte;
- opa = pfn_get(origpte);
+ opa = origpte & PTE_PFN;
/* Mapping has not changed, must be a protection or wiring change */
if (origpte != 0 && opa == pa) {
@@ -525,8 +594,9 @@
* We might be turning off write access to the page, so we
* go ahead and sense modify status.
*/
- if (page_is_managed(opa)) {
+ if (origpte & PTE_MANAGED) {
om = m;
+ pa |= PTE_MANAGED;
}
goto update;
}
@@ -536,11 +606,11 @@
if (origpte & PTE_WIRED) {
pmap->pm_stats.wired_count--;
}
- if (page_is_managed(opa)) {
+ if (origpte & PTE_MANAGED) {
om = PHYS_TO_VM_PAGE(opa);
pmap_remove_entry(pmap, om, va);
}
- if (mpte) {
+ if (mpte != NULL) {
mpte->wire_count--;
KASSERT(mpte->wire_count > 0,
("pmap_enter: missing reference to page table page,"
@@ -554,7 +624,8 @@
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
("pmap_enter: managed mapping within the clean submap"));
- pmap_insert_entry(pmap, va, mpte, m, wired);
+ pmap_insert_entry(pmap, va, m);
+ pa |= PTE_MANAGED;
}
/* Increment counters */
@@ -563,8 +634,7 @@
}
update:
- newpte = 0;
- pfn_set(newpte, pa);
+ newpte = (pt_entry_t)pa;
if (prot & VM_PROT_READ) {
newpte |= PTE_PERM_READ;
}
@@ -579,31 +649,47 @@
if (wired) {
newpte |= PTE_WIRED;
}
+ if (pmap == kernel_pmap) {
+ newpte |= PTE_GLOBAL;
+ }
/* Mapping changed, update */
- if (origpte != newpte) {
+ if ((origpte & ~(PTE_DIRTY | PTE_ACCESSED)) != newpte) {
+ newpte |= PTE_ACCESSED;
+ if ((access & VM_PROT_WRITE) != 0) {
+ newpte |= PTE_DIRTY;
+ }
if (origpte != 0) {
- *pte = newpte;
- if (page_is_managed(opa) && (opa != pa)) {
- if (om->md.pv_flags & PV_TABLE_REF) {
+ invalidate = FALSE;
+ origpte = atomic_load_store_32(pte, newpte);
+ if (origpte & PTE_ACCESSED) {
+ if (origpte & PTE_MANAGED) {
vm_page_flag_set(om, PG_REFERENCED);
}
- om->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
+ if (opa != VM_PAGE_TO_PHYS(m)) {
+ invalidate = TRUE;
+ }
+ if ((origpte & PTE_PERM_EXECUTE) == 0 &&
+ (newpte & PTE_PERM_EXECUTE) != 0) {
+ invalidate = TRUE;
+ }
}
if (origpte & PTE_DIRTY) {
- KASSERT((origpte & PTE_RW),
- ("pmap_enter: modified page not writable:"
- " va: 0x%x, pte: 0x%x", va, origpte));
- if (page_is_managed(opa)) {
+ if ((origpte & PTE_MANAGED) != 0) {
vm_page_dirty(om);
}
+ if ((prot & VM_PROT_WRITE) == 0) {
+ invalidate = TRUE;
+ }
}
+ if (invalidate) {
+ pmap_invalidate_page(pmap, va);
+ }
}
else {
*pte = newpte;
}
}
- tlb_update_entry(pmap, va, newpte);
/*
* XXX: Sync I & D caches for executable pages. Do this only if the the
@@ -616,6 +702,7 @@
avr32_dcache_wbinv_range(va, PAGE_SIZE);
}
+ sched_unpin();
vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@@ -626,15 +713,23 @@
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
- pt_entry_t *pte;
- vm_page_t mpte = NULL;
+ PMAP_LOCK(pmap);
+ pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+ PMAP_UNLOCK(pmap);
+}
+
+static vm_page_t
+pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, vm_page_t mpte)
+{
+ pt_entry_t *pte, newpte;
+ vm_page_t free;
KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
(m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
("pmap_enter_quick_locked: managed mapping within the clean submap"));
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
- PMAP_LOCK(pmap);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
/*
* In the case that a page table page is not
@@ -651,7 +746,6 @@
if (mpte && (mpte->pindex == pdindex)) {
mpte->wire_count++;
} else {
-retry:
/*
* Get the page directory entry
*/
@@ -667,16 +761,7 @@
} else {
mpte = _pmap_allocpte(pmap, pdindex, M_NOWAIT);
if (mpte == NULL) {
- PMAP_UNLOCK(pmap);
- vm_page_busy(m);
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(m->object);
- VM_WAIT;
- VM_OBJECT_LOCK(m->object);
- vm_page_lock_queues();
- vm_page_wakeup(m);
- PMAP_LOCK(pmap);
- goto retry;
+ return (NULL);
}
}
}
@@ -687,17 +772,27 @@
pte = pmap_pte(pmap, va);
if (*pte) {
if (mpte != NULL) {
- pmap_unwire_pte_hold(pmap, mpte);
+ // XXX: Seems we should call pmap_unwire_pte_hold here?
+ mpte->wire_count--;
+ mpte = NULL;
}
- PMAP_UNLOCK(pmap);
- return;
+ return (mpte);
}
/*
* Enter on the PV list if part of our managed memory.
*/
- if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
- pmap_insert_entry(pmap, va, mpte, m, FALSE);
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+ !pmap_try_insert_pv_entry(pmap, va, m)) {
+ if (mpte != NULL) {
+ free = NULL;
+ if (pmap_unwire_pte_hold(pmap, mpte, &free)) {
+ pmap_invalidate_page(pmap, va);
+ pmap_free_zero_pages(free);
+ }
+ mpte = NULL;
+ }
+ return (mpte);
}
/*
@@ -705,17 +800,22 @@
*/
pmap->pm_stats.resident_count++;
- pfn_set(*pte, VM_PAGE_TO_PHYS(m));
+ newpte = VM_PAGE_TO_PHYS(m);
if (prot & VM_PROT_READ) {
- *pte |= PTE_PERM_READ;
+ newpte |= PTE_PERM_READ;
}
if (prot & VM_PROT_WRITE) {
- *pte |= PTE_PERM_WRITE;
+ newpte |= PTE_PERM_WRITE;
}
if (prot & VM_PROT_EXECUTE) {
- *pte |= PTE_PERM_EXECUTE;
+ newpte |= PTE_PERM_EXECUTE;
+ }
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+ newpte |= PTE_MANAGED;
}
- PMAP_UNLOCK(pmap);
+ *pte = newpte;
+
+ return (mpte);
}
@@ -735,21 +835,20 @@
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m_start, vm_prot_t prot)
{
- vm_page_t m;
+ vm_page_t m, mpte;
vm_pindex_t diff, psize;
+ VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
psize = atop(end - start);
+ mpte = NULL;
m = m_start;
+ PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
- /* FIX ME FIX ME - prot is passed in both the
- * the normal spot m, prot but also as the fault_type
- * which we don't use. If we ever use it in pmap_enter
- * we will have to fix this.
- */
- pmap_enter(pmap, start + ptoa(diff), prot, m, prot &
- (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+ mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
+ prot, mpte);
m = TAILQ_NEXT(m, listq);
}
+ PMAP_UNLOCK(pmap);
}
void
@@ -757,16 +856,17 @@
{
vm_offset_t va;
pt_entry_t *pte;
+ vm_page_t free = NULL;
+ int anyvalid;
- if (pmap == NULL) {
+ if (pmap->pm_stats.resident_count == 0) {
return;
}
- if (pmap->pm_stats.resident_count == 0) {
- return;
- }
+ anyvalid = 0;
vm_page_lock_queues();
+ sched_pin();
PMAP_LOCK(pmap);
/*
@@ -774,68 +874,89 @@
* and easy to short circuit some code.
*/
if ((sva + PAGE_SIZE) == eva) {
- pmap_remove_page(pmap, sva);
+ pmap_remove_page(pmap, sva, &free);
goto out;
}
for (va = sva; va < eva; va += PAGE_SIZE) {
+ if (pmap->pm_stats.resident_count == 0) {
+ break;
+ }
+
pte = pmap_pte(pmap, va);
if (!pte || !*pte) {
continue;
}
- pmap_remove_page(pmap, va);
+
+ if ((*pte & PTE_GLOBAL) == 0) {
+ anyvalid = 1;
+ }
+ pmap_remove_pte(pmap, pte, va, &free);
}
out:
+
+ if (anyvalid) {
+ pmap_invalidate_all(pmap);
+ }
+ sched_unpin();
+ PMAP_UNLOCK(pmap);
vm_page_unlock_queues();
- PMAP_UNLOCK(pmap);
+ pmap_free_zero_pages(free);
}
void
pmap_remove_all(vm_page_t m)
{
register pv_entry_t pv;
- register pt_entry_t *pte;
+ register pt_entry_t *pte, tpte;
+ vm_page_t free;
+ KASSERT((m->flags & PG_FICTITIOUS) == 0,
+ ("pmap_remove_all: page %p is fictitious", m));
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- if (m->md.pv_flags & PV_TABLE_REF) {
- vm_page_flag_set(m, PG_REFERENCED);
- }
-
+ sched_pin();
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
PMAP_LOCK(pv->pv_pmap);
pv->pv_pmap->pm_stats.resident_count--;
-
/*
* Update the vm_page_t clean and reference bits.
*/
pte = pmap_pte(pv->pv_pmap, pv->pv_va);
- if (*pte & PTE_WIRED) {
+ KASSERT(pte != NULL, ("pv_entry has invalid pte\n"));
+ tpte = atomic_readandclear_32(pte);
+
+ if (tpte & PTE_WIRED) {
pv->pv_pmap->pm_stats.wired_count--;
}
- if (*pte & PTE_DIRTY) {
+ if (tpte & PTE_ACCESSED) {
+ vm_page_flag_set(m, PG_REFERENCED);
+ }
+ if (tpte & PTE_DIRTY) {
+ KASSERT((tpte & PTE_PERM_WRITE),
+ ("pmap_remove_all: modified page not writable: va: %#x, pte: %#jx",
+ pv->pv_va, (uintmax_t)tpte));
vm_page_dirty(m);
}
- *pte = 0;
- tlb_remove_entry(pv->pv_pmap, pv->pv_va);
-
+ free = NULL;
+ pmap_unuse_pt(pv->pv_pmap, pv->pv_va, &free);
+ pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
+ pmap_free_zero_pages(free);
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
- m->md.pv_list_count--;
PMAP_UNLOCK(pv->pv_pmap);
free_pv_entry(pv);
}
-
vm_page_flag_clear(m, PG_WRITEABLE);
- m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
+ sched_unpin();
}
void
pmap_remove_pages(pmap_t pmap)
{
- pv_entry_t pv, npv;
- pt_entry_t *pte;
- vm_page_t m;
+ vm_page_t m, free = NULL;
+ pv_entry_t pv, next_pv;
+ pt_entry_t *pte, tpte;
if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
printf("warning: pmap_remove_pages called with non-current pmap\n");
@@ -845,45 +966,47 @@
PMAP_LOCK(pmap);
sched_pin();
- for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
- npv = TAILQ_NEXT(pv, pv_plist);
-
+ TAILQ_FOREACH_SAFE(pv, &pmap->pm_pvlist, pv_plist, next_pv) {
pte = pmap_pte(pv->pv_pmap, pv->pv_va);
KASSERT(pte != NULL, ("page on pm_pvlist has no pte\n"));
+ tpte = atomic_load_32(pte);
/* We cannot remove wired pages from a process' mapping at this time */
- if (*pte & PTE_WIRED) {
+ if (tpte & PTE_WIRED) {
continue;
}
+ m = PHYS_TO_VM_PAGE(tpte & PTE_PFN);
+ KASSERT(m->phys_addr == (tpte & PTE_PFN),
+ ("vm_page_t %p phys_addr mismatch %016jx %016jx",
+ m, (uintmax_t)m->phys_addr,
+ (uintmax_t)tpte));
- m = PHYS_TO_VM_PAGE(pfn_get(*pte));
KASSERT(m < &vm_page_array[vm_page_array_size],
- ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)*pte));
+ ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));
+ pv->pv_pmap->pm_stats.resident_count--;
+ *pte = 0;
/* Update the vm_page_t clean and reference bits. */
- if (*pte & PTE_DIRTY) {
+ if (tpte & PTE_DIRTY) {
vm_page_dirty(m);
}
/* Remove from lists and free */
- pv->pv_pmap->pm_stats.resident_count--;
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
-
- m->md.pv_list_count--;
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
- if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
+ if (TAILQ_EMPTY(&m->md.pv_list)) {
vm_page_flag_clear(m, PG_WRITEABLE);
}
- pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
+ pmap_unuse_pt(pv->pv_pmap, pv->pv_va, &free);
free_pv_entry(pv);
}
-
sched_unpin();
pmap_invalidate_all(pmap);
+ vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
- vm_page_unlock_queues();
+ pmap_free_zero_pages(free);
}
/*
@@ -891,23 +1014,22 @@
* conditionally free the page, and manage the hold/wire counts.
*/
static int
-pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
+pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
{
pd_entry_t pteva;
+ vm_page_t mpte;
if (va >= VM_MAXUSER_ADDRESS) {
return (0);
}
- if (mpte == NULL) {
- pteva = *pmap_pde(pmap, va);
- mpte = PHYS_TO_VM_PAGE(AVR32_P1_TO_PHYS(pteva));
- }
- return pmap_unwire_pte_hold(pmap, mpte);
+ pteva = *pmap_pde(pmap, va);
+ mpte = PHYS_TO_VM_PAGE(AVR32_P1_TO_PHYS(pteva));
+ return (pmap_unwire_pte_hold(pmap, mpte, free));
}
static int
-_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
+_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
{
/*
* Unmap the page table page
@@ -915,134 +1037,177 @@
pmap->pm_pd[m->pindex] = 0;
--pmap->pm_stats.resident_count;
+ atomic_subtract_32(&cnt.v_wire_count, 1);
+
/*
- * If the page is finally unwired, simply free it.
+ * If the page is finally unwired, then add it to the free list
*/
- vm_page_free_zero(m);
- atomic_subtract_int(&cnt.v_wire_count, 1);
+ m->right = *free;
+ *free = m;
return (1);
}
static __inline int
-pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
+pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
{
--m->wire_count;
if (m->wire_count == 0) {
- return (_pmap_unwire_pte_hold(pmap, m));
+ return (_pmap_unwire_pte_hold(pmap, m, free));
} else {
return (0);
}
}
static void
+pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
+{
+ if (pmap->pm_asid_generation < PCPU_GET(asid_generation)) {
+ return;
+ }
+
+ sched_pin();
+ tlb_remove_entry(pmap, va);
+ sched_unpin();
+}
+
+static void
+pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ vm_offset_t addr;
+
+ if (pmap->pm_asid_generation < PCPU_GET(asid_generation)) {
+ return;
+ }
+
+ sched_pin();
+ for (addr = sva; addr < eva; addr += PAGE_SIZE) {
+ panic("pmap_invalidate_range"); // addr
+ }
+ sched_unpin();
+}
+
+static void
pmap_invalidate_all(pmap_t pmap)
{
- /* XXX: Need to implement this. */
+ pv_entry_t pv;
+
+ if (pmap->pm_asid_generation < PCPU_GET(asid_generation)) {
+ return;
+ }
+ sched_pin();
+ TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
+ tlb_remove_entry(pmap, pv->pv_va);
+ }
+ sched_unpin();
}
void
pmap_remove_write(vm_page_t m)
{
- pv_entry_t pv, npv;
- vm_offset_t va;
- pt_entry_t *pte;
+ pv_entry_t pv;
+ pt_entry_t oldpte, *pte;
- if ((m->flags & PG_WRITEABLE) == 0) {
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ if ((m->flags & PG_FICTITIOUS) != 0 ||
+ (m->flags & PG_WRITEABLE) == 0) {
return;
}
- /*
- * Loop over all current mappings setting/clearing as appropos.
- */
- for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = npv) {
- npv = TAILQ_NEXT(pv, pv_plist);
+ sched_pin();
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ PMAP_LOCK(pv->pv_pmap);
pte = pmap_pte(pv->pv_pmap, pv->pv_va);
-
- if (pte == NULL) {
- panic("page on pm_pvlist has no pte\n");
+ KASSERT(pte != NULL, ("page on pv_list has no pte\n"));
+retry:
+ oldpte = *pte;
+ if ((oldpte & PTE_PERM_WRITE) != 0) {
+ if (!atomic_cmpset_int(pte, oldpte,
+ oldpte & ~(PTE_PERM_WRITE | PTE_DIRTY))) {
+ goto retry;
+ }
+ if ((oldpte & PTE_DIRTY) != 0) {
+ vm_page_dirty(m);
+ }
+ pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
}
-
- va = pv->pv_va;
- pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
- VM_PROT_READ | VM_PROT_EXECUTE);
+ PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
+ sched_unpin();
}
/*
* pmap_remove_pte: do the things to unmap a page in a process
*/
static int
-pmap_remove_pte(struct pmap *pmap, pt_entry_t *pte, vm_offset_t va)
+pmap_remove_pte(pmap_t pmap, pt_entry_t *pte, vm_offset_t va, vm_page_t *free)
{
vm_page_t m;
- vm_offset_t pa;
+ pt_entry_t oldpte;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ oldpte = atomic_readandclear_32(pte);
+ pmap->pm_stats.resident_count--;
- if (*pte & PTE_WIRED) {
+ if (oldpte & PTE_WIRED) {
pmap->pm_stats.wired_count--;
}
>>> TRUNCATED FOR MAIL (1000 lines) <<<