PERFORCE change 93327 for review
Peter Wemm
peter at FreeBSD.org
Wed Mar 15 00:50:13 UTC 2006
http://perforce.freebsd.org/chv.cgi?CH=93327
Change 93327 by peter at peter_melody on 2006/03/15 00:49:56
Eliminate the sva/eva args to pmap_remove_pages(). We only ever
call it for the entire user address space, and as long as that is true,
the amd64 pmap_remove_pages() can use bsfq etc. to scan its bitmaps
instead of a slow for loop.
XXX switch over to using bsfq()!
Affected files ...
.. //depot/projects/hammer/sys/alpha/alpha/pmap.c#50 edit
.. //depot/projects/hammer/sys/amd64/amd64/pmap.c#140 edit
.. //depot/projects/hammer/sys/arm/arm/pmap.c#30 edit
.. //depot/projects/hammer/sys/i386/i386/pmap.c#80 edit
.. //depot/projects/hammer/sys/ia64/ia64/pmap.c#52 edit
.. //depot/projects/hammer/sys/kern/kern_exec.c#62 edit
.. //depot/projects/hammer/sys/kern/kern_exit.c#59 edit
.. //depot/projects/hammer/sys/powerpc/powerpc/mmu_if.m#3 edit
.. //depot/projects/hammer/sys/powerpc/powerpc/pmap_dispatch.c#4 edit
.. //depot/projects/hammer/sys/sparc64/sparc64/pmap.c#40 edit
.. //depot/projects/hammer/sys/vm/pmap.h#24 edit
Differences ...
==== //depot/projects/hammer/sys/alpha/alpha/pmap.c#50 (text+ko) ====
@@ -2125,9 +2125,7 @@
* in the case of running down an entire address space.
*/
void
-pmap_remove_pages(pmap, sva, eva)
- pmap_t pmap;
- vm_offset_t sva, eva;
+pmap_remove_pages(pmap_t pmap)
{
pt_entry_t *pte, tpte;
vm_page_t m;
@@ -2146,11 +2144,6 @@
pv;
pv = npv) {
- if (pv->pv_va >= eva || pv->pv_va < sva) {
- npv = TAILQ_NEXT(pv, pv_plist);
- continue;
- }
-
#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
pte = vtopte(pv->pv_va);
#else
==== //depot/projects/hammer/sys/amd64/amd64/pmap.c#140 (text+ko) ====
@@ -2685,9 +2685,7 @@
* in the case of running down an entire address space.
*/
void
-pmap_remove_pages(pmap, sva, eva)
- pmap_t pmap;
- vm_offset_t sva, eva;
+pmap_remove_pages(pmap_t pmap)
{
pt_entry_t *pte, tpte;
vm_page_t m;
@@ -2695,7 +2693,6 @@
struct pv_chunk *pc, *npc;
int field, idx;
int64_t bit;
- int allfree, didfree;;
if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
printf("warning: pmap_remove_pages called with non-current pmap\n");
@@ -2704,13 +2701,11 @@
vm_page_lock_queues();
PMAP_LOCK(pmap);
TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
- allfree = 1;
- didfree = 0;
/*
- * If only we could eliminate the sva/eva tests, and define
- * pmap_remove_pages() to simply remove *ALL* user pages, we
- * could make it faster here. eg: replace for() loop with
- * bsrq() and some other some algorithm changes.
+ * XXX optimize more after removal of sva/eva!
+ * We can use bsfq to find the next inuse pv instead of
+ * the for loop. Since we free as we go, we won't have
+ * to skip unfreeable ones that sva/eva might have allowed.
*/
for (idx = 0; idx < _NPCPV; idx++) {
field = idx / 64;
@@ -2718,11 +2713,6 @@
if ((pc->pc_map[field] & 1ul << bit) == 0) { /* inuse */
pv = &pc->pc_pventry[idx];
- if (pv->pv_va >= eva || pv->pv_va < sva) {
- allfree = 0;
- continue;
- }
-
pte = vtopte(pv->pv_va);
tpte = *pte;
@@ -2763,7 +2753,6 @@
pv_entry_frees++;
pv_entry_spare++;
pv_entry_count--;
- didfree = 1;
pc->pc_map[field] |= 1ul << bit;
m->md.pv_list_count--;
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
@@ -2773,21 +2762,14 @@
*vtopde(pv->pv_va));
}
}
- if (allfree) {
- pv_entry_spare -= _NPCPV;
- pc_chunk_count--;
- pc_chunk_frees++;
- TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
- m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
- vm_page_lock_queues();
- vm_page_free(m);
- vm_page_unlock_queues();
- } else {
- if (didfree) {
- TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
- TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
- }
- }
+ pv_entry_spare -= _NPCPV;
+ pc_chunk_count--;
+ pc_chunk_frees++;
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+ vm_page_lock_queues();
+ vm_page_free(m);
+ vm_page_unlock_queues();
}
pmap_invalidate_all(pmap);
PMAP_UNLOCK(pmap);
==== //depot/projects/hammer/sys/arm/arm/pmap.c#30 (text+ko) ====
@@ -2809,7 +2809,7 @@
* in the case of running down an entire address space.
*/
void
-pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+pmap_remove_pages(pmap_t pmap)
{
struct pv_entry *pv, *npv;
struct l2_bucket *l2b = NULL;
@@ -2818,10 +2818,6 @@
vm_page_lock_queues();
for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
- if (pv->pv_va >= eva || pv->pv_va < sva) {
- npv = TAILQ_NEXT(pv, pv_plist);
- continue;
- }
if (pv->pv_flags & PVF_WIRED) {
/* The page is wired, cannot remove it now. */
npv = TAILQ_NEXT(pv, pv_plist);
==== //depot/projects/hammer/sys/i386/i386/pmap.c#80 (text+ko) ====
@@ -2590,9 +2590,7 @@
* in the case of running down an entire address space.
*/
void
-pmap_remove_pages(pmap, sva, eva)
- pmap_t pmap;
- vm_offset_t sva, eva;
+pmap_remove_pages(pmap_t pmap)
{
pt_entry_t *pte, tpte;
vm_page_t m;
@@ -2609,11 +2607,6 @@
sched_pin();
for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
- if (pv->pv_va >= eva || pv->pv_va < sva) {
- npv = TAILQ_NEXT(pv, pv_plist);
- continue;
- }
-
#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
pte = vtopte(pv->pv_va);
#else
==== //depot/projects/hammer/sys/ia64/ia64/pmap.c#52 (text+ko) ====
@@ -1862,7 +1862,7 @@
* in the case of running down an entire address space.
*/
void
-pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+pmap_remove_pages(pmap_t pmap)
{
pmap_t oldpmap;
pv_entry_t pv, npv;
@@ -1881,9 +1881,6 @@
npv = TAILQ_NEXT(pv, pv_plist);
- if (pv->pv_va >= eva || pv->pv_va < sva)
- continue;
-
pte = pmap_find_vhpt(pv->pv_va);
KASSERT(pte != NULL, ("pte"));
if (!pmap_wired(pte))
==== //depot/projects/hammer/sys/kern/kern_exec.c#62 (text+ko) ====
@@ -905,8 +905,7 @@
if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv->sv_minuser &&
vm_map_max(map) == sv->sv_maxuser) {
shmexit(vmspace);
- pmap_remove_pages(vmspace_pmap(vmspace), vm_map_min(map),
- vm_map_max(map));
+ pmap_remove_pages(vmspace_pmap(vmspace));
vm_map_remove(map, vm_map_min(map), vm_map_max(map));
} else {
vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
==== //depot/projects/hammer/sys/kern/kern_exit.c#59 (text+ko) ====
@@ -325,8 +325,7 @@
while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
if (refcnt == 1) {
shmexit(vm);
- pmap_remove_pages(vmspace_pmap(vm), vm_map_min(&vm->vm_map),
- vm_map_max(&vm->vm_map));
+ pmap_remove_pages(vmspace_pmap(vm));
(void) vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
vm_map_max(&vm->vm_map));
}
==== //depot/projects/hammer/sys/powerpc/powerpc/mmu_if.m#3 (text+ko) ====
@@ -85,8 +85,7 @@
return;
}
- static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap,
- vm_offset_t start, vm_offset_t end)
+ static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
{
return;
}
@@ -543,8 +542,6 @@
METHOD void remove_pages {
mmu_t _mmu;
pmap_t _pmap;
- vm_offset_t _start;
- vm_offset_t _end;
} DEFAULT mmu_null_remove_pages;
==== //depot/projects/hammer/sys/powerpc/powerpc/pmap_dispatch.c#4 (text+ko) ====
@@ -244,9 +244,9 @@
}
void
-pmap_remove_pages(pmap_t pmap, vm_offset_t start, vm_offset_t end)
+pmap_remove_pages(pmap_t pmap)
{
- MMU_REMOVE_PAGES(mmu_obj, pmap, start, end);
+ MMU_REMOVE_PAGES(mmu_obj, pmap);
}
void
==== //depot/projects/hammer/sys/sparc64/sparc64/pmap.c#40 (text+ko) ====
@@ -1698,7 +1698,7 @@
* an entire address space. Only works for the current pmap.
*/
void
-pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+pmap_remove_pages(pmap_t pm)
{
}
==== //depot/projects/hammer/sys/vm/pmap.h#24 (text+ko) ====
@@ -121,7 +121,7 @@
void pmap_release(pmap_t);
void pmap_remove(pmap_t, vm_offset_t, vm_offset_t);
void pmap_remove_all(vm_page_t m);
-void pmap_remove_pages(pmap_t, vm_offset_t, vm_offset_t);
+void pmap_remove_pages(pmap_t);
void pmap_zero_page(vm_page_t);
void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_zero_page_idle(vm_page_t);
More information about the p4-projects
mailing list