PERFORCE change 93322 for review
Peter Wemm
peter at FreeBSD.org
Tue Mar 14 23:48:53 UTC 2006
http://perforce.freebsd.org/chv.cgi?CH=93322
Change 93322 by peter at peter_melody on 2006/03/14 23:48:30
add some stats
Affected files ...
.. //depot/projects/hammer/sys/amd64/amd64/pmap.c#138 edit
Differences ...
==== //depot/projects/hammer/sys/amd64/amd64/pmap.c#138 (text+ko) ====
@@ -1451,6 +1451,32 @@
#define PC_FREE1 0xfffffffffffffffful
#define PC_FREE2 0x000000fffffffffful
+static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees;
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
+ "Current number of pv entry chunks");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
+ "Total number of pv entry chunks allocated");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
+ "Total number of pv entry chunks freed");
+
+static int pv_entry_frees, pv_entry_allocs, pv_entry_spare;
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
+ "Current number of pv entries");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
+ "Total number of pv entry frees");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
+ "Total number of pv entry allocs");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
+ "Current number of spare pv entries");
+
+static int pmap_collect_inactive, pmap_collect_active;
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
+ "Number of times pmap_collect called on the inactive queue");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
+ "Number of times pmap_collect called on the active queue");
/*
* We are in a serious low memory condition. Resort to
* drastic measures to free some pages so we can allocate
@@ -1516,6 +1542,8 @@
struct pv_chunk *pc;
int idx, field, bit;
+ pv_entry_frees++;
+ pv_entry_spare++;
pv_entry_count--;
pc = pv_to_chunk(pv);
idx = pv - &pc->pc_pventry[0];
@@ -1528,6 +1556,9 @@
if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
pc->pc_map[2] != PC_FREE2)
return;
+ pv_entry_spare -= _NPCPV;
+ pc_chunk_count--;
+ pc_chunk_frees++;
/* entire chunk is free, return it */
TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
@@ -1553,6 +1584,7 @@
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ pv_entry_allocs++;
pv_entry_count++;
if (pv_entry_count > pv_entry_high_water)
pagedaemon_wakeup();
@@ -1575,6 +1607,7 @@
TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
}
+ pv_entry_spare--;
return (pv);
}
alloc:
@@ -1590,15 +1623,19 @@
printf("Approaching the limit on PV entries, consider "
"increasing sysctl vm.pmap.shpgperproc or "
"vm.pmap.pv_entry_max\n");
+ pmap_collect_inactive++;
pmap_collect(pmap, &vm_page_queues[PQ_INACTIVE]);
m = vm_page_alloc(NULL, colour, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
if (m == NULL) {
+ pmap_collect_active++;
pmap_collect(pmap, &vm_page_queues[PQ_ACTIVE]);
m = vm_page_alloc(NULL, colour, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
if (m == NULL)
panic("get_pv_entry: increase vm.pmap.shpgperproc");
}
}
+ pc_chunk_count++;
+ pc_chunk_allocs++;
colour++;
pc = (void *)PHYS_TO_DMAP(m->phys_addr);
pc->pc_pmap = pmap;
@@ -1607,6 +1644,7 @@
pc->pc_map[2] = PC_FREE2;
pv = &pc->pc_pventry[0];
TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ pv_entry_spare += _NPCPV - 1;
return (pv);
}
More information about the p4-projects
mailing list