PERFORCE change 93310 for review

Peter Wemm peter at FreeBSD.org
Tue Mar 14 21:46:12 UTC 2006


http://perforce.freebsd.org/chv.cgi?CH=93310

Change 93310 by peter at peter_daintree on 2006/03/14 21:45:43

	Integrate alc's pv reclaim procedure by moving it to a
	callable function - long live pmap_collect()!!  Call it when
	needed to try to pry loose a page for more chunks.  Since pv
	entries are per-process with this scheme, we cannot just steal
	one from another process; we have to reclaim an entire
	contiguous page's worth of pv entries.
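
As a rough standalone illustration of the chunk scheme described above (plain C, not the kernel code; the type and helper names below are invented for the sketch, a single 64-bit bitmap word stands in for the real pc_map[] array, and __builtin_ctzll is used only for brevity):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Simplified model of a per-pmap pv-entry chunk: one page-sized
 * allocation holding a small header plus an array of pv entries,
 * with a bitmap marking which slots are still free.
 */
#define PV_PER_CHUNK	64		/* one bitmap word, for brevity */

struct pv_entry_model {
	uintptr_t	pv_va;		/* mapped virtual address */
};

struct pv_chunk_model {
	void		*pc_pmap;	/* owning pmap (unused in the sketch) */
	uint64_t	pc_map;		/* bitmap: 1 = slot free */
	struct pv_entry_model pc_pventry[PV_PER_CHUNK];
};

/* Hand out a pv entry from the chunk, or NULL if the chunk is full. */
static struct pv_entry_model *
chunk_alloc_pv(struct pv_chunk_model *pc)
{
	int bit;

	if (pc->pc_map == 0)
		return (NULL);
	bit = __builtin_ctzll(pc->pc_map);	/* pick any free slot */
	pc->pc_map &= ~(1ULL << bit);		/* mark it in use */
	return (&pc->pc_pventry[bit]);
}

/* Return a pv entry to its chunk. */
static void
chunk_free_pv(struct pv_chunk_model *pc, struct pv_entry_model *pv)
{
	pc->pc_map |= 1ULL << (pv - pc->pc_pventry);
}

/*
 * The page backing the chunk can only go back to the VM system once
 * every slot in it is free again, which is why a reclaim pass has to
 * tear down a whole page's worth of mappings instead of stealing a
 * single pv entry from another process.
 */
static int
chunk_is_empty(const struct pv_chunk_model *pc)
{
	return (pc->pc_map == ~0ULL);	/* assumes PV_PER_CHUNK == 64 */
}

int
main(void)
{
	struct pv_chunk_model pc;
	struct pv_entry_model *pv;

	memset(&pc, 0, sizeof(pc));
	pc.pc_map = ~0ULL;			/* all slots start out free */

	pv = chunk_alloc_pv(&pc);
	pv->pv_va = 0x1000;
	printf("allocated slot %ld, chunk empty: %d\n",
	    (long)(pv - pc.pc_pventry), chunk_is_empty(&pc));
	chunk_free_pv(&pc, pv);
	printf("freed it, chunk empty: %d\n", chunk_is_empty(&pc));
	return (0);
}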

Affected files ...

.. //depot/projects/hammer/sys/amd64/amd64/pmap.c#135 edit

Differences ...

==== //depot/projects/hammer/sys/amd64/amd64/pmap.c#135 (text+ko) ====

@@ -1452,6 +1452,61 @@
 #define	PC_FREE2	0x000000fffffffffful
 
 /*
+ * We are in a serious low memory condition.  Resort to
+ * drastic measures to free some pages so we can allocate
+ * another pv entry chunk.  This is normally called to
+ * unmap inactive pages, and if necessary, active pages.
+ */
+static void
+pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
+{
+	pd_entry_t ptepde;
+	pmap_t pmap;
+	pt_entry_t *pte, tpte;
+	pv_entry_t next_pv, pv;
+	vm_offset_t va;
+	vm_page_t m;
+
+	TAILQ_FOREACH(m, &vpq->pl, pageq) {
+		if (m->hold_count || m->busy || (m->flags & PG_BUSY))
+			continue;
+		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
+			va = pv->pv_va;
+			pmap = PV_PMAP(pv);
+			/* Avoid deadlock and lock recursion. */
+			if (pmap > locked_pmap)
+				PMAP_LOCK(pmap);
+			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
+				continue;
+			pmap->pm_stats.resident_count--;
+			pte = pmap_pte_pde(pmap, va, &ptepde);
+			tpte = pte_load_clear(pte);
+			KASSERT((tpte & PG_W) == 0,
+			    ("get_pv_entry: wired pte %#lx", tpte));
+			if (tpte & PG_A)
+				vm_page_flag_set(m, PG_REFERENCED);
+			if (tpte & PG_M) {
+				KASSERT((tpte & PG_RW),
+	("get_pv_entry: modified page not writable: va: %#lx, pte: %#lx",
+				    va, tpte));
+				if (pmap_track_modified(va))
+					vm_page_dirty(m);
+			}
+			pmap_invalidate_page(pmap, va);
+			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+			if (TAILQ_EMPTY(&m->md.pv_list))
+				vm_page_flag_clear(m, PG_WRITEABLE);
+			m->md.pv_list_count--;
+			pmap_unuse_pt(pmap, va, ptepde);
+			if (pmap != locked_pmap)
+				PMAP_UNLOCK(pmap);
+			free_pv_entry(pv);
+		}
+	}
+}
+
+
+/*
  * free the pv_entry back to the free list
  */
 static PMAP_INLINE void
@@ -1488,18 +1543,19 @@
 static pv_entry_t
 get_pv_entry(pmap_t pmap)
 {
+	static const struct timeval printinterval = { 60, 0 };
+	static struct timeval lastprint;
 	static vm_pindex_t colour;
 	int bit, field;
 	pv_entry_t pv;
 	struct pv_chunk *pc;
 	vm_page_t m;
 
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pv_entry_count++;
-	if ((pv_entry_count > pv_entry_high_water) &&
-		(pmap_pagedaemon_waken == 0)) {
-		pmap_pagedaemon_waken = 1;
-		wakeup (&vm_pages_needed);
-	}
+	if (pv_entry_count > pv_entry_high_water)
+		pagedaemon_wakeup();
 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 	for (field = 0; field < _NPCM; field++) {
 		bit = bsrq(pc->pc_map[field]);
@@ -1519,8 +1575,25 @@
 	}
 	/* No free items, allocate another chunk */
 	m = vm_page_alloc(NULL, colour, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
-	if (m == NULL)
-		return (NULL);
+	if (m == NULL) {
+		/*
+		 * Reclaim pv entries: At first, destroy mappings to inactive
+		 * pages.  After that, if a pv chunk entry is still needed,
+		 * destroy mappings to active pages.
+		 */
+		if (ratecheck(&lastprint, &printinterval))
+			printf("Approaching the limit on PV entries, consider "
+			    "increasing sysctl vm.pmap.shpgperproc or "
+			    "vm.pmap.pv_entry_max\n");
+	pmap_collect(pmap, &vm_page_queues[PQ_INACTIVE]);
+		m = vm_page_alloc(NULL, colour, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
+		if (m == NULL) {
+		pmap_collect(pmap, &vm_page_queues[PQ_ACTIVE]);
+			m = vm_page_alloc(NULL, colour, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
+			if (m == NULL)
+				panic("get_pv_entry: increase vm.pmap.shpgperproc");
+		}
+	}
 	colour++;
 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 	pc->pc_pmap = pmap;
@@ -1531,92 +1604,6 @@
 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 	return (pv);
 }
-#if 0
-static pv_entry_t
-get_pv_entry(pmap_t locked_pmap)
-{
-	static const struct timeval printinterval = { 60, 0 };
-	static struct timeval lastprint;
-	struct vpgqueues *vpq;
-	pd_entry_t ptepde;
-	pmap_t pmap;
-	pt_entry_t *pte, tpte;
-	pv_entry_t allocated_pv, next_pv, pv;
-	vm_offset_t va;
-	vm_page_t m;
-
-	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
-	if (allocated_pv != NULL) {
-		pv_entry_count++;
-		if (pv_entry_count > pv_entry_high_water)
-			pagedaemon_wakeup();
-		else
-			return (allocated_pv);
-	}
-
-	/*
-	 * Reclaim pv entries: At first, destroy mappings to inactive
-	 * pages.  After that, if a pv entry is still needed, destroy
-	 * mappings to active pages.
-	 */
-	if (ratecheck(&lastprint, &printinterval))
-		printf("Approaching the limit on PV entries, consider "
-		    "increasing sysctl vm.pmap.shpgperproc or "
-		    "vm.pmap.pv_entry_max\n");
-	vpq = &vm_page_queues[PQ_INACTIVE];
-retry:
-	TAILQ_FOREACH(m, &vpq->pl, pageq) {
-		if (m->hold_count || m->busy || (m->flags & PG_BUSY))
-			continue;
-		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
-			va = pv->pv_va;
-			pmap = pv->pv_pmap;
-			/* Avoid deadlock and lock recursion. */
-			if (pmap > locked_pmap)
-				PMAP_LOCK(pmap);
-			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
-				continue;
-			pmap->pm_stats.resident_count--;
-			pte = pmap_pte_pde(pmap, va, &ptepde);
-			tpte = pte_load_clear(pte);
-			KASSERT((tpte & PG_W) == 0,
-			    ("get_pv_entry: wired pte %#lx", tpte));
-			if (tpte & PG_A)
-				vm_page_flag_set(m, PG_REFERENCED);
-			if (tpte & PG_M) {
-				KASSERT((tpte & PG_RW),
-	("get_pv_entry: modified page not writable: va: %#lx, pte: %#lx",
-				    va, tpte));
-				if (pmap_track_modified(va))
-					vm_page_dirty(m);
-			}
-			pmap_invalidate_page(pmap, va);
-			TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
-			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-			if (TAILQ_EMPTY(&m->md.pv_list))
-				vm_page_flag_clear(m, PG_WRITEABLE);
-			m->md.pv_list_count--;
-			pmap_unuse_pt(pmap, va, ptepde);
-			if (pmap != locked_pmap)
-				PMAP_UNLOCK(pmap);
-			if (allocated_pv == NULL)
-				allocated_pv = pv;
-			else
-				free_pv_entry(pv);
-		}
-	}
-	if (allocated_pv == NULL) {
-		if (vpq == &vm_page_queues[PQ_INACTIVE]) {
-			vpq = &vm_page_queues[PQ_ACTIVE];
-			goto retry;
-		}
-		panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
-	}
-	return (allocated_pv);
-}
-#endif
 
 static void
 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)

