PERFORCE change 132553 for review
Warner Losh
imp at FreeBSD.org
Fri Jan 4 22:34:48 PST 2008
http://perforce.freebsd.org/chv.cgi?CH=132553
Change 132553 by imp at imp_paco-paco on 2008/01/05 06:33:50
Implement the newly required pmap functions. We now have
no pmap undefined symbols.
Affected files ...
.. //depot/projects/mips2-jnpr/src/sys/mips/include/pmap.h#8 edit
.. //depot/projects/mips2-jnpr/src/sys/mips/mips/pmap.c#14 edit
Differences ...
==== //depot/projects/mips2-jnpr/src/sys/mips/include/pmap.h#8 (text+ko) ====
@@ -137,6 +137,7 @@
TAILQ_ENTRY(pv_entry) pv_list;
TAILQ_ENTRY(pv_entry) pv_plist;
vm_page_t pv_ptem; /* VM page for pte */
+ boolean_t pv_wired; /* whether this entry is wired */
} *pv_entry_t;
==== //depot/projects/mips2-jnpr/src/sys/mips/mips/pmap.c#14 (text+ko) ====
@@ -65,6 +65,10 @@
* and to when physical maps must be made correct.
*/
+/* XXXimp
+ * mips2 has a pmap_initialized, but we don't use it here. Why?
+ */
+
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@@ -172,7 +176,7 @@
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
static boolean_t pmap_testbit(vm_page_t m, int bit);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
- vm_page_t m);
+ vm_page_t m, boolean_t wired);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
@@ -1275,7 +1279,8 @@
* (pmap, va).
*/
static void
-pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
+pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m,
+ boolean_t wired)
{
pv_entry_t pv;
@@ -1286,6 +1291,7 @@
pv->pv_va = va;
pv->pv_pmap = pmap;
pv->pv_ptem = mpte;
+ pv->pv_wired = wired;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -1687,7 +1693,7 @@
* called at interrupt time.
*/
if((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
- pmap_insert_entry(pmap, va, mpte, m);
+ pmap_insert_entry(pmap, va, mpte, m, wired);
}
/*
@@ -1846,7 +1852,7 @@
* called at interrupt time.
*/
if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0)
- pmap_insert_entry(pmap, va, mpte, m);
+ pmap_insert_entry(pmap, va, mpte, m, FALSE);
/*
* Increment counters
@@ -1916,6 +1922,34 @@
*/
/*
+ * Maps a sequence of resident pages belonging to the same object.
+ * The sequence begins with the given page m_start. This page is
+ * mapped at the given virtual address start. Each subsequent page is
+ * mapped at a virtual address that is offset from start by the same
+ * amount as the page is offset from m_start within the object. The
+ * last page in the sequence is the page with the largest offset from
+ * m_start that can be mapped at a virtual address less than the given
+ * virtual address end. Not every virtual page between start and end
+ * is mapped; only those for which a resident page exists with the
+ * corresponding offset from m_start are mapped.
+ */
+void
+pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
+ vm_page_t m_start, vm_prot_t prot)
+{
+ vm_page_t m;
+ vm_pindex_t diff, psize;
+
+ /* Number of virtual pages covered by [start, end). */
+ psize = atop(end - start);
+ m = m_start;
+ /*
+ * Walk the object's resident-page list from m_start, mapping each
+ * page whose offset from m_start still falls within the range.
+ */
+ while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
+ /*
+ * Each mapping is entered unwired with write permission
+ * stripped. NOTE(review): presumably read-only so the first
+ * write faults and modified-bit emulation can kick in --
+ * confirm against pmap_enter().
+ */
+ pmap_enter(pmap, start + ptoa(diff), m, prot &
+ (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+ m = TAILQ_NEXT(m, listq);
+ }
+}
+
+/*
* pmap_object_init_pt preloads the ptes for a given object
* into the specified pmap. This eliminates the blast of soft
* faults on process startup and immediately after an mmap.
@@ -2117,6 +2151,43 @@
}
}
+/*
+ * Returns true if the pmap's pv is one of the first
+ * 16 pvs linked to from this page. This count may
+ * be changed upwards or downwards in the future; it
+ * is only necessary that true be returned for a small
+ * subset of pmaps for proper page aging.
+ */
+boolean_t
+pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
+{
+ pv_entry_t pv;
+ int loops = 0;
+
+ /* Fictitious pages carry no managed pv entries to search. */
+ if (m->flags & PG_FICTITIOUS)
+ return FALSE;
+
+ vm_page_lock_queues();
+ PMAP_LOCK(pmap);
+
+ /*
+ * Not found, check current mappings returning immediately if found.
+ */
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ if (pv->pv_pmap == pmap) {
+ PMAP_UNLOCK(pmap);
+ vm_page_unlock_queues();
+ return TRUE;
+ }
+ /* Inspect at most 16 pv entries before giving up (see above). */
+ loops++;
+ if (loops >= 16)
+ break;
+ }
+ PMAP_UNLOCK(pmap);
+ vm_page_unlock_queues();
+ return (FALSE);
+}
+
#define PMAP_REMOVE_PAGES_CURPROC_ONLY
/*
* Remove all pages from specified address space
@@ -2298,6 +2369,60 @@
}
/*
+ * pmap_page_wired_mappings:
+ *
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+ pv_entry_t pv;
+ int count;
+
+ count = 0;
+ /* Fictitious pages have no managed pv list; report zero. */
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ return (count);
+ /* Caller must hold the page queues lock to walk pv_list safely. */
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ /* Count entries flagged wired via the new pv_wired field. */
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
+ if (pv->pv_wired)
+ count++;
+ return (count);
+}
+
+/*
+ * Clear the write and modified bits in each of the given page's mappings.
+ */
+void
+pmap_remove_write(vm_page_t m)
+{
+ pv_entry_t pv, npv;
+ vm_offset_t va;
+ pt_entry_t *pte;
+
+ /* Nothing to do unless some mapping may be writable. */
+ if ((m->flags & PG_WRITEABLE) == 0)
+ return;
+
+ /*
+ * Loop over all current mappings
+ * setting/clearing as appropos.
+ */
+ for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = npv)
+ {
+ /*
+ * NOTE(review): this walks the page's md.pv_list (linked via
+ * pv_list) but advances using the pv_plist linkage, which is
+ * the pmap's per-process list -- these are different TAILQ
+ * links, so npv is likely the wrong successor. The panic
+ * message below also names pm_pvlist while iterating the page
+ * list. Verify the intended linkage; likely should be
+ * TAILQ_NEXT(pv, pv_list).
+ */
+ npv = TAILQ_NEXT(pv, pv_plist);
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+
+ if ((pte == NULL) || !mips_pg_v(*pte))
+ panic("page on pm_pvlist has no pte\n");
+
+ /* Downgrade this mapping to read/execute only. */
+ va = pv->pv_va;
+ pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
+ VM_PROT_READ | VM_PROT_EXECUTE);
+ }
+ /* All write mappings are gone; clear the page's writeable flag. */
+ vm_page_flag_clear(m, PG_WRITEABLE);
+}
+
+/*
* pmap_ts_referenced:
*
* Return the count of reference bits for a page, clearing all of them.
More information about the p4-projects
mailing list