PERFORCE change 134353 for review
Scott Long
scottl at FreeBSD.org
Mon Jan 28 21:13:17 PST 2008
http://perforce.freebsd.org/chv.cgi?CH=134353
Change 134353 by scottl at scottl-deimos on 2008/01/29 05:12:42
Integrate from xen31
Affected files ...
.. //depot/projects/xen31-xenbus/sys/conf/files.i386#2 integrate
.. //depot/projects/xen31-xenbus/sys/i386/i386/trap.c#3 integrate
.. //depot/projects/xen31-xenbus/sys/i386/include/xen/xenpmap.h#3 integrate
.. //depot/projects/xen31-xenbus/sys/i386/xen/pmap.c#6 integrate
.. //depot/projects/xen31-xenbus/sys/vm/pmap.h#2 integrate
.. //depot/projects/xen31-xenbus/sys/vm/vm_fault.c#2 integrate
Differences ...
==== //depot/projects/xen31-xenbus/sys/conf/files.i386#2 (text+ko) ====
@@ -317,7 +317,7 @@
i386/i386/nexus.c standard
i386/i386/perfmon.c optional perfmon
i386/i386/pmap.c optional native
-i386/xen/pmap.c optional xen nowerror
+i386/xen/pmap.c optional xen
i386/i386/ptrace_machdep.c standard
i386/i386/stack_machdep.c optional ddb | stack
i386/i386/support.s standard
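
The dropped "nowerror" keyword had exempted i386/xen/pmap.c from the kernel's
-Werror compile flags; removing it means the Xen pmap now builds warning-clean.
For reference, a sketch of the sys/conf/files.* entry format (the file and
option names below are placeholders, not real entries):

	# build foo.c whenever option FOO is configured
	dev/foo/foo.c		optional foo
	# as above, but compile this one file without -Werror
	dev/bar/bar.c		optional bar nowerror
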
==== //depot/projects/xen31-xenbus/sys/i386/i386/trap.c#3 (text+ko) ====
@@ -69,6 +69,7 @@
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
+#include <sys/syslog.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
@@ -834,7 +835,7 @@
}
#ifdef XEN
if (rv == KERN_PROTECTION_FAILURE)
- printf("SIGBUS: p=%s va=0x%x ftype=0x%x eip=0x%x\n", p->p_comm, va, ftype, frame->tf_eip);
+ log(LOG_ERR, "SIGBUS: p=%s va=0x%x ftype=0x%x eip=0x%x\n", p->p_comm, va, ftype, frame->tf_eip);
#endif
return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
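
Switching from printf(9) to log(9) sends the SIGBUS diagnostic through the
kernel message buffer with an explicit priority, so syslogd(8) can file or
filter it instead of it landing unconditionally on the console; hence the new
sys/syslog.h include above. A minimal usage sketch (the message text is
hypothetical):

	#include <sys/syslog.h>

	/* Route a diagnostic to syslogd at LOG_ERR priority rather
	 * than writing it straight to the console with printf(). */
	log(LOG_ERR, "foo0: recoverable fault at va=0x%x\n", va);
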
==== //depot/projects/xen31-xenbus/sys/i386/include/xen/xenpmap.h#3 (text+ko) ====
@@ -202,13 +202,17 @@
static __inline vm_paddr_t
xpmap_mtop(vm_paddr_t mpa)
{
- return machtophys(mpa) | (mpa & PAGE_MASK);
+ vm_paddr_t tmp = (mpa & PG_FRAME);
+
+ return machtophys(tmp) | (mpa & ~PG_FRAME);
}
static __inline vm_paddr_t
xpmap_ptom(vm_paddr_t ppa)
{
- return phystomach(ppa) | (ppa & PAGE_MASK);
+ vm_paddr_t tmp = (ppa & PG_FRAME);
+
+ return phystomach(tmp) | (ppa & ~PG_FRAME);
}
static __inline void
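
The xpmap_mtop()/xpmap_ptom() fix translates only the page frame bits
(addr & PG_FRAME) through the machine/physical lookup and carries every
non-frame bit across with ~PG_FRAME, where the old code preserved just the low
PAGE_MASK bits. Under PAE the two masks differ: flag bits are not confined to
the low 12 bits (the NX bit, for example, sits above the frame), so ~PG_FRAME
is the right complement. A sketch of the pattern, with the hypothetical
phys_lookup() standing in for machtophys()/phystomach():

	/* Split on PG_FRAME: translate the frame number, then
	 * re-attach all non-frame flag bits unchanged. */
	static __inline vm_paddr_t
	translate(vm_paddr_t addr)
	{
		vm_paddr_t frame = addr & PG_FRAME;	/* frame bits only */
		vm_paddr_t flags = addr & ~PG_FRAME;	/* low flags, NX, ... */

		return (phys_lookup(frame) | flags);
	}
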
==== //depot/projects/xen31-xenbus/sys/i386/xen/pmap.c#6 (text+ko) ====
@@ -213,8 +213,10 @@
#ifdef PAE
pt_entry_t pg_nx;
+#if !defined(XEN)
static uma_zone_t pdptzone;
#endif
+#endif
/*
* Data for the pv entry allocation mechanism
@@ -270,7 +272,7 @@
static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
-static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
+static vm_page_t pmap_enter_quick_locked(multicall_entry_t **mcl, int *count, pmap_t pmap, vm_offset_t va,
vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
vm_page_t *free);
@@ -290,7 +292,9 @@
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
-#ifdef PAE
+static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr);
+
+#if defined(PAE) && !defined(XEN)
static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
#endif
@@ -583,7 +587,7 @@
TAILQ_INIT(&m->md.pv_list);
}
-#ifdef PAE
+#if defined(PAE) && !defined(XEN)
static MALLOC_DEFINE(M_PMAPPDPT, "pmap", "pmap pdpt");
@@ -677,7 +681,7 @@
if (pv_chunkbase == NULL)
panic("pmap_init: not enough kvm for pv chunks");
pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
-#ifdef PAE
+#if defined(PAE) && !defined(XEN)
pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
UMA_ZONE_VM | UMA_ZONE_NOFREE);
@@ -1217,24 +1221,49 @@
void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
- pt_entry_t *endpte, oldpte, *pte;
-
- oldpte = 0;
+ pt_entry_t *endpte, *pte;
+ vm_paddr_t pa;
+ vm_offset_t va = sva;
+ int mclcount = 0;
+ multicall_entry_t mcl[16];
+ multicall_entry_t *mclp = mcl;
+ int error;
+
pte = vtopte(sva);
endpte = pte + count;
while (pte < endpte) {
- oldpte |= *pte;
- PT_SET_VA(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag | PG_RW | PG_V, FALSE);
+ pa = xpmap_ptom(VM_PAGE_TO_PHYS(*ma)) | pgeflag | PG_RW | PG_V | PG_M | PG_A;
+
+ mclp->op = __HYPERVISOR_update_va_mapping;
+ mclp->args[0] = va;
+ mclp->args[1] = (uint32_t)(pa & 0xffffffff);
+ mclp->args[2] = (uint32_t)(pa >> 32);
+ mclp->args[3] = (*pte & PG_V) ? UVMF_INVLPG|UVMF_ALL : 0;
+
+ va += PAGE_SIZE;
pte++;
ma++;
+ mclp++;
+ mclcount++;
+ if (mclcount == 16) {
+ error = HYPERVISOR_multicall(mcl, mclcount);
+ mclp = mcl;
+ mclcount = 0;
+ KASSERT(error == 0, ("bad multicall %d", error));
+ }
+ }
+ if (mclcount) {
+ error = HYPERVISOR_multicall(mcl, mclcount);
+ KASSERT(error == 0, ("bad multicall %d", error));
}
- if ((oldpte & PG_V) != 0)
- pmap_invalidate_range(kernel_pmap, sva, sva + count *
- PAGE_SIZE);
- else
- PT_UPDATES_FLUSH();
+
+#ifdef INVARIANTS
+ for (pte = vtopte(sva), mclcount = 0; mclcount < count; mclcount++, pte++)
+ KASSERT(*pte, ("pte not set for va=0x%x\n", sva + mclcount*PAGE_SIZE));
+#endif
}
+
/*
* This routine tears out page mappings from the
* kernel -- it is meant only for temporary mappings.
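
The pmap_qenter() rewrite above queues each PTE update as a
__HYPERVISOR_update_va_mapping multicall entry and issues one
HYPERVISOR_multicall() per 16 entries (plus one for the tail), replacing a
per-page trip into the hypervisor with a single batched hypercall; the 64-bit
PTE is split across args[1]/args[2] because multicall arguments are 32 bits
wide on i386. The idiom, reduced to a self-contained sketch (vas/ptes are
hypothetical inputs, not kernel interfaces):

	static void
	batch_va_updates(vm_offset_t *vas, uint64_t *ptes, int count)
	{
		multicall_entry_t mcl[16];
		int i, n = 0, error;

		for (i = 0; i < count; i++) {
			mcl[n].op = __HYPERVISOR_update_va_mapping;
			mcl[n].args[0] = vas[i];
			mcl[n].args[1] = (uint32_t)(ptes[i] & 0xffffffff);
			mcl[n].args[2] = (uint32_t)(ptes[i] >> 32);
			mcl[n].args[3] = 0;	/* no TLB-flush flags */
			if (++n == 16) {	/* full batch: flush it */
				error = HYPERVISOR_multicall(mcl, n);
				KASSERT(error == 0, ("bad multicall %d", error));
				n = 0;
			}
		}
		if (n != 0) {		/* partial final batch */
			error = HYPERVISOR_multicall(mcl, n);
			KASSERT(error == 0, ("bad multicall %d", error));
		}
	}

pmap_enter_object() below applies the same batching through the reworked
pmap_enter_quick_locked().
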
@@ -1692,37 +1721,40 @@
void
pmap_release(pmap_t pmap)
{
- vm_page_t m, ptdpg[NPGPTD+1];
+ vm_page_t m, ptdpg[2*NPGPTD+1];
vm_paddr_t ma;
int i;
+#ifdef XEN
#ifdef PAE
int npgptd = 2*NPGPTD + 1;
#else
int npgptd = 2*NPGPTD;
#endif
-
+#else
+ int npgptd = NPGPTD;
+#endif
KASSERT(pmap->pm_stats.resident_count == 0,
("pmap_release: pmap resident count %ld != 0",
pmap->pm_stats.resident_count));
+ PT_UPDATES_FLUSH();
pmap_lazyfix(pmap);
mtx_lock_spin(&allpmaps_lock);
LIST_REMOVE(pmap, pm_list);
mtx_unlock_spin(&allpmaps_lock);
+#ifdef XEN
for (i = 0; i < NPGPTD; i++)
ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir_shadow + (i*NPDEPG)) & PG_FRAME);
+ pmap_qremove((vm_offset_t)pmap->pm_pdir_shadow, NPGPTD);
+#endif
for (i = 0; i < NPGPTD; i++)
ptdpg[NPGPTD + i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME);
-
-#ifdef PAE
+ pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
+#if defined(PAE) && defined(XEN)
ptdpg[2*NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt));
#endif
- PT_UPDATES_FLUSH();
- pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
- pmap_qremove((vm_offset_t)pmap->pm_pdir_shadow, NPGPTD);
-
for (i = 0; i < npgptd; i++) {
m = ptdpg[i];
ma = xpmap_ptom(VM_PAGE_TO_PHYS(m));
@@ -2695,6 +2727,7 @@
*PMAP1 = 0;
sched_unpin();
vm_page_unlock_queues();
+ KASSERT(*vtopte(va), ("pte not set for va=0x%x\n", va));
PMAP_UNLOCK(pmap);
}
@@ -2716,18 +2749,33 @@
{
vm_page_t m, mpte;
vm_pindex_t diff, psize;
-
+ multicall_entry_t mcl[16];
+ multicall_entry_t *mclp = mcl;
+ int error, count = 0;
+
VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
psize = atop(end - start);
+
mpte = NULL;
m = m_start;
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
- mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
+ mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m,
prot, mpte);
m = TAILQ_NEXT(m, listq);
+ if (count == 16) {
+ error = HYPERVISOR_multicall(mcl, count);
+ KASSERT(error == 0, ("bad multicall %d", error));
+ mclp = mcl;
+ count = 0;
+ }
+ }
+ if (count) {
+ error = HYPERVISOR_multicall(mcl, count);
+ KASSERT(error == 0, ("bad multicall %d", error));
}
- PMAP_UNLOCK(pmap);
+
+ PMAP_UNLOCK(pmap);
}
/*
@@ -2742,20 +2790,54 @@
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
+ multicall_entry_t mcl, *mclp;
+ int count = 0;
+ mclp = &mcl;
+
+ PMAP_LOCK(pmap);
+ (void) pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL);
+ if (count)
+ HYPERVISOR_multicall(&mcl, count);
+ PMAP_UNLOCK(pmap);
+}
+void
+pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages, vm_prot_t *prots, int count)
+{
+ int i, error, index = 0;
+ multicall_entry_t mcl[16];
+ multicall_entry_t *mclp = mcl;
+
PMAP_LOCK(pmap);
- (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+ for (i = 0; i < count; i++, addrs++, pages++, prots++) {
+ if (!pmap_is_prefaultable_locked(pmap, *addrs))
+ continue;
+
+ (void) pmap_enter_quick_locked(&mclp, &index, pmap, *addrs, *pages, *prots, NULL);
+ if (index == 16) {
+ error = HYPERVISOR_multicall(mcl, index);
+ mclp = mcl;
+ index = 0;
+ KASSERT(error == 0, ("bad multicall %d", error));
+ }
+ }
+ if (index) {
+ error = HYPERVISOR_multicall(mcl, index);
+ KASSERT(error == 0, ("bad multicall %d", error));
+ }
+
PMAP_UNLOCK(pmap);
}
static vm_page_t
-pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
+pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, vm_page_t mpte)
{
pt_entry_t *pte;
vm_paddr_t pa;
vm_page_t free;
-
+ multicall_entry_t *mcl = *mclpp;
+
KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
(m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
("pmap_enter_quick_locked: managed mapping within the clean submap"));
@@ -2845,6 +2927,7 @@
pa |= pg_nx;
#endif
+#if 0
/*
* Now validate mapping with RO protection
*/
@@ -2852,6 +2935,23 @@
pte_store(pte, pa | PG_V | PG_U);
else
pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
+#else
+ /*
+ * Now validate mapping with RO protection
+ */
+ if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
+ pa = xpmap_ptom(pa | PG_V | PG_U);
+ else
+ pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED);
+
+ mcl->op = __HYPERVISOR_update_va_mapping;
+ mcl->args[0] = va;
+ mcl->args[1] = (uint32_t)(pa & 0xffffffff);
+ mcl->args[2] = (uint32_t)(pa >> 32);
+ mcl->args[3] = 0;
+ *mclpp = mcl + 1;
+ *count = *count + 1;
+#endif
return mpte;
}
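
pmap_enter_quick_range() is the new batched entry point: the caller hands over
parallel arrays of addresses, pages, and protections, and
pmap_enter_quick_locked() now only fills in a multicall entry (bumping *count)
while its callers flush the accumulated batch. A hypothetical caller, mirroring
the vm_fault.c change below, with the page queues lock held across the batch:

	vm_page_lock_queues();
	pmap_enter_quick_range(pmap, addrs, pages, prots, count);
	vm_page_unlock_queues();
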
@@ -3058,7 +3158,11 @@
* accessed (referenced) bits
* during the copy.
*/
+#if 0
PT_SET_VA_MA(dst_pte, ptetemp & ~(PG_W | PG_M | PG_A), FALSE);
+#else
+ *dst_pte = ptetemp & ~(PG_W | PG_M | PG_A);
+#endif
dst_pmap->pm_stats.resident_count++;
} else {
free = NULL;
@@ -3407,22 +3511,32 @@
* Return whether or not the specified virtual address is eligible
* for prefault.
*/
-boolean_t
-pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+static boolean_t
+pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr)
{
pt_entry_t *pte;
boolean_t rv = FALSE;
- PMAP_LOCK(pmap);
+ return (rv);
+
if (*pmap_pde(pmap, addr)) {
pte = vtopte(addr);
- rv = *pte == 0;
+ rv = ((*pte & PG_V) == 0);
}
+ return (rv);
+}
+
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+ boolean_t rv;
+
+ PMAP_LOCK(pmap);
+ rv = pmap_is_prefaultable_locked(pmap, addr);
PMAP_UNLOCK(pmap);
return (rv);
}
-
void
pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len)
{
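
Splitting pmap_is_prefaultable() into a lock-taking wrapper and a _locked
worker is the usual FreeBSD pattern for letting callers that already hold the
lock (here pmap_enter_quick_range()) reuse the check without recursing on the
pmap lock; note that, as committed, the early return (rv) in the worker leaves
prefaulting disabled. The pattern in a generic, hypothetical foo_*() pair:

	static int
	foo_locked(struct foo *fp)
	{

		mtx_assert(&fp->f_mtx, MA_OWNED);
		return (fp->f_state);
	}

	int
	foo(struct foo *fp)
	{
		int rv;

		mtx_lock(&fp->f_mtx);
		rv = foo_locked(fp);
		mtx_unlock(&fp->f_mtx);
		return (rv);
	}
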
==== //depot/projects/xen31-xenbus/sys/vm/pmap.h#2 (text+ko) ====
@@ -99,6 +99,8 @@
vm_prot_t, boolean_t);
void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot);
+void pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *m,
+ vm_prot_t *prot, int count);
void pmap_enter_object(pmap_t pmap, vm_offset_t start,
vm_offset_t end, vm_page_t m_start, vm_prot_t prot);
vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va);
==== //depot/projects/xen31-xenbus/sys/vm/vm_fault.c#2 (text+ko) ====
@@ -932,12 +932,15 @@
static void
vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{
- int i;
+ int i, count;
vm_offset_t addr, starta;
vm_pindex_t pindex;
vm_page_t m;
- vm_object_t object;
-
+ vm_object_t object, lobject;
+ vm_prot_t prots[PAGEORDER_SIZE];
+ vm_page_t pages[PAGEORDER_SIZE];
+ vm_offset_t addrs[PAGEORDER_SIZE];
+
if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
return;
@@ -949,10 +952,16 @@
} else if (starta > addra) {
starta = 0;
}
-
+
+ for (i = 0; i < PAGEORDER_SIZE; i++)
+ pages[i] = NULL;
+ count = 0;
+ lobject = object;
+ VM_OBJECT_LOCK(lobject);
for (i = 0; i < PAGEORDER_SIZE; i++) {
- vm_object_t backing_object, lobject;
+ vm_object_t backing_object;
+
addr = addra + prefault_pageorder[i];
if (addr > addra + (PFFOR * PAGE_SIZE))
addr = 0;
@@ -964,35 +973,45 @@
continue;
pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
- lobject = object;
- VM_OBJECT_LOCK(lobject);
while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
lobject->type == OBJT_DEFAULT &&
(backing_object = lobject->backing_object) != NULL) {
if (lobject->backing_object_offset & PAGE_MASK)
break;
pindex += lobject->backing_object_offset >> PAGE_SHIFT;
+
+ if (count) {
+ vm_page_lock_queues();
+ pmap_enter_quick_range(pmap, addrs, pages, prots, count);
+ vm_page_unlock_queues();
+ }
+ count = 0;
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_UNLOCK(lobject);
lobject = backing_object;
+
}
/*
* give-up when a page is not in memory
*/
- if (m == NULL) {
- VM_OBJECT_UNLOCK(lobject);
+ if (m == NULL)
break;
- }
+
if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(m->busy == 0) &&
(m->flags & PG_FICTITIOUS) == 0) {
-
- vm_page_lock_queues();
- pmap_enter_quick(pmap, addr, m, entry->protection);
- vm_page_unlock_queues();
+ pages[count] = m;
+ prots[count] = entry->protection;
+ addrs[count] = addr;
+ count++;
}
- VM_OBJECT_UNLOCK(lobject);
+ }
+ if (count) {
+ vm_page_lock_queues();
+ pmap_enter_quick_range(pmap, addrs, pages, prots, count);
+ vm_page_unlock_queues();
}
+ VM_OBJECT_UNLOCK(lobject);
}
/*
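
vm_fault_prefault() now collects up to PAGEORDER_SIZE resident, non-busy pages
into the addrs/pages/prots arrays and enters them with one
pmap_enter_quick_range() call instead of one pmap_enter_quick() per page. The
pending batch is flushed before the loop trades lobject's lock for a backing
object's, so every batched page is still under its object lock when entered;
condensed from the loop above:

	/* Flush queued prefault entries before switching object locks. */
	if (count) {
		vm_page_lock_queues();
		pmap_enter_quick_range(pmap, addrs, pages, prots, count);
		vm_page_unlock_queues();
		count = 0;
	}
	VM_OBJECT_LOCK(backing_object);
	VM_OBJECT_UNLOCK(lobject);
	lobject = backing_object;
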