PERFORCE change 133896 for review

Kip Macy kmacy at FreeBSD.org
Tue Jan 22 15:49:15 PST 2008


http://perforce.freebsd.org/chv.cgi?CH=133896

Change 133896 by kmacy at pandemonium:kmacy:xen31 on 2008/01/22 23:48:37

	- teach prefault to batch calls to pmap_enter_quick and to reduce locking overhead
	- convert pmap_qenter and pmap_enter_quick_locked to use the update_va_mapping interface,
	  as it allows Xen to use its own linear page tables
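
	  The batching pattern both paths now share is sketched below. This is
	  a minimal illustration rather than the committed code: it assumes the
	  standard Xen multicall interface (multicall_entry_t,
	  __HYPERVISOR_update_va_mapping, UVMF_INVLPG, HYPERVISOR_multicall),
	  and queue_va_update is a hypothetical helper named only for this
	  sketch. Batching amortizes the cost of the hypercall trap across up
	  to 16 PTE updates instead of trapping once per page.

		/* Encode one deferred PTE write as a multicall entry. */
		static void
		queue_va_update(multicall_entry_t *mclp, vm_offset_t va,
		    uint64_t newpte, unsigned long flags)
		{
			mclp->op = __HYPERVISOR_update_va_mapping;
			mclp->args[0] = va;
			/* PAE PTEs are 64 bits: split across two 32-bit args. */
			mclp->args[1] = (uint32_t)(newpte & 0xffffffff);
			mclp->args[2] = (uint32_t)(newpte >> 32);
			mclp->args[3] = flags;	/* UVMF_INVLPG or 0 */
		}

		/*
		 * Callers accumulate entries in a 16-slot array and drain it
		 * with a single hypercall:
		 *
		 *	if (++mclcount == 16) {
		 *		HYPERVISOR_multicall(mcl, mclcount);
		 *		mclcount = 0;
		 *	}
		 *	...after the loop, flush any remainder:
		 *	if (mclcount)
		 *		HYPERVISOR_multicall(mcl, mclcount);
		 */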

Affected files ...

.. //depot/projects/xen31/sys/i386/xen/pmap.c#27 edit
.. //depot/projects/xen31/sys/vm/pmap.h#5 edit
.. //depot/projects/xen31/sys/vm/vm_fault.c#3 edit

Differences ...

==== //depot/projects/xen31/sys/i386/xen/pmap.c#27 (text+ko) ====

@@ -270,7 +270,7 @@
 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
 static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
 
-static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
+static vm_page_t pmap_enter_quick_locked(multicall_entry_t *mcl, pmap_t pmap, vm_offset_t va,
     vm_page_t m, vm_prot_t prot, vm_page_t mpte);
 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
     vm_page_t *free);
@@ -1217,22 +1217,37 @@
 void
 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 {
-	pt_entry_t *endpte, oldpte, *pte;
-
-	oldpte = 0;
+	pt_entry_t *endpte, *pte;
+	vm_paddr_t pa;
+	int mclcount = 0;
+	multicall_entry_t mcl[16];
+	multicall_entry_t *mclp = mcl;
+
 	pte = vtopte(sva);
 	endpte = pte + count;
 	while (pte < endpte) {
-		oldpte |= *pte;
-		PT_SET_VA(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag | PG_RW | PG_V, FALSE);
+		pa = xpmap_ptom(VM_PAGE_TO_PHYS(*ma)) | pgeflag | PG_RW | PG_V;
+
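+		/* Queue an update_va_mapping op; flush only if the old PTE was valid. */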
+		mclp->op = __HYPERVISOR_update_va_mapping;
+		mclp->args[0] = sva;
+		mclp->args[1] = (uint32_t)(pa & 0xffffffff);
+		mclp->args[2] = (uint32_t)(pa >> 32);
+		mclp->args[3] = (*pte & PG_V) ? UVMF_INVLPG : 0;
+
+		sva += PAGE_SIZE;
 		pte++;
 		ma++;
+		mclp++;
+		mclcount++;
+		if (mclcount == 16) {
+			HYPERVISOR_multicall(mcl, mclcount);
+			mclp = mcl;
+			mclcount = 0;
+		}
 	}
-	if ((oldpte & PG_V) != 0)
-		pmap_invalidate_range(kernel_pmap, sva, sva + count *
-		    PAGE_SIZE);
-	else
-		PT_UPDATES_FLUSH();
+	if (mclcount)
+		HYPERVISOR_multicall(mcl, mclcount);
+
 }
 
 /*
@@ -2716,17 +2731,31 @@
 {
 	vm_page_t m, mpte;
 	vm_pindex_t diff, psize;
-
+	multicall_entry_t mcl[16];
+	multicall_entry_t *mclpstart, *mclp;
+	int count = 0;
+	mclpstart = mclp = mcl;
+
 	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
 	psize = atop(end - start);
+
 	mpte = NULL;
 	m = m_start;
 	PMAP_LOCK(pmap);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
-		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
+		mpte = pmap_enter_quick_locked(mclp, pmap, start + ptoa(diff), m,
 		    prot, mpte);
 		m = TAILQ_NEXT(m, listq);
+		mclp++;
+		count++;
+		if (count == 16) {
+			HYPERVISOR_multicall(mclpstart, count);
+			mclp = mclpstart;
+			count = 0;
+		}
 	}
+	if (count)
+		HYPERVISOR_multicall(mclpstart, count);
  	PMAP_UNLOCK(pmap);
 }
 
@@ -2742,14 +2771,39 @@
 void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
+	multicall_entry_t mcl;
+
+	PMAP_LOCK(pmap);
+	(void) pmap_enter_quick_locked(&mcl, pmap, va, m, prot, NULL);
+	HYPERVISOR_multicall(&mcl, 1);
+	PMAP_UNLOCK(pmap);
+}
 
+void
+pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages, vm_prot_t *prots, int count)
+{
+	int i, index = 0;
+	multicall_entry_t mcl[16];
+	multicall_entry_t *mclp = mcl;
+
 	PMAP_LOCK(pmap);
-	(void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+	for (i = 0; i < count; i++, pages++, prots++, addrs++) {
+		(void) pmap_enter_quick_locked(mclp, pmap, *addrs, *pages, *prots, NULL);
+		index++;
+		mclp++;
+		if (index == 16) {
+			HYPERVISOR_multicall(mcl, index);
+			mclp = mcl;
+			index = 0;
+		}
+	}
+	if (index)
+		HYPERVISOR_multicall(mcl, index);
 	PMAP_UNLOCK(pmap);
 }
 
 static vm_page_t
-pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
+pmap_enter_quick_locked(multicall_entry_t *mcl, pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot, vm_page_t mpte)
 {
 	pt_entry_t *pte;
@@ -2845,6 +2899,7 @@
 		pa |= pg_nx;
 #endif
 
+#if 0
 	/*
 	 * Now validate mapping with RO protection
 	 */
@@ -2852,6 +2907,21 @@
 		pte_store(pte, pa | PG_V | PG_U);
 	else
 		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
+#else
+	/*
+	 * Now validate mapping with RO protection
+	 */
+	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
+		pa = xpmap_ptom(pa | PG_V | PG_U);
+	else
+		pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED);
+
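+	/* Fill a multicall entry; the caller issues the hypercall in batches. */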
+	mcl->op = __HYPERVISOR_update_va_mapping;
+	mcl->args[0] = va;
+	mcl->args[1] = (uint32_t)(pa & 0xffffffff);
+	mcl->args[2] = (uint32_t)(pa >> 32);
+	mcl->args[3] = 0;
+#endif
 	return mpte;
 }
 

==== //depot/projects/xen31/sys/vm/pmap.h#5 (text+ko) ====

@@ -99,6 +99,8 @@
 		    vm_prot_t, boolean_t);
 void		 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
 		    vm_prot_t prot);
+void		 pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *m,
+                    vm_prot_t *prot, int count);
 void		 pmap_enter_object(pmap_t pmap, vm_offset_t start,
 		    vm_offset_t end, vm_page_t m_start, vm_prot_t prot);
 vm_paddr_t	 pmap_extract(pmap_t pmap, vm_offset_t va);

==== //depot/projects/xen31/sys/vm/vm_fault.c#3 (text+ko) ====

@@ -932,12 +932,15 @@
 static void
 vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
 {
-	int i;
+	int i, count;
 	vm_offset_t addr, starta;
 	vm_pindex_t pindex;
 	vm_page_t m;
-	vm_object_t object;
-
+	vm_object_t object, lobject;
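+	/* Prefault candidates, flushed in batches via pmap_enter_quick_range. */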
+	vm_prot_t prots[PAGEORDER_SIZE];
+	vm_page_t pages[PAGEORDER_SIZE];
+	vm_offset_t addrs[PAGEORDER_SIZE];
+
 	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
 		return;
 
@@ -949,10 +952,16 @@
 	} else if (starta > addra) {
 		starta = 0;
 	}
-
+
+	for (i = 0; i < PAGEORDER_SIZE; i++) 
+		pages[i] = NULL;
+	count = 0;
+	lobject = object;
+	VM_OBJECT_LOCK(lobject);
 	for (i = 0; i < PAGEORDER_SIZE; i++) {
-		vm_object_t backing_object, lobject;
+		vm_object_t backing_object;
 
+
 		addr = addra + prefault_pageorder[i];
 		if (addr > addra + (PFFOR * PAGE_SIZE))
 			addr = 0;
@@ -964,35 +973,45 @@
 			continue;
 
 		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
-		lobject = object;
-		VM_OBJECT_LOCK(lobject);
 		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
 		    lobject->type == OBJT_DEFAULT &&
 		    (backing_object = lobject->backing_object) != NULL) {
 			if (lobject->backing_object_offset & PAGE_MASK)
 				break;
 			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
+
+			if (count) {
+				vm_page_lock_queues();
+				pmap_enter_quick_range(pmap, addrs, pages, prots, count);
+				vm_page_unlock_queues();
+			}
+			count = 0;
 			VM_OBJECT_LOCK(backing_object);
 			VM_OBJECT_UNLOCK(lobject);
 			lobject = backing_object;
+
 		}
 		/*
 		 * give-up when a page is not in memory
 		 */
-		if (m == NULL) {
-			VM_OBJECT_UNLOCK(lobject);
+		if (m == NULL)
 			break;
-		}
+
 		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
 			(m->busy == 0) &&
 		    (m->flags & PG_FICTITIOUS) == 0) {
-
-			vm_page_lock_queues();
-			pmap_enter_quick(pmap, addr, m, entry->protection);
-			vm_page_unlock_queues();
+			pages[count] = m;
+			prots[count] = entry->protection;
+			addrs[count] = addr;
+			count++;
 		}
-		VM_OBJECT_UNLOCK(lobject);
+	}
+	if (count) {
+		vm_page_lock_queues();
+		pmap_enter_quick_range(pmap, addrs, pages, prots, count);
+		vm_page_unlock_queues();
 	}
+	VM_OBJECT_UNLOCK(lobject);
 }
 
 /*

