svn commit: r187465 - in stable/7/sys: . amd64/amd64 amd64/include contrib/pf dev/ath/ath_hal dev/cxgb

From: Alan Cox <alc@FreeBSD.org>
Date: Tue, 20 Jan 2009 07:24:29 +0000 (UTC)
Author: alc
Date: Tue Jan 20 07:24:28 2009
New Revision: 187465
URL: http://svn.freebsd.org/changeset/base/187465

Log:
  MFC r179917,180333,180373,180378
    Prepare for a larger kernel virtual address space.  Specifically, once
    KERNBASE and VM_MIN_KERNEL_ADDRESS are no longer the same, the physical
    memory allocated during bootstrap will be offset from the low-end of the
    kernel's page table.
  
    Change create_pagetables() and pmap_init() so that many fewer page table
    pages have to be preallocated by create_pagetables().
  
    Eliminate pmap_growkernel()'s dependence on create_pagetables() preallocating
    page directory pages from VM_MIN_KERNEL_ADDRESS through the end of the
    kernel's bss.  Specifically, the dependence was in pmap_growkernel()'s one-
    time initialization of kernel_vm_end, not in its main body.  (I could not,
    however, resist the urge to optimize the main body.)
  
    Reduce the number of preallocated page directory pages to just those needed
    to support NKPT page table pages.  (In fact, this allows me to revert a
    couple of my earlier changes to create_pagetables().)
  
    Fix lines that are too long in pmap_growkernel() by substituting shorter
    but equivalent expressions.

Modified:
  stable/7/sys/   (props changed)
  stable/7/sys/amd64/amd64/pmap.c
  stable/7/sys/amd64/include/pmap.h
  stable/7/sys/contrib/pf/   (props changed)
  stable/7/sys/dev/ath/ath_hal/   (props changed)
  stable/7/sys/dev/cxgb/   (props changed)

Modified: stable/7/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/7/sys/amd64/amd64/pmap.c	Tue Jan 20 05:51:16 2009	(r187464)
+++ stable/7/sys/amd64/amd64/pmap.c	Tue Jan 20 07:24:28 2009	(r187465)
@@ -173,7 +173,7 @@ vm_offset_t virtual_end;	/* VA of last a
 
 static int ndmpdp;
 static vm_paddr_t dmaplimit;
-vm_offset_t kernel_vm_end;
+vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
 pt_entry_t pg_nx;
 
 static u_int64_t	KPTphys;	/* phys addr of kernel level 1 */
@@ -1517,25 +1517,34 @@ pmap_growkernel(vm_offset_t addr)
 	vm_paddr_t paddr;
 	vm_page_t nkpg;
 	pd_entry_t *pde, newpdir;
-	pdp_entry_t newpdp;
+	pdp_entry_t *pdpe;
 
 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
-	if (kernel_vm_end == 0) {
-		kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
-		while ((*pmap_pde(kernel_pmap, kernel_vm_end) & PG_V) != 0) {
-			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
-			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-				kernel_vm_end = kernel_map->max_offset;
-				break;                       
-			}
-		}
-	}
-	addr = roundup2(addr, PAGE_SIZE * NPTEPG);
+
+	/*
+	 * Return if "addr" is within the range of kernel page table pages
+	 * that were preallocated during pmap bootstrap.  Moreover, leave
+	 * "kernel_vm_end" and the kernel page table as they were.
+	 *
+	 * The correctness of this action is based on the following
+	 * argument: vm_map_findspace() allocates contiguous ranges of the
+	 * kernel virtual address space.  It calls this function if a range
+	 * ends after "kernel_vm_end".  If the kernel is mapped between
+	 * "kernel_vm_end" and "addr", then the range cannot begin at
+	 * "kernel_vm_end".  In fact, its beginning address cannot be less
+	 * than the kernel.  Thus, there is no immediate need to allocate
+	 * any new kernel page table pages between "kernel_vm_end" and
+	 * "KERNBASE".
+	 */
+	if (KERNBASE < addr && addr <= KERNBASE + NKPT * NBPDR)
+		return;
+
+	addr = roundup2(addr, NBPDR);
 	if (addr - 1 >= kernel_map->max_offset)
 		addr = kernel_map->max_offset;
 	while (kernel_vm_end < addr) {
-		pde = pmap_pde(kernel_pmap, kernel_vm_end);
-		if (pde == NULL) {
+		pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
+		if ((*pdpe & PG_V) == 0) {
 			/* We need a new PDP entry */
 			nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT,
 			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
@@ -1545,13 +1554,13 @@ pmap_growkernel(vm_offset_t addr)
 			if ((nkpg->flags & PG_ZERO) == 0)
 				pmap_zero_page(nkpg);
 			paddr = VM_PAGE_TO_PHYS(nkpg);
-			newpdp = (pdp_entry_t)
+			*pdpe = (pdp_entry_t)
 				(paddr | PG_V | PG_RW | PG_A | PG_M);
-			*pmap_pdpe(kernel_pmap, kernel_vm_end) = newpdp;
 			continue; /* try again */
 		}
+		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
 		if ((*pde & PG_V) != 0) {
-			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
+			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
 			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 				kernel_vm_end = kernel_map->max_offset;
 				break;                       
@@ -1568,9 +1577,9 @@ pmap_growkernel(vm_offset_t addr)
 			pmap_zero_page(nkpg);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
 		newpdir = (pd_entry_t) (paddr | PG_V | PG_RW | PG_A | PG_M);
-		*pmap_pde(kernel_pmap, kernel_vm_end) = newpdir;
+		pde_store(pde, newpdir);
 
-		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
+		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
 		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 			kernel_vm_end = kernel_map->max_offset;
 			break;                       

Modified: stable/7/sys/amd64/include/pmap.h
==============================================================================
--- stable/7/sys/amd64/include/pmap.h	Tue Jan 20 05:51:16 2009	(r187464)
+++ stable/7/sys/amd64/include/pmap.h	Tue Jan 20 07:24:28 2009	(r187465)
@@ -104,8 +104,7 @@
 
 /* Initial number of kernel page tables. */
 #ifndef NKPT
-/* 240 page tables needed to map 16G (120B "struct vm_page", 2M page tables). */
-#define	NKPT		240
+#define	NKPT		32
 #endif
 
 #define NKPML4E		1		/* number of kernel PML4 slots */
Received on Tue Jan 20 2009 - 07:24:29 UTC