svn commit: r215529 - stable/8/sys/vm

John Baldwin jhb at FreeBSD.org
Fri Nov 19 16:52:18 UTC 2010


Author: jhb
Date: Fri Nov 19 16:52:18 2010
New Revision: 215529
URL: http://svn.freebsd.org/changeset/base/215529

Log:
  MFC 214782:
  Update startup_alloc() to support multi-page allocations and allow internal
  zones whose objects are larger than a page to use startup_alloc().  This
  allows allocation of zone objects during early boot on machines with a large
  number of CPUs, since the resulting zone objects are larger than a page.
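
  In outline, the new path rounds the request up to whole pages with
  howmany(bytes, PAGE_SIZE), walks uma_boot_pages to confirm that many
  entries remain, and then unlinks them; as the comment in the patch notes,
  the last entry unlinked carries the start address of the contiguous run.
  The sketch below is a minimal userland model of that walk, not the kernel
  code itself: boot_slab, boot_pages_head, boot_reserve and the constants
  are illustrative names only.

  #include <sys/queue.h>
  #include <stdio.h>
  #include <stdlib.h>

  #define PAGE_SZ	4096		/* stand-in for PAGE_SIZE */
  #define NPAGES	8		/* stand-in for the boot page count */
  #define howmany(x, y)	(((x) + ((y) - 1)) / (y))

  struct boot_slab {
  	LIST_ENTRY(boot_slab) link;
  	char *data;			/* start address of this page */
  };

  static LIST_HEAD(, boot_slab) boot_pages_head =
      LIST_HEAD_INITIALIZER(boot_pages_head);

  /*
   * Reserve enough whole pages from the boot cache to cover "bytes".
   * First walk the list to be sure it is long enough, then unlink
   * "pages" entries from the head.  In this model the pages are pushed
   * onto the head in ascending address order, so the last entry
   * unlinked holds the lowest address, i.e. the start of the run.
   */
  static void *
  boot_reserve(size_t bytes)
  {
  	struct boot_slab *tmps;
  	int pages, check_pages;

  	pages = howmany(bytes, PAGE_SZ);
  	check_pages = pages - 1;

  	tmps = LIST_FIRST(&boot_pages_head);
  	while (tmps != NULL && check_pages-- > 0)
  		tmps = LIST_NEXT(tmps, link);
  	if (tmps == NULL)
  		return (NULL);		/* not enough boot pages left */
  	while (pages-- > 0) {
  		tmps = LIST_FIRST(&boot_pages_head);
  		LIST_REMOVE(tmps, link);
  	}
  	return (tmps->data);
  }

  int
  main(void)
  {
  	static struct boot_slab slabs[NPAGES];
  	char *mem;
  	int i;

  	mem = malloc(NPAGES * PAGE_SZ);	/* models the contiguous boot pages */
  	for (i = 0; i < NPAGES; i++) {
  		slabs[i].data = mem + i * PAGE_SZ;
  		LIST_INSERT_HEAD(&boot_pages_head, &slabs[i], link);
  	}
  	/* A 3-page request comes back as one contiguous run inside "mem". */
  	printf("3-page run starts at offset %td\n",
  	    (char *)boot_reserve(3 * PAGE_SZ) - mem);
  	return (0);
  }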

Modified:
  stable/8/sys/vm/uma_core.c
Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/amd64/include/xen/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)
  stable/8/sys/contrib/dev/acpica/   (props changed)
  stable/8/sys/contrib/pf/   (props changed)
  stable/8/sys/dev/xen/xenpci/   (props changed)

Modified: stable/8/sys/vm/uma_core.c
==============================================================================
--- stable/8/sys/vm/uma_core.c	Fri Nov 19 16:26:51 2010	(r215528)
+++ stable/8/sys/vm/uma_core.c	Fri Nov 19 16:52:18 2010	(r215529)
@@ -932,15 +932,32 @@ startup_alloc(uma_zone_t zone, int bytes
 {
 	uma_keg_t keg;
 	uma_slab_t tmps;
+	int pages, check_pages;
 
 	keg = zone_first_keg(zone);
+	pages = howmany(bytes, PAGE_SIZE);
+	check_pages = pages - 1;
+	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
 
 	/*
 	 * Check our small startup cache to see if it has pages remaining.
 	 */
 	mtx_lock(&uma_boot_pages_mtx);
-	if ((tmps = LIST_FIRST(&uma_boot_pages)) != NULL) {
-		LIST_REMOVE(tmps, us_link);
+
+	/* First check if we have enough room. */
+	tmps = LIST_FIRST(&uma_boot_pages);
+	while (tmps != NULL && check_pages-- > 0)
+		tmps = LIST_NEXT(tmps, us_link);
+	if (tmps != NULL) {
+		/*
+		 * It's ok to lose tmps references.  The last one will
+		 * have tmps->us_data pointing to the start address of
+		 * "pages" contiguous pages of memory.
+		 */
+		while (pages-- > 0) {
+			tmps = LIST_FIRST(&uma_boot_pages);
+			LIST_REMOVE(tmps, us_link);
+		}
 		mtx_unlock(&uma_boot_pages_mtx);
 		*pflag = tmps->us_flags;
 		return (tmps->us_data);
@@ -952,7 +969,7 @@ startup_alloc(uma_zone_t zone, int bytes
 	 * Now that we've booted reset these users to their real allocator.
 	 */
 #ifdef UMA_MD_SMALL_ALLOC
-	keg->uk_allocf = uma_small_alloc;
+	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
 #else
 	keg->uk_allocf = page_alloc;
 #endif
@@ -1181,12 +1198,15 @@ keg_large_init(uma_keg_t keg)
 
 	keg->uk_ppera = pages;
 	keg->uk_ipers = 1;
+	keg->uk_rsize = keg->uk_size;
+
+	/* We can't do OFFPAGE if we're internal, bail out here. */
+	if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
+		return;
 
 	keg->uk_flags |= UMA_ZONE_OFFPAGE;
 	if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
 		keg->uk_flags |= UMA_ZONE_HASH;
-
-	keg->uk_rsize = keg->uk_size;
 }
 
 static void
@@ -1305,7 +1325,8 @@ keg_ctor(void *mem, int size, void *udat
 #endif
 		if (booted == 0)
 			keg->uk_allocf = startup_alloc;
-	}
+	} else if (booted == 0 && (keg->uk_flags & UMA_ZFLAG_INTERNAL))
+		keg->uk_allocf = startup_alloc;
 
 	/*
 	 * Initialize keg's lock (shared among zones).
@@ -1334,7 +1355,7 @@ keg_ctor(void *mem, int size, void *udat
 		if (totsize & UMA_ALIGN_PTR)
 			totsize = (totsize & ~UMA_ALIGN_PTR) +
 			    (UMA_ALIGN_PTR + 1);
-		keg->uk_pgoff = UMA_SLAB_SIZE - totsize;
+		keg->uk_pgoff = (UMA_SLAB_SIZE * keg->uk_ppera) - totsize;
 
 		if (keg->uk_flags & UMA_ZONE_REFCNT)
 			totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
@@ -1350,7 +1371,7 @@ keg_ctor(void *mem, int size, void *udat
 		 * mathematically possible for all cases, so we make
 		 * sure here anyway.
 		 */
-		if (totsize > UMA_SLAB_SIZE) {
+		if (totsize > UMA_SLAB_SIZE * keg->uk_ppera) {
 			printf("zone %s ipers %d rsize %d size %d\n",
 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
 			    keg->uk_size);

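The uk_pgoff change near the end of the diff places the slab header at the
tail of the full multi-page slab rather than at the end of its first page,
and the overflow check is widened the same way.  A small worked example of
that arithmetic, using made-up sizes and a hypothetical pgoff_for_slab()
helper (SLAB_SIZE and ALIGN_PTR stand in for UMA_SLAB_SIZE and
UMA_ALIGN_PTR; nothing here is the kernel's own code):

#include <stdio.h>

#define SLAB_SIZE	4096			/* stands in for UMA_SLAB_SIZE */
#define ALIGN_PTR	(sizeof(void *) - 1)	/* stands in for UMA_ALIGN_PTR */

/*
 * Offset of the slab header within a slab of "ppera" pages: round the
 * header size up to pointer alignment, then subtract it from the full
 * slab size so the header sits at the very end of the slab memory.
 */
static size_t
pgoff_for_slab(size_t header_size, int ppera)
{
	size_t totsize = header_size;

	if (totsize & ALIGN_PTR)
		totsize = (totsize & ~ALIGN_PTR) + (ALIGN_PTR + 1);
	return (SLAB_SIZE * ppera - totsize);
}

int
main(void)
{
	/* A 100-byte header rounds up to 104 on LP64. */
	printf("1-page slab: header at offset %zu\n", pgoff_for_slab(100, 1));
	printf("2-page slab: header at offset %zu\n", pgoff_for_slab(100, 2));
	return (0);
}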
