svn commit: r200200 - in head/sys/ia64: ia64 include

Marcel Moolenaar marcel at FreeBSD.org
Mon Dec 7 00:54:02 UTC 2009


Author: marcel
Date: Mon Dec  7 00:54:02 2009
New Revision: 200200
URL: http://svn.freebsd.org/changeset/base/200200

Log:
  Allocate the VHPT for each CPU in cpu_mp_start(), rather than
  allocating MAXCPU VHPTs up-front. This allows us to max out MAXCPU
  without wasting memory -- MAXCPU is now 32 for SMP kernels.
  
  This change also eliminates the VHPT scaling based on the total
  amount of memory in the system. It's the workload that determines
  the best size of the VHPT. The workload can be affected by the
  amount of memory, but not necessarily. For example, there's no
  performance difference between VHPT sizes of 256KB, 512KB and 1MB
  when building the LINT kernel; this was observed on a system with
  8GB of memory. By default the kernel allocates a 1MB VHPT. The user
  can tune the system with the "machdep.vhpt.log2size" tunable.
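
For context, the tunable is the log2 of the VHPT size in bytes: the
1MB default corresponds to a value of 20, and the kernel clamps the
value to the range 15..61. A sketch of a loader.conf entry requesting
a 2MB per-CPU VHPT (the specific value shown is illustrative, not
taken from the commit):

    # Illustrative only: ask for a 2^21-byte (2MB) VHPT per CPU
    # instead of the 2^20-byte (1MB) default.
    machdep.vhpt.log2size="21"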

Modified:
  head/sys/ia64/ia64/mp_machdep.c
  head/sys/ia64/ia64/pmap.c
  head/sys/ia64/include/param.h
  head/sys/ia64/include/pcpu.h
  head/sys/ia64/include/pmap.h

Modified: head/sys/ia64/ia64/mp_machdep.c
==============================================================================
--- head/sys/ia64/ia64/mp_machdep.c	Mon Dec  7 00:29:10 2009	(r200199)
+++ head/sys/ia64/ia64/mp_machdep.c	Mon Dec  7 00:54:02 2009	(r200200)
@@ -76,7 +76,6 @@ void ia64_ap_startup(void);
 /* Variables used by os_boot_rendez and ia64_ap_startup */
 struct pcpu *ap_pcpu;
 void *ap_stack;
-uint64_t ap_vhpt;
 volatile int ap_delay;
 volatile int ap_awake;
 volatile int ap_spin;
@@ -116,13 +115,15 @@ void
 ia64_ap_startup(void)
 {
 	volatile struct ia64_interrupt_block *ib = IA64_INTERRUPT_BLOCK;
+	uint64_t vhpt;
 	int vector;
 
 	pcpup = ap_pcpu;
 	ia64_set_k4((intptr_t)pcpup);
 
-	map_vhpt(ap_vhpt);
-	ia64_set_pta(ap_vhpt + (1 << 8) + (pmap_vhpt_log2size << 2) + 1);
+	vhpt = PCPU_GET(vhpt);
+	map_vhpt(vhpt);
+	ia64_set_pta(vhpt + (1 << 8) + (pmap_vhpt_log2size << 2) + 1);
 	ia64_srlz_i();
 
 	ap_awake = 1;
@@ -261,9 +262,14 @@ cpu_mp_start()
 		pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;
 		if (pc->pc_cpuid > 0) {
 			ap_pcpu = pc;
+			pc->pc_vhpt = pmap_alloc_vhpt();
+			if (pc->pc_vhpt == 0) {
+				printf("SMP: WARNING: unable to allocate VHPT"
+				    " for cpu%d", pc->pc_cpuid);
+				continue;
+			}
 			ap_stack = malloc(KSTACK_PAGES * PAGE_SIZE, M_SMP,
 			    M_WAITOK);
-			ap_vhpt = pmap_vhpt_base[pc->pc_cpuid];
 			ap_delay = 2000;
 			ap_awake = 0;
 

Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c	Mon Dec  7 00:29:10 2009	(r200199)
+++ head/sys/ia64/ia64/pmap.c	Mon Dec  7 00:54:02 2009	(r200200)
@@ -217,8 +217,6 @@ int pmap_vhpt_nbuckets;
 SYSCTL_INT(_machdep_vhpt, OID_AUTO, nbuckets, CTLFLAG_RD,
     &pmap_vhpt_nbuckets, 0, "");
 
-uint64_t pmap_vhpt_base[MAXCPU];
-
 int pmap_vhpt_log2size = 0;
 TUNABLE_INT("machdep.vhpt.log2size", &pmap_vhpt_log2size);
 SYSCTL_INT(_machdep_vhpt, OID_AUTO, log2size, CTLFLAG_RD,
@@ -277,6 +275,40 @@ pmap_steal_memory(vm_size_t size)
 	return va;
 }
 
+static void
+pmap_initialize_vhpt(vm_offset_t vhpt)
+{
+	struct ia64_lpte *pte;
+	u_int i;
+
+	pte = (struct ia64_lpte *)vhpt;
+	for (i = 0; i < pmap_vhpt_nbuckets; i++) {
+		pte[i].pte = 0;
+		pte[i].itir = 0;
+		pte[i].tag = 1UL << 63; /* Invalid tag */
+		pte[i].chain = (uintptr_t)(pmap_vhpt_bucket + i);
+	}
+}
+
+#ifdef SMP
+MALLOC_DECLARE(M_SMP);
+
+vm_offset_t
+pmap_alloc_vhpt(void)
+{
+	vm_offset_t vhpt;
+	vm_size_t size;
+
+	size = 1UL << pmap_vhpt_log2size;
+	vhpt = (uintptr_t)contigmalloc(size, M_SMP, 0, 0UL, ~0UL, size, 0UL);
+	if (vhpt != 0) {
+		vhpt = IA64_PHYS_TO_RR7(ia64_tpa(vhpt));
+		pmap_initialize_vhpt(vhpt);
+	}
+	return (vhpt);
+}
+#endif
+
 /*
  *	Bootstrap the system enough to run with virtual memory.
  */
@@ -284,8 +316,7 @@ void
 pmap_bootstrap()
 {
 	struct ia64_pal_result res;
-	struct ia64_lpte *pte;
-	vm_offset_t base, limit;
+	vm_offset_t base;
 	size_t size;
 	int i, j, count, ridbits;
 
@@ -365,94 +396,52 @@ pmap_bootstrap()
 		;
 	count = i+2;
 
-	/*
-	 * Figure out a useful size for the VHPT, based on the size of
-	 * physical memory and try to locate a region which is large
-	 * enough to contain the VHPT (which must be a power of two in
-	 * size and aligned to a natural boundary).
-	 * We silently bump up the VHPT size to the minimum size if the
-	 * user has set the tunable too small. Likewise, the VHPT size
-	 * is silently capped to the maximum allowed.
-	 */
 	TUNABLE_INT_FETCH("machdep.vhpt.log2size", &pmap_vhpt_log2size);
-	if (pmap_vhpt_log2size == 0) {
-		pmap_vhpt_log2size = 15;
-		size = 1UL << pmap_vhpt_log2size;
-		while (size < Maxmem * 32) {
-			pmap_vhpt_log2size++;
-			size <<= 1;
-		}
-	} else if (pmap_vhpt_log2size < 15)
+	if (pmap_vhpt_log2size == 0)
+		pmap_vhpt_log2size = 20;
+	else if (pmap_vhpt_log2size < 15)
 		pmap_vhpt_log2size = 15;
-	if (pmap_vhpt_log2size > 61)
+	else if (pmap_vhpt_log2size > 61)
 		pmap_vhpt_log2size = 61;
 
-	pmap_vhpt_base[0] = 0;
-	base = limit = 0;
+	base = 0;
 	size = 1UL << pmap_vhpt_log2size;
-	while (pmap_vhpt_base[0] == 0) {
-		if (bootverbose)
-			printf("Trying VHPT size 0x%lx\n", size);
-		for (i = 0; i < count; i += 2) {
-			base = (phys_avail[i] + size - 1) & ~(size - 1);
-			limit = base + MAXCPU * size;
-			if (limit <= phys_avail[i+1])
-				/*
-				 * VHPT can fit in this region
-				 */
-				break;
-		}
-		if (!phys_avail[i]) {
-			/* Can't fit, try next smaller size. */
-			pmap_vhpt_log2size--;
-			size >>= 1;
-		} else
-			pmap_vhpt_base[0] = IA64_PHYS_TO_RR7(base);
+	for (i = 0; i < count; i += 2) {
+		base = (phys_avail[i] + size - 1) & ~(size - 1);
+		if (base + size <= phys_avail[i+1])
+			break;
 	}
-	if (pmap_vhpt_log2size < 15)
-		panic("Can't find space for VHPT");
-
-	if (bootverbose)
-		printf("Putting VHPT at 0x%lx\n", base);
+	if (!phys_avail[i])
+		panic("Unable to allocate VHPT");
 
 	if (base != phys_avail[i]) {
 		/* Split this region. */
-		if (bootverbose)
-			printf("Splitting [%p-%p]\n", (void *)phys_avail[i],
-			    (void *)phys_avail[i+1]);
 		for (j = count; j > i; j -= 2) {
 			phys_avail[j] = phys_avail[j-2];
 			phys_avail[j+1] = phys_avail[j-2+1];
 		}
 		phys_avail[i+1] = base;
-		phys_avail[i+2] = limit;
+		phys_avail[i+2] = base + size;
 	} else
-		phys_avail[i] = limit;
+		phys_avail[i] = base + size;
 
-	pmap_vhpt_nbuckets = size / sizeof(struct ia64_lpte);
+	base = IA64_PHYS_TO_RR7(base);
+	PCPU_SET(vhpt, base);
+	if (bootverbose)
+		printf("VHPT: address=%#lx, size=%#lx\n", base, size);
 
+	pmap_vhpt_nbuckets = size / sizeof(struct ia64_lpte);
 	pmap_vhpt_bucket = (void *)pmap_steal_memory(pmap_vhpt_nbuckets *
 	    sizeof(struct ia64_bucket));
-	pte = (struct ia64_lpte *)pmap_vhpt_base[0];
 	for (i = 0; i < pmap_vhpt_nbuckets; i++) {
-		pte[i].pte = 0;
-		pte[i].itir = 0;
-		pte[i].tag = 1UL << 63;	/* Invalid tag */
-		pte[i].chain = (uintptr_t)(pmap_vhpt_bucket + i);
-		/* Stolen memory is zeroed! */
+		/* Stolen memory is zeroed. */
 		mtx_init(&pmap_vhpt_bucket[i].mutex, "VHPT bucket lock", NULL,
 		    MTX_NOWITNESS | MTX_SPIN);
 	}
 
-	for (i = 1; i < MAXCPU; i++) {
-		pmap_vhpt_base[i] = pmap_vhpt_base[i - 1] + size;
-		bcopy((void *)pmap_vhpt_base[i - 1], (void *)pmap_vhpt_base[i],
-		    size);
-	}
-
-	map_vhpt(pmap_vhpt_base[0]);
-	ia64_set_pta(pmap_vhpt_base[0] + (1 << 8) +
-	    (pmap_vhpt_log2size << 2) + 1);
+	pmap_initialize_vhpt(base);
+	map_vhpt(base);
+	ia64_set_pta(base + (1 << 8) + (pmap_vhpt_log2size << 2) + 1);
 	ia64_srlz_i();
 
 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
@@ -551,15 +540,16 @@ static void
 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 {
 	struct ia64_lpte *pte;
-	int i, vhpt_ofs;
+	struct pcpu *pc;
+	u_int vhpt_ofs;
 
 	KASSERT((pmap == kernel_pmap || pmap == PCPU_GET(current_pmap)),
 		("invalidating TLB for non-current pmap"));
 
-	vhpt_ofs = ia64_thash(va) - pmap_vhpt_base[PCPU_GET(cpuid)];
+	vhpt_ofs = ia64_thash(va) - PCPU_GET(vhpt);
 	critical_enter();
-	for (i = 0; i < MAXCPU; i++) {
-		pte = (struct ia64_lpte *)(pmap_vhpt_base[i] + vhpt_ofs);
+	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+		pte = (struct ia64_lpte *)(pc->pc_vhpt + vhpt_ofs);
 		if (pte->tag == ia64_ttag(va))
 			pte->tag = 1UL << 63;
 	}

Modified: head/sys/ia64/include/param.h
==============================================================================
--- head/sys/ia64/include/param.h	Mon Dec  7 00:29:10 2009	(r200199)
+++ head/sys/ia64/include/param.h	Mon Dec  7 00:54:02 2009	(r200200)
@@ -59,7 +59,7 @@
 #endif
 
 #if defined(SMP) || defined(KLD_MODULE)
-#define	MAXCPU		4
+#define	MAXCPU		32
 #else
 #define MAXCPU		1
 #endif

Modified: head/sys/ia64/include/pcpu.h
==============================================================================
--- head/sys/ia64/include/pcpu.h	Mon Dec  7 00:29:10 2009	(r200199)
+++ head/sys/ia64/include/pcpu.h	Mon Dec  7 00:54:02 2009	(r200200)
@@ -53,6 +53,7 @@ struct pcpu_stats {
 #define	PCPU_MD_FIELDS							\
 	struct pcb	pc_pcb;			/* Used by IPI_STOP */	\
 	struct pmap	*pc_current_pmap;	/* active pmap */	\
+	vm_offset_t	pc_vhpt;		/* Address of VHPT */	\
 	uint64_t	pc_lid;			/* local CPU ID */	\
 	uint64_t	pc_clock;		/* Clock counter. */	\
 	uint64_t	pc_clockadj;		/* Clock adjust. */	\

Modified: head/sys/ia64/include/pmap.h
==============================================================================
--- head/sys/ia64/include/pmap.h	Mon Dec  7 00:29:10 2009	(r200199)
+++ head/sys/ia64/include/pmap.h	Mon Dec  7 00:54:02 2009	(r200200)
@@ -125,6 +125,7 @@ extern int pmap_vhpt_log2size;
 #define	pmap_unmapbios(va, sz)	pmap_unmapdev(va, sz)
 
 vm_offset_t pmap_steal_memory(vm_size_t);
+vm_offset_t pmap_alloc_vhpt(void);
 void	pmap_bootstrap(void);
 void	pmap_kenter(vm_offset_t va, vm_offset_t pa);
 vm_paddr_t pmap_kextract(vm_offset_t va);

