PERFORCE change 98768 for review

Kip Macy kmacy at FreeBSD.org
Thu Jun 8 05:27:56 UTC 2006


http://perforce.freebsd.org/chv.cgi?CH=98768

Change 98768 by kmacy at kmacy_storage:sun4v_work_ifc on 2006/06/08 00:27:03

	Page allocation is not MP safe: it relies on, among other things, static
	variables. This change serializes page allocation in pmap behind a spin
	mutex. It does not fix the general problem of contending with other
	consumers of the page allocator, but it does fix the immediate problem
	of panicking under "make -j32".

Affected files ...

.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/pmap.h#14 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#63 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tsb.c#17 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#38 edit

Differences ...

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/pmap.h#14 (text+ko) ====

@@ -59,7 +59,6 @@
 	TAILQ_HEAD(, pv_entry) pv_list;
 };
 
-
 struct pmap {
 	uint64_t                pm_context;
 	uint64_t                pm_hashscratch;
@@ -116,6 +115,8 @@
 extern	vm_offset_t virtual_avail;
 extern	vm_offset_t virtual_end;
 extern	vm_paddr_t msgbuf_phys;
+extern  struct mtx page_alloc_lock;
+
 
 static __inline int
 pmap_track_modified(pmap_t pm, vm_offset_t va)

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#63 (text+ko) ====

@@ -106,7 +106,7 @@
 int sparc64_nmemreg;
 
 extern vm_paddr_t mmu_fault_status_area;
-
+struct mtx page_alloc_lock;
 
 /*
  * First and last available kernel virtual addresses.
@@ -187,7 +187,9 @@
 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
 static void pmap_remove_tte(pmap_t pmap, tte_t tte_data, vm_offset_t va);
+void pmap_set_ctx_panic(uint64_t error, vm_paddr_t tsb_ra, pmap_t pmap);
 
+
 /*
  * Quick sort callout for comparing memory regions.
  */
@@ -381,18 +383,10 @@
 pmap_activate(struct thread *td)
 {
 	pmap_t pmap, oldpmap;
-	int context;
+	int context, err;
 	DPRINTF("activating pmap for %d\n", td->td_tid);
 	
-	/*
-	 * Workaround for i386-centric hack to avoid race in freeing vmspace
-	 */
 	critical_enter();
-	if (td->td_proc->p_vmspace == &vmspace0) {
-		PCPU_SET(curpmap, kernel_pmap);
-		context = 0;
-		goto done;
-	}
 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
 	oldpmap = PCPU_GET(curpmap);
 #if defined(SMP)
@@ -408,9 +402,11 @@
 	pmap->pm_hashscratch = tte_hash_set_scratchpad_user(pmap->pm_hash, pmap->pm_context);
 	pmap->pm_tsbscratch = tsb_set_scratchpad_user(&pmap->pm_tsb);
 	PCPU_SET(curpmap, pmap);
-	hv_set_ctxnon0(1, pmap->pm_tsb_ra);
 	context = pmap->pm_context;
- done:
+	if (pmap->pm_context != 0)
+		if ((err = hv_set_ctxnon0(1, pmap->pm_tsb_ra)) != H_EOK)
+			panic("failed to set TSB 0x%lx - context == %d\n", 
+			      pmap->pm_tsb_ra, context);
 	stxa(MMU_CID_S, ASI_MMU_CONTEXTID, context);
 	membar(Sync);
 	critical_exit();
@@ -604,7 +600,8 @@
 	kernel_pmap->pm_tsb.hvtsb_pgszs = TSB8K;
 	kernel_pmap->pm_tsb.hvtsb_rsvd = 0;
 	kernel_pmap->pm_tsb.hvtsb_pa = pa;
-
+	
+	kernel_pmap->pm_tsb_ra = vtophys((vm_offset_t)&kernel_pmap->pm_tsb);
 	tsb_set_scratchpad_kernel(&kernel_pmap->pm_tsb);
 	
 	/*
@@ -1109,6 +1106,7 @@
 		ctx_stack[ctx_stack_top] = ctx_stack_top;
 
 	mtx_init(&pmap_ctx_lock, "ctx lock", NULL, MTX_SPIN);
+	mtx_init(&page_alloc_lock, "page alloc", NULL, MTX_SPIN);
 
 	/*
 	 * Initialize the address space (zone) for the pv entries.  Set a
@@ -1450,6 +1448,8 @@
 	PMAP_LOCK_INIT(pmap);
 	pmap->pm_active = pmap->pm_tlbactive = ~0;
 	pmap->pm_context = 0;
+	pmap->pm_tsb_ra = kernel_pmap->pm_tsb_ra;
+	pmap->pm_hash = kernel_pmap->pm_hash;
 	PCPU_SET(curpmap, pmap);
 	TAILQ_INIT(&pmap->pm_pvlist);
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@@ -1893,3 +1893,10 @@
 	hwblkclr((void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)), PAGE_SIZE);
 }
 
+void
+pmap_set_ctx_panic(uint64_t error, vm_paddr_t tsb_ra, pmap_t pmap)
+{
+	panic("setting ctxnon0 failed ctx=0x%lx hvtsb_ra=0x%lx tsbscratch=0x%lx error=0x%lx",
+	      pmap->pm_context, tsb_ra, pmap->pm_tsbscratch, error);
+	
+}

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tsb.c#17 (text+ko) ====

@@ -60,13 +60,14 @@
 /* make TSB start off at the same size as the hash */
 #define TSB_SIZE      (1 << HASH_ENTRY_SHIFT)
 
-
 #ifdef DEBUG_TSB
 #define DPRINTF printf
 #else
 #define DPRINTF(...)
 #endif
 
+void tsb_sysinit(void);
+
 vm_paddr_t
 tsb_init(hv_tsb_info_t *hvtsb, uint64_t *scratchval)
 {
@@ -76,13 +77,19 @@
 
 	m = NULL;
 	while (m == NULL) {
+		mtx_lock_spin(&page_alloc_lock);
 		m = vm_page_alloc_contig(TSB_SIZE, phys_avail[0], 
 					 phys_avail[1], TSB_SIZE*PAGE_SIZE, (1UL<<34));
+		mtx_unlock_spin(&page_alloc_lock);
 		if (m == NULL) {
 			printf("vm_page_alloc_contig failed - waiting to retry\n");
 			VM_WAIT;
 		}
 	}
+	if ((VM_PAGE_TO_PHYS(m) & (TSB_SIZE*PAGE_SIZE - 1)) != 0)
+	    panic("vm_page_alloc_contig allocated unaligned pages: 0x%lx",
+		  VM_PAGE_TO_PHYS(m));
+
 	hvtsb->hvtsb_idxpgsz = TTE8K;
 	hvtsb->hvtsb_assoc = 1;
 	hvtsb->hvtsb_ntte = (TSB_SIZE*PAGE_SIZE >> TTE_SHIFT);

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#38 (text+ko) ====

@@ -118,7 +118,6 @@
 static int tte_hash_count = 0, tte_hash_max = 0;
 
 extern uint64_t hash_bucket_lock(tte_hash_field_t fields);
-
 extern void hash_bucket_unlock(tte_hash_field_t fields, uint64_t s);
 
 static tte_hash_t
@@ -180,8 +179,10 @@
 	m = NULL;
 
 	while (m == NULL) {
+		mtx_lock_spin(&page_alloc_lock);
 		m = vm_page_alloc_contig(HASH_SIZE, phys_avail[0], 
 					 phys_avail[1], PAGE_SIZE, (1UL<<34));
+		mtx_unlock_spin(&page_alloc_lock);
 		if (m == NULL) {
 			printf("vm_page_alloc_contig failed - waiting to retry\n");
 			VM_WAIT;
@@ -194,9 +195,11 @@
 	th->th_hashtable = (void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
 	m = NULL;
 	while (m == NULL) {
+		mtx_lock_spin(&page_alloc_lock);
 		m = vm_page_alloc(NULL, color++,
 		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 		    VM_ALLOC_ZERO);
+		mtx_unlock_spin(&page_alloc_lock);
 
 		if (m == NULL) 
 			VM_WAIT;
