svn commit: r195149 - in head/sys/sparc64: include sparc64

Marius Strobl marius at FreeBSD.org
Sun Jun 28 22:42:52 UTC 2009


Author: marius
Date: Sun Jun 28 22:42:51 2009
New Revision: 195149
URL: http://svn.freebsd.org/changeset/base/195149

Log:
  - Work around the broken loader behavior of not demapping no longer
    used kernel TLB slots when unloading the kernel or modules, which
    results in havoc when loading a kernel and modules that take up
    fewer TLB slots afterwards, as the unused but still locked slots
    aren't accounted for in virtual_avail (see the sketch after this
    log). Eventually this should be fixed in the loader, which isn't
    straightforward though; in any case the kernel should be robust
    against this. [1]
  - Ensure that the addresses allocated directly from phys_avail[] by
    pmap_bootstrap_alloc() are always colored properly. This implicit
    assumption was broken in r194784 as, unlike the other consumers,
    the DPCPU area allocated for the BSP isn't a multiple of
    PAGE_SIZE * DCACHE_COLORS (see the sketch after the list of
    modified files). [2]
  - Remove the no longer used global msgbuf_phys.
  - Remove the redundant ekva parameter of pmap_bootstrap_alloc().
  - Correct some outdated function names in ktr(9) invocations.
  
  Requested by:	jhb [1]
  Reported by:	gavin [2]
  Approved by:	re (kib)
  MFC after:	2 weeks
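
A minimal, self-contained sketch of the slot arithmetic behind [1],
compilable on any host: KERNBASE, PAGE_SIZE_4M and roundup2() mirror
the kernel's definitions, but the concrete base address, slot count
and kernel end below are assumptions chosen for illustration, not
values taken from the commit.

#include <stdio.h>

#define	KERNBASE	0xc0000000UL		/* assumed example value */
#define	PAGE_SIZE_4M	(4UL * 1024 * 1024)
/* roundup2() as in <sys/param.h>: round up to a power-of-two boundary. */
#define	roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	/* Assume a previous load left 5 slots locked ... */
	unsigned long kernel_tlb_slots = 5;
	/* ... but the newly loaded kernel ends 9MB past KERNBASE. */
	unsigned long end = KERNBASE + 9UL * 1024 * 1024;
	unsigned long va;

	/* Walk down from the last locked slot, as the commit does. */
	for (va = KERNBASE + (kernel_tlb_slots - 1) * PAGE_SIZE_4M;
	    va >= roundup2(end, PAGE_SIZE_4M); va -= PAGE_SIZE_4M) {
		printf("would demap va %#lx - %#lx\n",
		    va, va + PAGE_SIZE_4M - 1);
		kernel_tlb_slots--;
	}
	/* Three slots remain, so virtual_avail lands right after them. */
	printf("virtual_avail = %#lx\n",
	    KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M);
	return (0);
}

This prints the two stale 4MB slots (at KERNBASE + 16MB and + 12MB)
being dropped, and virtual_avail comes out as roundup2(end,
PAGE_SIZE_4M), matching the new formula in pmap_bootstrap().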

Modified:
  head/sys/sparc64/include/pmap.h
  head/sys/sparc64/sparc64/machdep.c
  head/sys/sparc64/sparc64/pmap.c
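
As background for [2]: on a virtually indexed data cache, a physical
page's color is (pa >> PAGE_SHIFT) % DCACHE_COLORS, and the early
consumers of pmap_bootstrap_alloc() rely on every returned address
starting at color 0. The following standalone sketch shows why
rounding each allocation up to PAGE_SIZE * DCACHE_COLORS, as the
commit does, preserves that invariant; the page size, color count and
starting address are assumptions for illustration (8K pages, two
colors), not values taken from the diff.

#include <assert.h>
#include <stdio.h>

#define	PAGE_SHIFT	13			/* assumed: 8K pages */
#define	PAGE_SIZE	(1UL << PAGE_SHIFT)
#define	DCACHE_COLORS	2			/* assumed color count */
#define	roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

/* Dcache color of a physical address. */
static unsigned long
color(unsigned long pa)
{
	return ((pa >> PAGE_SHIFT) % DCACHE_COLORS);
}

int
main(void)
{
	/* Assume a color-aligned chunk carved out of phys_avail[]. */
	unsigned long pa = 0x40000000UL;
	/* Requests that aren't multiples of a full color cycle. */
	unsigned long sizes[] = { 8192, 8192, 32768 };
	int i;

	for (i = 0; i < 3; i++) {
		/* Each allocation still starts at color 0 ... */
		assert(color(pa) == 0);
		printf("%lu bytes at %#lx, color %lu\n",
		    sizes[i], pa, color(pa));
		/* ... because its size is padded to a full color cycle. */
		pa += roundup(sizes[i], PAGE_SIZE * DCACHE_COLORS);
	}
	return (0);
}

Had the sizes only been rounded to PAGE_SIZE, as before the commit,
the second allocation would start at color 1 and the assertion would
fire; that is the situation the single-page DPCPU area created.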

Modified: head/sys/sparc64/include/pmap.h
==============================================================================
--- head/sys/sparc64/include/pmap.h	Sun Jun 28 21:49:43 2009	(r195148)
+++ head/sys/sparc64/include/pmap.h	Sun Jun 28 22:42:51 2009	(r195149)
@@ -77,7 +77,7 @@ struct pmap {
 #define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
 #define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
 
-void	pmap_bootstrap(vm_offset_t ekva);
+void	pmap_bootstrap(void);
 vm_paddr_t pmap_kextract(vm_offset_t va);
 void	pmap_kenter(vm_offset_t va, vm_page_t m);
 void	pmap_kremove(vm_offset_t);
@@ -103,8 +103,6 @@ extern	vm_paddr_t phys_avail[];
 extern	vm_offset_t virtual_avail;
 extern	vm_offset_t virtual_end;
 
-extern	vm_paddr_t msgbuf_phys;
-
 #ifdef PMAP_STATS
 
 SYSCTL_DECL(_debug_pmap_stats);

Modified: head/sys/sparc64/sparc64/machdep.c
==============================================================================
--- head/sys/sparc64/sparc64/machdep.c	Sun Jun 28 21:49:43 2009	(r195148)
+++ head/sys/sparc64/sparc64/machdep.c	Sun Jun 28 22:42:51 2009	(r195149)
@@ -243,6 +243,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_l
 	char *env;
 	struct pcpu *pc;
 	vm_offset_t end;
+	vm_offset_t va;
 	caddr_t kmdp;
 	phandle_t child;
 	phandle_t root;
@@ -368,19 +369,28 @@ sparc64_init(caddr_t mdp, u_long o1, u_l
 	 * Panic if there is no metadata.  Most likely the kernel was booted
 	 * directly, instead of through loader(8).
 	 */
-	if (mdp == NULL || kmdp == NULL) {
-		printf("sparc64_init: no loader metadata.\n"
+	if (mdp == NULL || kmdp == NULL || end == 0 ||
+	    kernel_tlb_slots == 0 || kernel_tlbs == NULL) {
+		printf("sparc64_init: missing loader metadata.\n"
 		    "This probably means you are not using loader(8).\n");
 		panic("sparc64_init");
 	}
 
 	/*
-	 * Sanity check the kernel end, which is important.
-	 */
-	if (end == 0) {
-		printf("sparc64_init: warning, kernel end not specified.\n"
-		    "Attempting to continue anyway.\n");
-		end = (vm_offset_t)_end;
+	 * Work around the broken loader behavior of not demapping no
+	 * longer used kernel TLB slots when unloading the kernel or
+	 * modules.
+	 */
+	for (va = KERNBASE + (kernel_tlb_slots - 1) * PAGE_SIZE_4M;
+	    va >= roundup2(end, PAGE_SIZE_4M); va -= PAGE_SIZE_4M) {
+		printf("demapping unused kernel TLB slot (va %#lx - %#lx)\n",
+		    va, va + PAGE_SIZE_4M - 1);
+		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
+		    ASI_DMMU_DEMAP, 0);
+		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
+		    ASI_IMMU_DEMAP, 0);
+		flush(KERNBASE);
+		kernel_tlb_slots--;
 	}
 
 	/*
@@ -429,7 +439,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_l
 	/*
 	 * Initialize virtual memory and calculate physmem.
 	 */
-	pmap_bootstrap(end);
+	pmap_bootstrap();
 
 	/*
 	 * Initialize tunables.
@@ -752,6 +762,7 @@ cpu_shutdown(void *args)
 void
 cpu_flush_dcache(void *ptr, size_t len)
 {
+
 	/* TBD */
 }
 

Modified: head/sys/sparc64/sparc64/pmap.c
==============================================================================
--- head/sys/sparc64/sparc64/pmap.c	Sun Jun 28 21:49:43 2009	(r195148)
+++ head/sys/sparc64/sparc64/pmap.c	Sun Jun 28 22:42:51 2009	(r195149)
@@ -119,10 +119,9 @@ __FBSDID("$FreeBSD$");
 extern struct mtx sched_lock;
 
 /*
- * Virtual and physical address of message buffer
+ * Virtual address of message buffer
  */
 struct msgbuf *msgbufp;
-vm_paddr_t msgbuf_phys;
 
 /*
  * Map of physical memory reagions
@@ -277,7 +276,7 @@ om_cmp(const void *a, const void *b)
  * Bootstrap the system enough to run with virtual memory.
  */
 void
-pmap_bootstrap(vm_offset_t ekva)
+pmap_bootstrap(void)
 {
 	struct pmap *pm;
 	struct tte *tp;
@@ -365,13 +364,14 @@ pmap_bootstrap(vm_offset_t ekva)
 	/*
 	 * Allocate and map the dynamic per-CPU area for the BSP.
 	 */
-	dpcpu0 = (void *)TLB_PHYS_TO_DIRECT(pmap_bootstrap_alloc(DPCPU_SIZE));
+	pa = pmap_bootstrap_alloc(DPCPU_SIZE);
+	dpcpu0 = (void *)TLB_PHYS_TO_DIRECT(pa);
 
 	/*
 	 * Allocate and map the message buffer.
 	 */
-	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
-	msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys);
+	pa = pmap_bootstrap_alloc(MSGBUF_SIZE);
+	msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(pa);
 
 	/*
 	 * Patch the virtual address and the tsb mask into the trap table.
@@ -420,10 +420,11 @@ pmap_bootstrap(vm_offset_t ekva)
 	}
 
 	/*
-	 * Set the start and end of KVA.  The kernel is loaded at the first
-	 * available 4MB super page, so round up to the end of the page.
+	 * Set the start and end of KVA.  The kernel is loaded starting
+	 * at the first available 4MB super page, so we advance to the
+	 * end of the last one used for it.
 	 */
-	virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
+	virtual_avail = KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M;
 	virtual_end = vm_max_kernel_address;
 	kernel_vm_end = vm_max_kernel_address;
 
@@ -443,8 +444,7 @@ pmap_bootstrap(vm_offset_t ekva)
 	 * coloured properly, since we're allocating from phys_avail so the
 	 * memory won't have an associated vm_page_t.
 	 */
-	pa = pmap_bootstrap_alloc(roundup(KSTACK_PAGES, DCACHE_COLORS) *
-	    PAGE_SIZE);
+	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE);
 	kstack0_phys = pa;
 	virtual_avail += roundup(KSTACK_GUARD_PAGES, DCACHE_COLORS) *
 	    PAGE_SIZE;
@@ -587,7 +587,7 @@ pmap_bootstrap_alloc(vm_size_t size)
 	vm_paddr_t pa;
 	int i;
 
-	size = round_page(size);
+	size = roundup(size, PAGE_SIZE * DCACHE_COLORS);
 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 		if (phys_avail[i + 1] - phys_avail[i] < size)
 			continue;
@@ -946,7 +946,7 @@ pmap_kremove_flags(vm_offset_t va)
 	struct tte *tp;
 
 	tp = tsb_kvtotte(va);
-	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
+	CTR3(KTR_PMAP, "pmap_kremove_flags: va=%#lx tp=%p data=%#lx", va, tp,
 	    tp->tte_data);
 	TTE_ZERO(tp);
 }
@@ -1349,7 +1349,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t
 	}
 
 	CTR6(KTR_PMAP,
-	    "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
+	    "pmap_enter_locked: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
 	    pm->pm_context[curcpu], m, va, pa, prot, wired);
 
 	/*
@@ -1357,7 +1357,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t
 	 * changed, must be protection or wiring change.
 	 */
 	if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
-		CTR0(KTR_PMAP, "pmap_enter: update");
+		CTR0(KTR_PMAP, "pmap_enter_locked: update");
 		PMAP_STATS_INC(pmap_nenter_update);
 
 		/*
@@ -1414,12 +1414,12 @@ pmap_enter_locked(pmap_t pm, vm_offset_t
 		 * phsyical address, delete the old mapping.
 		 */
 		if (tp != NULL) {
-			CTR0(KTR_PMAP, "pmap_enter: replace");
+			CTR0(KTR_PMAP, "pmap_enter_locked: replace");
 			PMAP_STATS_INC(pmap_nenter_replace);
 			pmap_remove_tte(pm, NULL, tp, va);
 			tlb_page_demap(pm, va);
 		} else {
-			CTR0(KTR_PMAP, "pmap_enter: new");
+			CTR0(KTR_PMAP, "pmap_enter_locked: new");
 			PMAP_STATS_INC(pmap_nenter_new);
 		}
 

