PERFORCE change 140141 for review

Marcel Moolenaar marcel at FreeBSD.org
Thu Apr 17 00:46:43 UTC 2008


http://perforce.freebsd.org/chv.cgi?CH=140141

Change 140141 by marcel at marcel_xcllnt on 2008/04/17 00:45:44

	IFC @140140

Affected files ...

.. //depot/projects/powerpc/sys/powerpc/aim/locore.S#8 integrate
.. //depot/projects/powerpc/sys/powerpc/aim/mmu_oea.c#3 integrate

Differences ...

==== //depot/projects/powerpc/sys/powerpc/aim/locore.S#8 (text+ko) ====

@@ -1,4 +1,4 @@
-/* $FreeBSD: src/sys/powerpc/aim/locore.S,v 1.25 2008/03/07 22:27:05 marcel Exp $ */
+/* $FreeBSD: src/sys/powerpc/aim/locore.S,v 1.26 2008/04/16 23:28:11 marcel Exp $ */
 /* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */
 
 /*-
@@ -183,6 +183,9 @@
 	mr	7,21
 
 	bl	powerpc_init
+	mr	%r1, %r3
+	li	%r3, 0
+	stw	%r3, 0(%r1)
 	bl	mi_startup
 	b	OF_exit
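
The added instructions switch onto the kernel stack that powerpc_init()
returns: per the PowerPC ABI the return value arrives in %r3, which is
moved into the stack pointer %r1, and a zero is stored at 0(%r1) to
null-terminate the stack frame back-chain. A minimal user-space sketch
of the convention that terminator serves (the struct and walker are
hypothetical illustrations, not kernel code):

#include <stdio.h>

/*
 * Per the ELF PowerPC ABI, each stack frame stores a pointer to the
 * caller's frame at offset 0 from the stack pointer.  Storing 0 there
 * in the outermost frame lets a stack walker know where to stop.
 */
struct frame {
	struct frame *backchain;	/* back-chain word at 0(%r1) */
};

static void
walk_frames(struct frame *fp)
{
	while (fp != NULL) {		/* the stored zero ends the walk */
		printf("frame at %p\n", (void *)fp);
		fp = fp->backchain;
	}
}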
 

==== //depot/projects/powerpc/sys/powerpc/aim/mmu_oea.c#3 (text+ko) ====

@@ -91,7 +91,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/powerpc/aim/mmu_oea.c,v 1.117 2007/12/14 22:39:34 marcel Exp $");
+__FBSDID("$FreeBSD: src/sys/powerpc/aim/mmu_oea.c,v 1.119 2008/04/17 00:37:40 marcel Exp $");
 
 /*
  * Manages physical address maps.
@@ -262,12 +262,6 @@
 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
     &moea_pte_spills, 0, "");
 
-struct	pvo_entry *moea_pvo_zeropage;
-struct	mtx	moea_pvo_zeropage_mtx;
-
-vm_offset_t	moea_rkva_start = VM_MIN_KERNEL_ADDRESS;
-u_int		moea_rkva_count = 4;
-
 /*
  * Allocate physical memory for use in moea_bootstrap.
  */
@@ -292,10 +286,6 @@
  */
 static void		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
 			    vm_prot_t, boolean_t);
-static struct		pvo_entry *moea_rkva_alloc(mmu_t);
-static void		moea_pa_map(struct pvo_entry *, vm_offset_t,
-			    struct pte *, int *);
-static void		moea_pa_unmap(struct pvo_entry *, struct pte *, int *);
 static void		moea_syncicache(vm_offset_t, vm_size_t);
 static boolean_t	moea_query_bit(vm_page_t, int);
 static u_int		moea_clear_bit(vm_page_t, int, int *);
@@ -674,7 +664,7 @@
 	int		sz;
 	int		i, j;
 	int		ofw_mappings;
-	uint32_t	trace;
+	uint32_t	trace[2];
 	vm_size_t	size, physsz, hwphyssz;
 	vm_offset_t	pa, va, off;
 
@@ -719,12 +709,6 @@
 	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
 	isync();
 
-	/*
-	 * Set the start and end of kva.
-	 */
-	virtual_avail = VM_MIN_KERNEL_ADDRESS;
-	virtual_end = VM_MAX_KERNEL_ADDRESS;
-
 	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
 	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");
 
@@ -824,11 +808,6 @@
 	    MTX_RECURSE);
 
 	/*
-	 * Allocate the message buffer.
-	 */
-	msgbuf_phys = moea_bootstrap_alloc(MSGBUF_SIZE, 0);
-
-	/*
 	 * Initialise the unmanaged pvo pool.
 	 */
 	moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
@@ -895,9 +874,13 @@
 			ofw_mappings++;
 		}
 	}
-#ifdef SMP
-	TLBSYNC();
-#endif
+
+	/*
+	 * Calculate the last available physical address.
+	 */
+	for (i = 0; phys_avail[i + 2] != 0; i += 2)
+		;
+	Maxmem = powerpc_btop(phys_avail[i + 1]);
 
 	/*
 	 * Initialize the kernel pmap (which is statically allocated).
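
The relocated Maxmem computation walks phys_avail, an array of
(start, end) physical address pairs terminated by a zero entry, and
converts the end address of the last pair from bytes to pages.  A
standalone sketch of the same loop with made-up region addresses (the
array contents and the btop macro here are illustrative assumptions):

#include <stdio.h>

#define PAGE_SHIFT	12
#define btop(x)		((x) >> PAGE_SHIFT)	/* bytes to pages */

/* Hypothetical phys_avail: (start, end) pairs, zero-terminated. */
static unsigned long phys_avail[] = {
	0x00003000, 0x0fe00000,
	0x10000000, 0x1fff0000,
	0, 0
};

int
main(void)
{
	unsigned long maxmem;
	int i;

	/* Skip to the last pair: stop when the next pair's start is 0. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	maxmem = btop(phys_avail[i + 1]);
	printf("Maxmem = %lu pages\n", maxmem);
	return (0);
}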
@@ -910,39 +893,57 @@
 	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
 	kernel_pmap->pm_active = ~0;
 
+	pmap_cpu_bootstrap(trace, 0);
+	/*
+	 * Initialize hardware.
+	 */
+	for (i = 0; i < 16; i++) {
+		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
+	}
+	__asm __volatile ("mtsr %0,%1"
+	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
+	__asm __volatile ("mtsr %0,%1"
+	    :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
+	__asm __volatile ("sync; mtsdr1 %0; isync"
+	    :: "r"((u_int)moea_pteg_table | (moea_pteg_mask >> 10)));
+	tlbia();
+
+	pmap_bootstrapped++;
+
+	/*
+	 * Set the start and end of kva.
+	 */
+	virtual_avail = VM_MIN_KERNEL_ADDRESS;
+	virtual_end = VM_MAX_KERNEL_ADDRESS;
+
 	/*
 	 * Allocate a kernel stack with a guard page for thread0 and map it
 	 * into the kernel page map.
 	 */
-	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
-	kstack0_phys = pa;
-	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
-	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
-	    kstack0);
-	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
+	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
+	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
+	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
+	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
+	thread0.td_kstack = va;
+	thread0.td_kstack_pages = KSTACK_PAGES;
 	for (i = 0; i < KSTACK_PAGES; i++) {
-		pa = kstack0_phys + i * PAGE_SIZE;
-		va = kstack0 + i * PAGE_SIZE;
-		moea_kenter(mmup, va, pa);
-		TLBIE(va);
+	moea_kenter(mmup, va, pa);
+		pa += PAGE_SIZE;
+		va += PAGE_SIZE;
 	}
 
 	/*
-	 * Calculate the last available physical address.
-	 */
-	for (i = 0; phys_avail[i + 2] != 0; i += 2)
-		;
-	Maxmem = powerpc_btop(phys_avail[i + 1]);
-
-	/*
 	 * Allocate virtual address space for the message buffer.
 	 */
+	pa = msgbuf_phys = moea_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE);
 	msgbufp = (struct msgbuf *)virtual_avail;
+	va = virtual_avail;
 	virtual_avail += round_page(MSGBUF_SIZE);
-
-	pmap_cpu_bootstrap(&trace, 0);
-	tlbia();
-	pmap_bootstrapped++;
+	while (va < virtual_avail) {
+		moea_kenter(mmup, va, pa);
+		pa += PAGE_SIZE;
+		va += PAGE_SIZE;
+	}
 }
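
With this restructuring, moea_bootstrap() now programs the MMU itself
before creating any mappings: all 16 segment registers are invalidated,
the kernel segments are loaded with mtsr, SDR1 is pointed at the PTEG
table, and the TLB is flushed.  Only then are the thread0 kernel stack
and the message buffer wired into the kernel page table one page at a
time.  A sketch of that page-at-a-time wiring pattern, factored into a
helper (moea_kenter() and the types are the kernel's own; wire_range()
is a hypothetical name, not part of the diff):

/*
 * Enter wired kernel mappings for a physically contiguous range,
 * one page per moea_kenter() call, mirroring the two loops above.
 */
static void
wire_range(mmu_t mmup, vm_offset_t va, vm_offset_t pa, vm_size_t size)
{
	vm_offset_t end;

	end = va + size;
	while (va < end) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
}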
 
 /*
@@ -1018,65 +1031,27 @@
 moea_zero_page(mmu_t mmu, vm_page_t m)
 {
 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
-	caddr_t va;
-
-	if (pa < SEGMENT_LENGTH) {
-		va = (caddr_t) pa;
-	} else if (moea_initialized) {
-		if (moea_pvo_zeropage == NULL) {
-			moea_pvo_zeropage = moea_rkva_alloc(mmu);
-			mtx_init(&moea_pvo_zeropage_mtx, "pvo zero page",
-			    NULL, MTX_DEF);
-		}
-		mtx_lock(&moea_pvo_zeropage_mtx);
-		moea_pa_map(moea_pvo_zeropage, pa, NULL, NULL);
-		va = (caddr_t)PVO_VADDR(moea_pvo_zeropage);
-	} else {
-		panic("moea_zero_page: can't zero pa %#x", pa);
-	}
+	void *va = (void *)pa;
 
 	bzero(va, PAGE_SIZE);
-
-	if (pa >= SEGMENT_LENGTH) {
-		moea_pa_unmap(moea_pvo_zeropage, NULL, NULL);
-		mtx_unlock(&moea_pvo_zeropage_mtx);
-	}
 }
 
 void
 moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
 {
 	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
-	caddr_t va;
+	void *va = (void *)(pa + off);
 
-	if (pa < SEGMENT_LENGTH) {
-		va = (caddr_t) pa;
-	} else if (moea_initialized) {
-		if (moea_pvo_zeropage == NULL) {
-			moea_pvo_zeropage = moea_rkva_alloc(mmu);
-			mtx_init(&moea_pvo_zeropage_mtx, "pvo zero page",
-			    NULL, MTX_DEF);
-		}
-		mtx_lock(&moea_pvo_zeropage_mtx);
-		moea_pa_map(moea_pvo_zeropage, pa, NULL, NULL);
-		va = (caddr_t)PVO_VADDR(moea_pvo_zeropage);
-	} else {
-		panic("moea_zero_page: can't zero pa %#x", pa);
-	}
-
-	bzero(va + off, size);
-
-	if (pa >= SEGMENT_LENGTH) {
-		moea_pa_unmap(moea_pvo_zeropage, NULL, NULL);
-		mtx_unlock(&moea_pvo_zeropage_mtx);
-	}
+	bzero(va, size);
 }
 
 void
 moea_zero_page_idle(mmu_t mmu, vm_page_t m)
 {
+	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
+	void *va = (void *)pa;
 
-	moea_zero_page(mmu, m);
+	bzero(va, PAGE_SIZE);
 }
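
All three zeroing routines now rely on physical memory being directly
addressable: with the 1:1 BAT mappings loaded earlier in
moea_bootstrap(), a page's physical address doubles as a valid kernel
pointer, so the old path of borrowing a reserved mapping (and taking a
mutex) reduces to a plain bzero().  The assumption in miniature (a
hypothetical helper, not part of the diff):

/*
 * With a 1:1 (direct) mapping of physical memory in effect,
 * converting a physical address to a usable pointer is just a cast.
 */
static __inline void *
moea_phys_to_virt(vm_offset_t pa)
{
	return ((void *)pa);
}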
 
 /*
@@ -1804,108 +1779,6 @@
 	panic("moea_bootstrap_alloc: could not allocate memory");
 }
 
-/*
- * Return an unmapped pvo for a kernel virtual address.
- * Used by pmap functions that operate on physical pages.
- */
-static struct pvo_entry *
-moea_rkva_alloc(mmu_t mmu)
-{
-	struct		pvo_entry *pvo;
-	struct		pte *pt;
-	vm_offset_t	kva;
-	int		pteidx;
-
-	if (moea_rkva_count == 0)
-		panic("moea_rkva_alloc: no more reserved KVAs");
-
-	kva = moea_rkva_start + (PAGE_SIZE * --moea_rkva_count);
-	moea_kenter(mmu, kva, 0);
-
-	pvo = moea_pvo_find_va(kernel_pmap, kva, &pteidx);
-
-	if (pvo == NULL)
-		panic("moea_kva_alloc: moea_pvo_find_va failed");
-
-	pt = moea_pvo_to_pte(pvo, pteidx);
-
-	if (pt == NULL)
-		panic("moea_kva_alloc: moea_pvo_to_pte failed");
-
-	moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
-	mtx_unlock(&moea_table_mutex);
-	PVO_PTEGIDX_CLR(pvo);
-
-	moea_pte_overflow++;
-
-	return (pvo);
-}
-
-static void
-moea_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
-    int *depth_p)
-{
-	struct	pte *pt;
-
-	/*
-	 * If this pvo already has a valid pte, we need to save it so it can
-	 * be restored later.  We then just reload the new PTE over the old
-	 * slot.
-	 */
-	if (saved_pt != NULL) {
-		pt = moea_pvo_to_pte(pvo, -1);
-
-		if (pt != NULL) {
-			moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
-			mtx_unlock(&moea_table_mutex);
-			PVO_PTEGIDX_CLR(pvo);
-			moea_pte_overflow++;
-		}
-
-		*saved_pt = pvo->pvo_pte;
-
-		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
-	}
-
-	pvo->pvo_pte.pte_lo |= pa;
-
-	if (!moea_pte_spill(pvo->pvo_vaddr))
-		panic("moea_pa_map: could not spill pvo %p", pvo);
-
-	if (depth_p != NULL)
-		(*depth_p)++;
-}
-
-static void
-moea_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
-{
-	struct	pte *pt;
-
-	pt = moea_pvo_to_pte(pvo, -1);
-
-	if (pt != NULL) {
-		moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
-		mtx_unlock(&moea_table_mutex);
-		PVO_PTEGIDX_CLR(pvo);
-		moea_pte_overflow++;
-	}
-
-	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
-
-	/*
-	 * If there is a saved PTE and it's valid, restore it and return.
-	 */
-	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
-		if (depth_p != NULL && --(*depth_p) == 0)
-			panic("moea_pa_unmap: restoring but depth == 0");
-
-		pvo->pvo_pte = *saved_pt;
-
-		if (!moea_pte_spill(pvo->pvo_vaddr))
-			panic("moea_pa_unmap: could not spill pvo %p", pvo);
-	}
-}
-
 static void
 moea_syncicache(vm_offset_t pa, vm_size_t len)
 {
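
The deleted moea_rkva_alloc()/moea_pa_map()/moea_pa_unmap() trio
implemented the temporary-mapping machinery the zeroing routines used
to depend on: a pool of four kernel virtual addresses was reserved at
bootstrap, and a physical page was mapped into one of them (saving and
later restoring any PTE already present) for the duration of an
operation.  With physical memory directly addressable, that pattern is
dead code.  Its reserve-and-count-down shape, as a hypothetical
user-space analogue (not the removed API):

#include <assert.h>

#define RKVA_COUNT	4	/* mirrors the old moea_rkva_count */

static char slots[RKVA_COUNT][4096];
static int slots_left = RKVA_COUNT;

/*
 * Hand out reserved slots back to front; fail loudly on exhaustion,
 * as moea_rkva_alloc() panicked when the pool ran dry.
 */
static void *
slot_alloc(void)
{
	assert(slots_left > 0);
	return (slots[--slots_left]);
}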

