svn commit: r341102 - in head/sys/powerpc: booke mpc85xx

Justin Hibbits jhibbits at FreeBSD.org
Wed Nov 28 02:00:28 UTC 2018


Author: jhibbits
Date: Wed Nov 28 02:00:27 2018
New Revision: 341102
URL: https://svnweb.freebsd.org/changeset/base/341102

Log:
  powerpc: Prepare Book-E kernels for KERNBASE != run base
  
  Book-E kernels really run at VM_MIN_KERNEL_ADDRESS, which currently happens to
  be the same as KERNBASE.  KERNBASE is the linked address, which the loader also
  takes to be the physical load address.  Treat KERNBASE as a physical address,
  not a virtual one, and change virtual-address references to KERNBASE to use
  something more appropriate.

Modified:
  head/sys/powerpc/booke/locore.S
  head/sys/powerpc/mpc85xx/platform_mpc85xx.c

Modified: head/sys/powerpc/booke/locore.S
==============================================================================
--- head/sys/powerpc/booke/locore.S	Wed Nov 28 01:47:01 2018	(r341101)
+++ head/sys/powerpc/booke/locore.S	Wed Nov 28 02:00:27 2018	(r341102)
@@ -113,7 +113,7 @@ __start:
  *  - Create temp entry in the second AS (make sure it's not TLB[1])
  *  - Switch to temp mapping
  *  - Map 64MB of RAM in TLB1[1]
- *  - Use AS=1, set EPN to KERNBASE and RPN to kernel load address
+ *  - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to kernel load address
  *  - Switch to TLB1[1] mapping
  *  - Invalidate temp mapping
  *
@@ -238,7 +238,7 @@ __start:
 	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
 	isync
 
-	LOAD_ADDR(%r3, KERNBASE)
+	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
 	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
 	mtspr	SPR_MAS2, %r3
 	isync
@@ -471,7 +471,7 @@ bp_kernload:
 	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
 	isync
 
-	LOAD_ADDR(%r3, KERNBASE)
+	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
 	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
 	mtspr	SPR_MAS2, %r3
 	isync
@@ -526,8 +526,8 @@ bp_kernload:
 7:
 
 /*
- * At this point we're running at virtual addresses KERNBASE and beyond so
- * it's allowed to directly access all locations the kernel was linked
+ * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS and
+ * beyond so it's allowed to directly access all locations the kernel was linked
  * against.
  */
 

Modified: head/sys/powerpc/mpc85xx/platform_mpc85xx.c
==============================================================================
--- head/sys/powerpc/mpc85xx/platform_mpc85xx.c	Wed Nov 28 01:47:01 2018	(r341101)
+++ head/sys/powerpc/mpc85xx/platform_mpc85xx.c	Wed Nov 28 02:00:27 2018	(r341102)
@@ -68,6 +68,7 @@ extern void *ap_pcpu;
 extern vm_paddr_t kernload;		/* Kernel physical load address */
 extern uint8_t __boot_page[];		/* Boot page body */
 extern uint32_t bp_kernload;
+extern vm_offset_t __startkernel;
 
 struct cpu_release {
 	uint32_t entry_h;
@@ -346,7 +347,7 @@ mpc85xx_smp_start_cpu_epapr(platform_t plat, struct pc
 	rel_va = rel_page + (rel_pa & PAGE_MASK);
 	pmap_kenter(rel_page, rel_pa & ~PAGE_MASK);
 	rel = (struct cpu_release *)rel_va;
-	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - KERNBASE) + kernload;
+	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - __startkernel) + kernload;
 	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
 	rel->pir = pc->pc_cpuid; __asm __volatile("sync");
 	rel->entry_h = (bptr >> 32);
@@ -415,7 +416,7 @@ mpc85xx_smp_start_cpu(platform_t plat, struct pcpu *pc
 	/* Flush caches to have our changes hit DRAM. */
 	cpu_flush_dcache(__boot_page, 4096);
 
-	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - KERNBASE) + kernload;
+	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - __startkernel) + kernload;
 	KASSERT((bptr & 0xfff) == 0,
 	    ("%s: boot page is not aligned (%#jx)", __func__, (uintmax_t)bptr));
 	if (mpc85xx_is_qoriq()) {


More information about the svn-src-head mailing list