svn commit: r292891 - head/sys/arm/arm

Ian Lepore ian at FreeBSD.org
Tue Dec 29 22:18:37 UTC 2015


Author: ian
Date: Tue Dec 29 22:18:35 2015
New Revision: 292891
URL: https://svnweb.freebsd.org/changeset/base/292891

Log:
  Bring some of the recent locore-v4.S improvements into locore-v6.S...
  
   - Map all 4GB as VA=PA so that args passed in from a bootloader can
     be accessed regardless of where they are.
   - Figure out the kernel load address by directly masking the PC rather
     than by doing pc-relative math on the _start symbol (see the C sketch
     after this list).
   - For EARLY_PRINTF support, map device memory as uncacheable (no-op for
     ARM_NEW_PMAP because all TEX types resolve to uncacheable).
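
  For reference, a minimal C sketch of the PC-masking math from the second
  item above.  This is an illustration rather than the committed assembly;
  the L1_S_* constants restate the usual ARM 1MiB-section values as
  assumptions:

	#include <stdint.h>

	#define L1_S_SIZE	0x00100000u		/* 1MiB L1 section */
	#define L1_S_OFFSET	(L1_S_SIZE - 1)		/* offset bits within a section */

	/*
	 * Clearing the offset bits of the entry-point pc yields the
	 * section-aligned physical load address (kept in r5 in the
	 * assembly below).
	 */
	static inline uint32_t
	section_aligned_load_addr(uint32_t pc)
	{
		return (pc & ~L1_S_OFFSET);
	}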

Modified:
  head/sys/arm/arm/locore-v6.S

Modified: head/sys/arm/arm/locore-v6.S
==============================================================================
--- head/sys/arm/arm/locore-v6.S	Tue Dec 29 22:14:21 2015	(r292890)
+++ head/sys/arm/arm/locore-v6.S	Tue Dec 29 22:18:35 2015	(r292891)
@@ -153,46 +153,49 @@ ASENTRY_NP(_start)
 	 * Build page table from scratch.
 	 */
 
-	/* Calculate the physical address of the startup pagetable. */
+	/*
+	 * Figure out the physical address we're loaded at.  Assume this
+	 * entry point code is in the first L1 section, so clearing the
+	 * offset bits of the pc gives the section-aligned load address,
+	 * which stays in r5 throughout all the following code.
+	 */
+	ldr	r2, =(L1_S_OFFSET)
+	bic	r5, pc, r2
+
+	/* Find the delta between VA and PA, result stays in r0 throughout. */
 	adr	r0, Lpagetable
 	bl	translate_va_to_pa
 
-	/* Clear boot page table */
-	mov	r1, r0
-	mov	r2, L1_TABLE_SIZE
-	mov	r3,#0
-1:	str	r3, [r1], #4
-	subs	r2, #4
-	bgt	1b
-
-	/*
-	 * Map PA == VA
+	/*
+	 * First map the entire 4GB address space as VA=PA.  It's mapped as
+	 * normal (cached) memory because it's for things like accessing the
+	 * parameters passed in from the bootloader, which might be at any
+	 * physical address, different for every platform.
 	 */
-	/* Find the start kernels load address */
-	adr	r5, _start
-	ldr	r2, =(PTE1_OFFSET)
-	bic	r5, r2
-	mov	r1, r5
-	mov	r2, r5
-	/* Map 64MiB, preserved over calls to build_pagetables */
-	mov	r3, #64
+	mov	r1, #0
+	mov	r2, #0
+	mov	r3, #4096
 	bl	build_pagetables
 
-	/* Create the kernel map to jump to */
+	/*
+	 * Next, map 64MiB starting at the physical load address to the
+	 * VA the kernel is linked at.
+	 */
 	mov	r1, r5
 	ldr	r2, =(KERNVIRTADDR)
+	mov	r3, #64
 	bl	build_pagetables
 
+	/* Create a device mapping for early_printf if specified. */
 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
-	/* Create the custom map (1MB) used for early_printf(). */
 	ldr	r1, =SOCDEV_PA
 	ldr	r2, =SOCDEV_VA
 	mov	r3, #1
-	bl	build_pagetables
+	bl	build_device_pagetables
 #endif
 	bl	init_mmu
 
-	/* Switch to virtual addresses. */
+	/* Transition the PC from physical to virtual addressing. */
 	ldr	pc, =1f
 1:
 
@@ -394,6 +397,15 @@ END(reinit_mmu)
  *
  * Addresses must be 1MiB aligned
  */
+build_device_pagetables:
+#if defined(ARM_NEW_PMAP)
+	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
+#elif defined(SMP)
+	ldr	r4, =(L1_TYPE_S|L1_S_AP(AP_KRW)|L1_SHARED)
+#else
+	ldr	r4, =(L1_TYPE_S|L1_S_AP(AP_KRW))
+#endif
+	b	1f
 build_pagetables:
 	/* Set the required page attributes */
 #if defined(ARM_NEW_PMAP)
@@ -403,18 +415,19 @@ build_pagetables:
 #else
 	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
 #endif
+1:
 	orr	r1, r4
 
 	/* Move the virtual address to the correct bit location */
 	lsr	r2, #(PTE1_SHIFT - 2)
 
 	mov	r4, r3
-1:
+2:
 	str	r1, [r0, r2]
 	add	r2, r2, #4
 	add	r1, r1, #(PTE1_SIZE)
 	adds	r4, r4, #-1
-	bhi	1b
+	bhi	2b
 
 	mov	pc, lr
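
For readers following the loop above, a hedged C rendering of what
build_pagetables does once the attribute word is chosen.  The assembly
shifts the VA right by PTE1_SHIFT - 2 to get a byte offset into the table;
indexing a uint32_t array lets this sketch shift by the full PTE1_SHIFT.
The function signature and the attrs parameter are illustrative, not the
kernel's API:

	#include <stdint.h>

	#define PTE1_SHIFT	20u			/* log2 of the 1MiB section size */
	#define PTE1_SIZE	(1u << PTE1_SHIFT)	/* bytes mapped per L1 entry */

	static void
	build_pagetables(uint32_t *l1pt, uint32_t pa, uint32_t va,
	    uint32_t nsections, uint32_t attrs)
	{
		uint32_t idx = va >> PTE1_SHIFT;	/* L1 index for this VA */
		uint32_t entry = pa | attrs;		/* section base + attribute bits */

		while (nsections-- > 0) {
			l1pt[idx++] = entry;		/* each entry maps 1MiB */
			entry += PTE1_SIZE;		/* advance to the next 1MiB */
		}
	}

In this shape the three calls in the diff correspond to mapping 4096
sections at PA = VA = 0 (the whole 4GB), 64 sections from the load address
to KERNVIRTADDR, and 1 section from SOCDEV_PA to SOCDEV_VA.  The only
thing build_device_pagetables changes is the attribute word, which omits
the cacheable bit (L1_S_C) so the early_printf device window is uncached;
as the log notes, this is a no-op for ARM_NEW_PMAP.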
 

