svn commit: r279750 - in head: . sys/conf sys/kern sys/powerpc/aim sys/powerpc/booke sys/powerpc/ofw sys/powerpc/powerpc

Nathan Whitehorn nwhitehorn at FreeBSD.org
Sat Mar 7 20:14:51 UTC 2015


Author: nwhitehorn
Date: Sat Mar  7 20:14:46 2015
New Revision: 279750
URL: https://svnweb.freebsd.org/changeset/base/279750

Log:
  Make 32-bit PowerPC kernels, like 64-bit PowerPC kernels, position-independent
  executables. The goal here, not yet accomplished, is to let the e500 kernel
  run under QEMU by setting KERNBASE to something that fits in low memory and
  then having the kernel relocate itself at runtime.

Modified:
  head/UPDATING
  head/sys/conf/Makefile.powerpc
  head/sys/kern/link_elf.c
  head/sys/powerpc/aim/locore32.S
  head/sys/powerpc/aim/machdep.c
  head/sys/powerpc/aim/trap_subr32.S
  head/sys/powerpc/aim/trap_subr64.S
  head/sys/powerpc/booke/locore.S
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/booke/trap_subr.S
  head/sys/powerpc/ofw/ofwcall32.S
  head/sys/powerpc/powerpc/elf32_machdep.c
  head/sys/powerpc/powerpc/swtch32.S

Modified: head/UPDATING
==============================================================================
--- head/UPDATING	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/UPDATING	Sat Mar  7 20:14:46 2015	(r279750)
@@ -31,6 +31,12 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 11
 	disable the most expensive debugging functionality run
 	"ln -s 'abort:false,junk:false' /etc/malloc.conf".)
 
+20150307:
+	The 32-bit PowerPC kernel has been changed to a position-independent
+	executable. This can only be booted with a version of loader(8)
+	newer than January 31, 2015, so make sure to update both world and
+	kernel before rebooting.
+
 20150217:
 	If you are running a -CURRENT kernel since r273872 (Oct 30th, 2014),
 	but before r278950, the RNG was not seeded properly.  Immediately

Modified: head/sys/conf/Makefile.powerpc
==============================================================================
--- head/sys/conf/Makefile.powerpc	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/sys/conf/Makefile.powerpc	Sat Mar  7 20:14:46 2015	(r279750)
@@ -37,10 +37,9 @@ INCLUDES+= -I$S/contrib/libfdt
 
 CFLAGS+= -msoft-float -Wa,-many
 
-.if ${MACHINE_ARCH} == "powerpc64"
+# Build position-independent kernel
 CFLAGS+= -fPIC
 LDFLAGS+= -pie
-.endif
 
 .if !empty(DDB_ENABLED)
 CFLAGS+=	-fno-omit-frame-pointer

Modified: head/sys/kern/link_elf.c
==============================================================================
--- head/sys/kern/link_elf.c	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/sys/kern/link_elf.c	Sat Mar  7 20:14:46 2015	(r279750)
@@ -411,7 +411,7 @@ link_elf_init(void* arg)
 
 	ef = (elf_file_t) linker_kernel_file;
 	ef->preloaded = 1;
-#ifdef __powerpc64__
+#ifdef __powerpc__
 	ef->address = (caddr_t) (__startkernel - KERNBASE);
 #else
 	ef->address = 0;

Modified: head/sys/powerpc/aim/locore32.S
==============================================================================
--- head/sys/powerpc/aim/locore32.S	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/sys/powerpc/aim/locore32.S	Sat Mar  7 20:14:46 2015	(r279750)
@@ -108,36 +108,47 @@ kernel_text:
 	.text
 	.globl	__start
 __start:
-	li	8,0
-	li	9,0x100
-	mtctr	9
-1:
-	dcbf	0,8
-	icbi	0,8
-	addi	8,8,0x20
-	bdnz	1b
-	sync
-	isync
-
-	/* Zero bss, in case we were started by something unhelpful */
-	li	0,0
-	lis	8,_edata@ha
-	addi	8,8,_edata@l
-	lis	9,_end@ha
-	addi	9,9,_end@l
-2:	stw     0,0(8)
-	addi	8,8,4
-	cmplw	8,9
-	blt	2b
+	/* Figure out where we are */
+	bl	1f
+	.long	_DYNAMIC-.
+	.long	_GLOBAL_OFFSET_TABLE_-.
+	.long	tmpstk-.
+1:	mflr	%r30
+
+	/* Set up temporary stack pointer */
+	lwz	%r1,8(%r30)
+	add	%r1,%r1,%r30
+	addi	%r1,%r1,(8+TMPSTKSZ-32)
+
+	/* Relocate self */
+	stw	%r3,16(%r1)
+	stw	%r4,20(%r1)
+	stw	%r5,24(%r1)
+	stw	%r6,28(%r1)
+
+	lwz	%r3,0(%r30) /* _DYNAMIC in %r3 */
+	add	%r3,%r3,%r30
+	lwz	%r4,4(%r30) /* GOT pointer */
+	add	%r4,%r4,%r30
+	lwz	%r4,4(%r4)  /* got[0] is _DYNAMIC link addr */
+	subf	%r4,%r4,%r3 /* subtract to calculate relocbase */
+	bl	elf_reloc_self
 	
-	lis	1,(tmpstk+TMPSTKSZ-16)@ha
-	addi	1,1,(tmpstk+TMPSTKSZ-16)@l
+	lwz	%r3,16(%r1)
+	lwz	%r4,20(%r1)
+	lwz	%r5,24(%r1)
+	lwz	%r6,28(%r1)
 
+	/* MD setup */
 	bl	powerpc_init
+
+	/* Set stack pointer to new value and branch to mi_startup */
 	mr	%r1, %r3
 	li	%r3, 0
 	stw	%r3, 0(%r1)
 	bl	mi_startup
+
+	/* If mi_startup somehow returns, exit. This would be bad. */
 	b	OF_exit
 
 /*

Modified: head/sys/powerpc/aim/machdep.c
==============================================================================
--- head/sys/powerpc/aim/machdep.c	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/sys/powerpc/aim/machdep.c	Sat Mar  7 20:14:46 2015	(r279750)
@@ -235,10 +235,13 @@ extern void	*testppc64, *testppc64size;
 extern void	*restorebridge, *restorebridgesize;
 extern void	*rfid_patch, *rfi_patch1, *rfi_patch2;
 extern void	*trapcode64;
+
+extern Elf_Addr	_GLOBAL_OFFSET_TABLE_[];
 #endif
 
 extern void	*rstcode, *rstcodeend;
-extern void	*trapcode, *trapcodeend, *trapcode2;
+extern void	*trapcode, *trapcodeend;
+extern void	*generictrap, *generictrap64;
 extern void	*slbtrap, *slbtrapend;
 extern void	*alitrap, *aliend;
 extern void	*dsitrap, *dsiend;
@@ -254,7 +257,6 @@ powerpc_init(vm_offset_t fdt, vm_offset_
 {
 	struct		pcpu *pc;
 	vm_offset_t	startkernel, endkernel;
-	void		*generictrap;
 	size_t		trap_offset, trapsize;
 	vm_offset_t	trap;
 	void		*kmdp;
@@ -467,20 +469,9 @@ powerpc_init(vm_offset_t fdt, vm_offset_
 		/* rfi_patch2 is at the end of dbleave */
 		bcopy(&rfid_patch,&rfi_patch2,4);
 	#endif
-
-		/*
-		 * Set the common trap entry point to the one that
-		 * knows to restore 32-bit operation on execution.
-		 */
-
-		generictrap = &trapcode64;
-	} else {
-		generictrap = &trapcode;
 	}
-
 	#else /* powerpc64 */
 	cpu_features |= PPC_FEATURE_64;
-	generictrap = &trapcode;
 	#endif
 
 	trapsize = (size_t)&trapcodeend - (size_t)&trapcode;
@@ -490,7 +481,7 @@ powerpc_init(vm_offset_t fdt, vm_offset_
 	 * different ones in a minute.
 	 */
 	for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
-		bcopy(generictrap, (void *)trap, trapsize);
+		bcopy(&trapcode, (void *)trap, trapsize);
 
 	#ifndef __powerpc64__
 	if (cpu_features & PPC_FEATURE_64) {
@@ -530,12 +521,19 @@ powerpc_init(vm_offset_t fdt, vm_offset_
 
 	#ifdef __powerpc64__
 	/* Set TOC base so that the interrupt code can get at it */
-	*((void **)TRAP_GENTRAP) = &trapcode2;
+	*((void **)TRAP_GENTRAP) = &generictrap;
 	*((register_t *)TRAP_TOCBASE) = toc;
 
 	bcopy(&slbtrap, (void *)EXC_DSE,(size_t)&slbtrapend - (size_t)&slbtrap);
 	bcopy(&slbtrap, (void *)EXC_ISE,(size_t)&slbtrapend - (size_t)&slbtrap);
 	#else
+	/* Set branch address for trap code */
+	if (cpu_features & PPC_FEATURE_64)
+		*((void **)TRAP_GENTRAP) = &generictrap64;
+	else
+		*((void **)TRAP_GENTRAP) = &generictrap;
+	*((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;
+
 	/* G2-specific TLB miss helper handlers */
 	bcopy(&imisstrap, (void *)EXC_IMISS,  (size_t)&imisssize);
 	bcopy(&dlmisstrap, (void *)EXC_DLMISS,  (size_t)&dlmisssize);

Modified: head/sys/powerpc/aim/trap_subr32.S
==============================================================================
--- head/sys/powerpc/aim/trap_subr32.S	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/sys/powerpc/aim/trap_subr32.S	Sat Mar  7 20:14:46 2015	(r279750)
@@ -74,8 +74,9 @@
  * Kernel SRs are loaded directly from kernel_pmap_
  */
 #define RESTORE_KERN_SRS(pmap,sr) \
-	lis	pmap,CNAME(kernel_pmap_store)@ha; \
-	lwzu	sr,CNAME(kernel_pmap_store)+PM_SR@l(pmap); \
+	lwz	pmap,TRAP_TOCBASE(0); \
+	lwz	pmap,CNAME(kernel_pmap_store)@got(pmap); \
+	lwzu	sr,PM_SR(pmap); \
 	RESTORE_SRS(pmap,sr)
 
 /*
@@ -301,7 +302,12 @@ CNAME(restorebridgesize) = .-CNAME(resto
  */
 	.globl	CNAME(rstcode), CNAME(rstcodeend)
 CNAME(rstcode):
-	ba	cpu_reset
+	bl	1f
+	.long	cpu_reset
+1:	mflr	%r31
+	lwz	%r31,0(%r31)
+	mtlr	%r31
+	blrl
 CNAME(rstcodeend):
 
 cpu_reset:
@@ -313,12 +319,12 @@ cpu_reset:
 	mflr	%r1
 	addi	%r1,%r1,(124-16)@l
 
-	bla	CNAME(cpudep_ap_early_bootstrap)
+	bl	CNAME(cpudep_ap_early_bootstrap)
 	lis	%r3,1@l
-	bla	CNAME(pmap_cpu_bootstrap)
-	bla	CNAME(cpudep_ap_bootstrap)
+	bl	CNAME(pmap_cpu_bootstrap)
+	bl	CNAME(cpudep_ap_bootstrap)
 	mr	%r1,%r3
-	bla	CNAME(cpudep_ap_setup)
+	bl	CNAME(cpudep_ap_setup)
 	GET_CPUINFO(%r5)
 	lwz	%r3,(PC_RESTORE)(%r5)
 	cmplwi	%cr0,%r3,0
@@ -327,7 +333,7 @@ cpu_reset:
 	b	CNAME(longjmp)
 2:
 #ifdef SMP
-	bla	CNAME(machdep_ap_bootstrap)
+	bl	CNAME(machdep_ap_bootstrap)
 #endif
 
 	/* Should not be reached */
@@ -344,22 +350,13 @@ CNAME(trapcode):
 	mtsprg1	%r1			/* save SP */
 	mflr	%r1			/* Save the old LR in r1 */
 	mtsprg2 %r1			/* And then in SPRG2 */
-	li	%r1, 0x20		/* How to get the vector from LR */
-	bla	generictrap		/* LR & SPRG3 is exception # */
+	lwz	%r1, TRAP_GENTRAP(0)	/* Get branch address */
+	mtlr	%r1
+	li	%r1, 0xe0		/* How to get the vector from LR */
+	blrl				/* LR & (0xff00 | r1) is exception # */
 CNAME(trapcodeend):
 
 /*
- * 64-bit version of trapcode. Identical, except it calls generictrap64.
- */
-	.globl	CNAME(trapcode64)
-CNAME(trapcode64):
-	mtsprg1	%r1			/* save SP */
-	mflr	%r1			/* Save the old LR in r1 */
-	mtsprg2 %r1			/* And then in SPRG2 */
-	li	%r1, 0x20		/* How to get the vector from LR */
-	bla	generictrap64		/* LR & SPRG3 is exception # */
-
-/*
  * For ALI: has to save DSISR and DAR
  */
 	.globl	CNAME(alitrap),CNAME(aliend)
@@ -385,7 +382,14 @@ CNAME(alitrap):
 	/* Test whether we already had PR set */
 	mfsrr1	%r31
 	mtcr	%r31
-	bla	s_trap
+
+	/* Jump to s_trap */
+	bl	1f
+	.long	s_trap
+1:	mflr	%r31
+	lwz	%r31,0(%r31)
+	mtlr	%r31
+	blrl
 CNAME(aliend):
 
 /*
@@ -449,7 +453,7 @@ isi1: 
 	xoris %r0, %r0, 0x2		/* flip the msr<tgpr> bit */
 	mtcrf 0x80, %r3			/* restore CR0 */
 	mtmsr %r0			/* flip back to the native gprs */
-	ba EXC_ISI			/* go to instr. access interrupt */
+	ba EXC_ISI 			/* go to instr. access interrupt */
 
 CNAME(imisssize) = .-CNAME(imisstrap)
 
@@ -613,13 +617,15 @@ CNAME(dsitrap):
 	rlwinm	%r31,%r31,7,25,28	/* get segment * 8 */
 
 	/* get batu */
-	addis	%r31,%r31,CNAME(battable)@ha
-	lwz	%r30,CNAME(battable)@l(31)
+	lwz	%r30,TRAP_TOCBASE(0)
+	lwz	%r30,CNAME(battable)@got(%r30)
+	add	%r31,%r30,%r31
+	lwz	%r30,0(%r31)
 	mtcr	%r30
 	bf	30,1f			/* branch if supervisor valid is
 					   false */
 	/* get batl */
-	lwz	%r31,CNAME(battable)+4@l(31)
+	lwz	%r31,4(%r31)
 /* We randomly use the highest two bat registers here */
 	mftb	%r28
 	andi.	%r28,%r28,1
@@ -644,7 +650,14 @@ CNAME(dsitrap):
 	rfi				/* return to trapped code */
 1:
 	mflr	%r28			/* save LR (SP already saved) */
-	bla	disitrap
+
+	/* Jump to disitrap */
+	bl	4f
+	.long	disitrap
+4:	mflr	%r1
+	lwz	%r1,0(%r1)
+	mtlr	%r1
+	blrl
 CNAME(dsiend):
 
 /*
@@ -711,7 +724,7 @@ realtrap:
 	GET_CPUINFO(%r1)
 	lwz	%r1,PC_CURPCB(%r1)
 	RESTORE_KERN_SRS(%r30,%r31)	/* enable kernel mapping */
-	ba s_trap
+	b	s_trap
 
 /*
  * generictrap does some standard setup for trap handling to minimize
@@ -723,6 +736,7 @@ realtrap:
  * SPRG2 - Original LR
  */
 
+	.globl	CNAME(generictrap64)
 generictrap64:
 	mtsprg3	%r31
 	mfmsr	%r31
@@ -731,6 +745,7 @@ generictrap64:
 	mfsprg3	%r31
 	isync
 
+	.globl	CNAME(generictrap)
 generictrap:
 	/* Save R1 for computing the exception vector */
 	mtsprg3 %r1
@@ -848,8 +863,9 @@ dbtrap:
 	andi.	%r1,%r1,0xff00
 	mtsprg3	%r1
 
-	lis	%r1,(tmpstk+TMPSTKSZ-16)@ha	/* get new SP */
-	addi	%r1,%r1,(tmpstk+TMPSTKSZ-16)@l
+	lwz	%r1,TRAP_TOCBASE(0)		/* get new SP */
+	lwz	%r1,tmpstk@got(%r1)
+	addi	%r1,%r1,TMPSTKSZ-16
 
 	FRAME_SETUP(PC_DBSAVE)
 /* Call C trap code: */
@@ -896,9 +912,11 @@ CNAME(dblow):
         mfsprg2	%r29			/* ... and r29 */
         mflr	%r1			/* save LR */
 	mtsprg2 %r1			/* And then in SPRG2 */
-	li	%r1, 0	 		/* How to get the vector from LR */
 
-        bla     generictrap		/* and we look like a generic trap */
+	lwz	%r1, TRAP_GENTRAP(0)	/* Get branch address */
+	mtlr	%r1
+	li	%r1, 0			/* How to get the vector from LR */
+	blrl				/* LR & (0xff00 | r1) is exception # */
 1:
 	/* Privileged, so drop to KDB */
 	GET_CPUINFO(%r1)
@@ -908,6 +926,13 @@ CNAME(dblow):
         stw	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1)	/* free r30 */
         stw	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)	/* free r31 */
         mflr	%r28					/* save LR */
-	bla	dbtrap
+
+	/* Jump to dbtrap */
+	bl	2f
+	.long	dbtrap
+2:	mflr	%r1
+	lwz	%r1,0(%r1)
+	mtlr	%r1
+	blrl
 CNAME(dbend):
 #endif /* KDB */

Modified: head/sys/powerpc/aim/trap_subr64.S
==============================================================================
--- head/sys/powerpc/aim/trap_subr64.S	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/sys/powerpc/aim/trap_subr64.S	Sat Mar  7 20:14:46 2015	(r279750)
@@ -673,8 +673,7 @@ realtrap:
  * SPRG2 - Original LR
  */
 
-	.globl	CNAME(trapcode2)
-trapcode2:
+	.globl	CNAME(generictrap)
 generictrap:
 	/* Save R1 for computing the exception vector */
 	mtsprg3 %r1

Modified: head/sys/powerpc/booke/locore.S
==============================================================================
--- head/sys/powerpc/booke/locore.S	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/sys/powerpc/booke/locore.S	Sat Mar  7 20:14:46 2015	(r279750)
@@ -205,11 +205,29 @@ done_mapping:
 /*
  * Setup a temporary stack
  */
-	lis	%r1, tmpstack@ha
-	addi	%r1, %r1, tmpstack@l
+	bl	1f
+	.long tmpstack-.
+1:	mflr	%r1
+	lwz	%r2,0(%r1)
+	add	%r1,%r1,%r2
 	addi	%r1, %r1, (TMPSTACKSZ - 16)
 
 /*
+ * Relocate kernel
+ */
+	bl      1f
+	.long   _DYNAMIC-.
+	.long   _GLOBAL_OFFSET_TABLE_-.
+1:	mflr    %r5
+	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
+	add	%r3,%r3,%r5
+	lwz	%r4,4(%r5)	/* GOT pointer */
+	add	%r4,%r4,%r5
+	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
+	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
+	bl	elf_reloc_self
+
+/*
  * Initialise exception vector offsets
  */
 	bl	ivor_setup
@@ -338,10 +356,13 @@ bp_tlb1_end:
 	blt	4b
 
 	/* Switch to the final mapping */
-	lis	%r5, __boot_page@ha
-	ori	%r5, %r5, __boot_page@l
 	bl	5f
-5:	mflr	%r3
+	.long __boot_page-.
+5:	mflr	%r5
+	lwz	%r3,0(%r3)
+	add	%r5,%r5,%r3		/* __boot_page in r5 */
+	bl	6f
+6:	mflr	%r3
 	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
 	add	%r3, %r3, %r5		/* Make this virtual address */
 	addi	%r3, %r3, 32
@@ -365,8 +386,11 @@ bp_tlb1_end:
 /*
  * Setup a temporary stack
  */
-	lis	%r1, tmpstack@ha
-	addi	%r1, %r1, tmpstack@l
+	bl	1f
+	.long tmpstack-.
+1:	mflr	%r1
+	lwz	%r2,0(%r1)
+	add	%r1,%r1,%r2
 	addi	%r1, %r1, (TMPSTACKSZ - 16)
 
 /*
@@ -377,8 +401,11 @@ bp_tlb1_end:
 	/*
 	 * Assign our pcpu instance
 	 */
-	lis	%r3, ap_pcpu@h
-	ori	%r3, %r3, ap_pcpu@l
+	bl	1f
+	.long ap_pcpu-.
+1:	mflr	%r4
+	lwz	%r3, 0(%r4)
+	add	%r3, %r3, %r4
 	lwz	%r3, 0(%r3)
 	mtsprg0	%r3
 
@@ -543,24 +570,14 @@ __boot_page_padding:
  * dedicated for cases when invalidation(s) should NOT be propagated to other
  * CPUs.
  *
- * Global vars tlb0_ways, tlb0_entries_per_way are assumed to have been set up
- * correctly (by tlb0_get_tlbconf()).
+ * void tid_flush(tlbtid_t tid, int tlb0_ways, int tlb0_entries_per_way);
  *
+ * XXX: why isn't this in C?
  */
 ENTRY(tid_flush)
 	cmpwi	%r3, TID_KERNEL
 	beq	tid_flush_end	/* don't evict kernel translations */
 
-	/* Number of TLB0 ways */
-	lis	%r4, tlb0_ways@h
-	ori	%r4, %r4, tlb0_ways@l
-	lwz	%r4, 0(%r4)
-
-	/* Number of entries / way */
-	lis	%r5, tlb0_entries_per_way@h
-	ori	%r5, %r5, tlb0_entries_per_way@l
-	lwz	%r5, 0(%r5)
-
 	/* Disable interrupts */
 	mfmsr	%r10
 	wrteei	0
@@ -718,6 +735,11 @@ setfault:
 /* Data section								*/
 /************************************************************************/
 	.data
+	.align 3
+GLOBAL(__startkernel)
+	.long   begin
+GLOBAL(__endkernel)
+	.long   end
 	.align	4
 tmpstack:
 	.space	TMPSTACKSZ

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/sys/powerpc/booke/pmap.c	Sat Mar  7 20:14:46 2015	(r279750)
@@ -159,7 +159,7 @@ unsigned int kernel_ptbls;	/* Number of 
 #define PMAP_REMOVE_DONE(pmap) \
 	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
 
-extern void tid_flush(tlbtid_t);
+extern void tid_flush(tlbtid_t tid, int tlb0_ways, int tlb0_entries_per_way);
 extern int elf32_nxstack;
 
 /**************************************************************************/
@@ -2818,7 +2818,7 @@ tid_alloc(pmap_t pmap)
 		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
 
 		/* Flush all entries from TLB0 matching this TID. */
-		tid_flush(tid);
+		tid_flush(tid, tlb0_ways, tlb0_entries_per_way);
 	}
 
 	tidbusy[thiscpu][tid] = pmap;

Modified: head/sys/powerpc/booke/trap_subr.S
==============================================================================
--- head/sys/powerpc/booke/trap_subr.S	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/sys/powerpc/booke/trap_subr.S	Sat Mar  7 20:14:46 2015	(r279750)
@@ -542,8 +542,11 @@ INTERRUPT(int_data_tlb_error)
 
 search_kernel_pmap:
 	/* Load r26 with kernel_pmap address */
-	lis	%r26, kernel_pmap_store@h
-	ori	%r26, %r26, kernel_pmap_store@l
+	bl	1f
+	.long kernel_pmap_store-.
+1:	mflr	%r21
+	lwz	%r26, 0(%r21)
+	add	%r26, %r21, %r26	/* kernel_pmap_store in r26 */
 
 	/* Force kernel tid, set TID to 0 in MAS1. */
 	li	%r21, 0
@@ -737,12 +740,17 @@ INTERRUPT(int_debug)
 	FRAME_SETUP(SPR_SPRG2, PC_BOOKE_CRITSAVE, EXC_DEBUG)
 	GET_CPUINFO(%r3)
 	lwz	%r3, (PC_BOOKE_CRITSAVE+CPUSAVE_SRR0)(%r3)
-	lis	%r4, interrupt_vector_base@ha
-	addi	%r4, %r4, interrupt_vector_base@l
+	bl	0f
+	.long	interrupt_vector_base-.
+	.long	interrupt_vector_top-.
+0:	mflr	%r5
+	lwz	%r4,0(%r5)	/* interrupt_vector_base in r4 */
+	add	%r4,%r4,%r5
 	cmplw	cr0, %r3, %r4
 	blt	1f
-	lis	%r4, interrupt_vector_top@ha
-	addi	%r4, %r4, interrupt_vector_top@l
+	lwz	%r4,4(%r5)	/* interrupt_vector_top in r4 */
+	add	%r4,%r4,%r5
+	addi	%r4,%r4,4
 	cmplw	cr0, %r3, %r4
 	bge	1f
 	/* Disable single-stepping for the interrupt handlers. */

Modified: head/sys/powerpc/ofw/ofwcall32.S
==============================================================================
--- head/sys/powerpc/ofw/ofwcall32.S	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/sys/powerpc/ofw/ofwcall32.S	Sat Mar  7 20:14:46 2015	(r279750)
@@ -66,17 +66,21 @@ ASENTRY(ofwcall)
 	/* Record the old MSR */
 	mfmsr	%r6
 
+	/* GOT pointer in r7 */
+	bl	_GLOBAL_OFFSET_TABLE_@local-4
+	mflr	%r7
+
 	/* read client interface handler */
-	lis	%r4,openfirmware_entry@ha
-	lwz	%r4,openfirmware_entry@l(%r4)
+	lwz	%r4,openfirmware_entry@got(%r7)
+	lwz	%r4,0(%r4)
 
 	/*
 	 * Set the MSR to the OF value. This has the side effect of disabling
 	 * exceptions, which prevents preemption later.
 	 */
 
-	lis	%r5,ofmsr@ha
-	lwz	%r5,ofmsr@l(%r5)
+	lwz	%r5,ofmsr@got(%r7)
+	lwz	%r5,0(%r5)
 	mtmsr	%r5
 	isync
 
@@ -87,8 +91,8 @@ ASENTRY(ofwcall)
 	 * later.
 	 */
 	mr	%r5,%r1
-	lis	%r1,(ofwstk+OFWSTKSZ-32)@ha
-	addi	%r1,%r1,(ofwstk+OFWSTKSZ-32)@l
+	lwz	%r1,ofwstk@got(%r7)
+	addi	%r1,%r1,(OFWSTKSZ-32)
 	stw	%r5,20(%r1)	/* Save real stack pointer */
 	stw	%r2,24(%r1)	/* Save curthread */
 	stw	%r6,28(%r1)	/* Save old MSR */
@@ -124,18 +128,22 @@ ASENTRY(rtascall)
 	mflr	%r0
 	stw	%r0,4(%r1)
 
+	/* GOT pointer in r7 */
+	bl	_GLOBAL_OFFSET_TABLE_@local-4
+	mflr	%r7
+
 	/* Record the old MSR to real-mode-accessible area */
 	mfmsr	%r0
-	lis	%r5,rtas_regsave@ha
-	stw	%r0,rtas_regsave@l(%r5)
+	lwz	%r5,rtas_regsave@got(%r7)
+	stw	%r0,0(%r5)
 
 	/* read client interface handler */
-	lis	%r5,rtas_entry@ha
-	lwz	%r5,rtas_entry@l(%r5)
+	lwz	%r5,rtas_entry@got(%r7)
+	lwz	%r5,0(%r5)
 
 	/* Set the MSR to the RTAS value */
-	lis	%r6,rtasmsr@ha
-	lwz	%r6,rtasmsr@l(%r6)
+	lwz	%r6,rtasmsr@got(%r7)
+	lwz	%r6,0(%r6)
 	mtmsr	%r6
 	isync
 
@@ -143,9 +151,13 @@ ASENTRY(rtascall)
 	mtctr	%r5
 	bctrl
 
+	/* GOT pointer in r7 */
+	bl	_GLOBAL_OFFSET_TABLE_@local-4
+	mflr	%r7
+
 	/* Now set the MSR back */
-	lis	%r6,rtas_regsave@ha
-	lwz	%r6,rtas_regsave@l(%r6)
+	lwz	%r6,rtas_regsave@got(%r7)
+	lwz	%r6,0(%r6)
 	mtmsr	%r6
 	isync
 

Modified: head/sys/powerpc/powerpc/elf32_machdep.c
==============================================================================
--- head/sys/powerpc/powerpc/elf32_machdep.c	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/sys/powerpc/powerpc/elf32_machdep.c	Sat Mar  7 20:14:46 2015	(r279750)
@@ -147,6 +147,8 @@ SYSINIT(oelf32, SI_SUB_EXEC, SI_ORDER_AN
 	(sysinit_cfunc_t) elf32_insert_brand_entry,
 	&freebsd_brand_oinfo);
 
+void elf_reloc_self(Elf_Dyn *dynp, Elf_Addr relocbase);
+
 void
 elf32_dump_thread(struct thread *td, void *dst, size_t *off)
 {
@@ -252,6 +254,39 @@ elf_reloc_internal(linker_file_t lf, Elf
 	return(0);
 }
 
+void
+elf_reloc_self(Elf_Dyn *dynp, Elf_Addr relocbase)
+{
+	Elf_Rela *rela = 0, *relalim;
+	Elf_Addr relasz = 0;
+	Elf_Addr *where;
+
+	/*
+	 * Extract the rela/relasz values from the dynamic section
+	 */
+	for (; dynp->d_tag != DT_NULL; dynp++) {
+		switch (dynp->d_tag) {
+		case DT_RELA:
+			rela = (Elf_Rela *)(relocbase+dynp->d_un.d_ptr);
+			break;
+		case DT_RELASZ:
+			relasz = dynp->d_un.d_val;
+			break;
+		}
+	}
+
+	/*
+	 * Relocate these values
+	 */
+	relalim = (Elf_Rela *)((caddr_t)rela + relasz);
+	for (; rela < relalim; rela++) {
+		if (ELF_R_TYPE(rela->r_info) != R_PPC_RELATIVE)
+			continue;
+		where = (Elf_Addr *)(relocbase + rela->r_offset);
+		*where = (Elf_Addr)(relocbase + rela->r_addend);
+	}
+}
+
 int
 elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type,
     elf_lookup_fn lookup)

Modified: head/sys/powerpc/powerpc/swtch32.S
==============================================================================
--- head/sys/powerpc/powerpc/swtch32.S	Sat Mar  7 20:00:26 2015	(r279749)
+++ head/sys/powerpc/powerpc/swtch32.S	Sat Mar  7 20:14:46 2015	(r279750)
@@ -121,8 +121,9 @@ ENTRY(cpu_switch)
 cpu_switchin:
 #if defined(SMP) && defined(SCHED_ULE)
 	/* Wait for the new thread to become unblocked */
-	lis	%r6,blocked_lock@ha
-	addi	%r6,%r6,blocked_lock@l
+	bl	_GLOBAL_OFFSET_TABLE_@local-4
+	mflr	%r6
+	lwz	%r6,blocked_lock@got(%r6)
 blocked_loop:
 	lwz	%r7,TD_LOCK(%r2)
 	cmpw	%r6,%r7 


More information about the svn-src-all mailing list