PERFORCE change 92765 for review

Kip Macy kmacy at FreeBSD.org
Sat Mar 4 19:07:20 PST 2006


http://perforce.freebsd.org/chv.cgi?CH=92765

Change 92765 by kmacy at kmacy_storage:sun4v_work on 2006/03/05 03:06:50

	suck back in bits required to make the linker happy

Affected files ...

.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/exception.S#16 edit

Differences ...

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/exception.S#16 (text+ko) ====

@@ -267,10 +267,7 @@
 
 	.macro	data_miss
 	MAGIC_TRAP_ON
-	MAGIC_TRAP_ON
-	MAGIC_TRAP_ON
-	MAGIC_TRAP_ON
-	MAGIC_TRAP_ON
+
 	.align	32
 	.endm
 
@@ -999,7 +996,7 @@
 	brlz,pt	%g1, 1f
 	  nop
 	wrpr	%g0, %g1, %pil
-	
+1:		
 	wrpr	%g0, %g4, %tnpc
 	rdpr	%cwp, %l0
 	set	TSTATE_KERNEL, %l1
@@ -1009,3 +1006,328 @@
 END(tl0_trap)
 END(tl0_intr)
 
+
+
+/*
+ * Freshly forked processes come here when switched to for the first time.
+ * The arguments to fork_exit() have been set up in the locals; we must move
+ * them to the outs.
+ */
+ENTRY(fork_trampoline)
+#if KTR_COMPILE & KTR_PROC
+	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
+	    , %g1, %g2, %g3, 7, 8, 9)
+	ldx	[PCPU(CURTHREAD)], %g2
+	stx	%g2, [%g1 + KTR_PARM1]
+	ldx	[%g2 + TD_PROC], %g2
+	add	%g2, P_COMM, %g2
+	stx	%g2, [%g1 + KTR_PARM2]
+	rdpr	%cwp, %g2
+	stx	%g2, [%g1 + KTR_PARM3]
+9:
+#endif
+	mov	%l0, %o0
+	mov	%l1, %o1
+	call	fork_exit
+	 mov	%l2, %o2
+	ba,a	%xcc, tl0_ret
+	 nop
+END(fork_trampoline)
+
+
+
+/*
+ * The following code is all going to need to be rewritten,
+ * but is here to make the linker happy.
+ */
+
+	.comm	intrnames, IV_NAMLEN
+	.comm	eintrnames, 0
+
+	.comm	intrcnt, IV_MAX * 8
+	.comm	eintrcnt, 0
+	
+
+ENTRY(tl1_trap)
+	illtrap
+END(tl1_trap)	
+/*
+ * Initiate return to usermode.
+ *
+ * Called with a trapframe on the stack.  The window that was set up in
+ * tl0_trap may have been used by "fast" trap handlers that pretend to be
+ * leaf functions, so all ins and locals may have been clobbered since
+ * then.
+ *
+ * This code is rather long and complicated.
+ */
+ENTRY(tl0_ret)
+	/*
+	 * Check for pending asts atomically with returning.  We must raise
+	 * the pil before checking, and if no asts are found the pil must
+	 * remain raised until the retry is executed, or we risk missing asts
+	 * caused by interrupts occurring after the test.  If the pil is lowered,
+	 * as it is when we call ast, the check must be re-executed.
+	 */
+	wrpr	%g0, PIL_TICK, %pil
+	ldx	[PCPU(CURTHREAD)], %l0
+	lduw	[%l0 + TD_FLAGS], %l1
+	set	TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
+	and	%l1, %l2, %l1
+	brz,a,pt %l1, 1f
+	 nop
+
+	/*
+	 * We have an ast.  Re-enable interrupts and handle it, then restart
+	 * the return sequence.
+	 */
+	wrpr	%g0, 0, %pil
+	call	ast
+	 add	%sp, CCFSZ + SPOFF, %o0
+	ba,a	%xcc, tl0_ret
+	 nop
+
+	/*
+	 * Check for windows that were spilled to the pcb and need to be
+	 * copied out.  This must be the last thing that is done before the
+	 * return to usermode.  If there are still user windows in the cpu
+	 * and we call a nested function after this, which causes them to be
+	 * spilled to the pcb, they will not be copied out and the stack will
+	 * be inconsistent.
+	 */
+1:	ldx	[PCB_REG + PCB_NSAVED], %l1
+	brz,a,pt %l1, 2f
+	 nop
+	wrpr	%g0, 0, %pil
+	mov	T_SPILL, %o0
+	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
+	call	trap
+	 add	%sp, SPOFF + CCFSZ, %o0
+	ba,a	%xcc, tl0_ret
+	 nop
+
+	/*
+	 * Restore the out and most global registers from the trapframe.
+	 * The ins will become the outs when we restore below.
+	 */
+2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
+	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
+	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
+	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
+	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
+	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
+	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
+	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
+
+	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
+	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
+	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
+	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
+	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
+
+	/*
+	 * Load everything we need to restore below before disabling
+	 * interrupts.
+	 */
+	ldx	[%sp + SPOFF + CCFSZ + TF_FPRS], %l0
+	ldx	[%sp + SPOFF + CCFSZ + TF_GSR], %l1
+	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
+	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l3
+	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
+	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l5
+	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l6
+
+	/*
+	 * Disable interrupts to restore the special globals.  They are not
+	 * saved and restored for all kernel traps, so an interrupt at the
+	 * wrong time would clobber them.
+	 */
+	wrpr	%g0, PSTATE_NORMAL, %pstate
+
+	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
+	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
+
+#if 0
+	/*
+	 * Switch to alternate globals.  This frees up some registers we
+	 * can use after the restore changes our window.
+	 */
+	wrpr	%g0, PSTATE_ALT, %pstate
+#endif
+	/*
+	 * Drop %pil to zero.  It must have been zero at the time of the
+	 * trap, since we were in usermode, but it was raised above in
+	 * order to check for asts atomically.  We have interrupts disabled
+	 * so any interrupts will not be serviced until we complete the
+	 * return to usermode.
+	 */
+	wrpr	%g0, 0, %pil
+
+	/*
+	 * Save %fprs in an alternate global so it can be restored after the
+	 * restore instruction below.  If we restore it before the restore
+	 * and the restore traps, we may run for a while with floating point
+	 * enabled in the kernel, which we want to avoid.
+	 */
+	mov	%l0, %g1
+
+	/*
+	 * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
+	 * so we set it temporarily and then clear it.
+	 */
+	wr	%g0, FPRS_FEF, %fprs
+	ldx	[%sp + SPOFF + CCFSZ + TF_FSR], %fsr
+	wr	%l1, 0, %gsr
+	wr	%g0, 0, %fprs
+
+	/*
+	 * Restore program counters.  This could be done after the restore
+	 * but we're out of alternate globals to store them in...
+	 */
+	wrpr	%l2, 0, %tnpc
+	wrpr	%l3, 0, %tpc
+
+	/*
+	 * Save %tstate in an alternate global and clear the %cwp field.  %cwp
+	 * will be affected by the restore below and we need to make sure it
+	 * points to the current window at that time, not the window that was
+	 * active at the time of the trap.
+	 */
+	andn	%l4, TSTATE_CWP_MASK, %g2
+
+	/*
+	 * Restore %y.  Could also be below if we had more alternate globals.
+	 */
+	wr	%l5, 0, %y
+
+	/*
+	 * Set up %wstate for return.  We need to restore the user window state
+	 * which we saved in wstate.other when we trapped.  We also need to
+	 * set the transition bit so the restore will be handled specially
+	 * if it traps; use the xor feature of wrpr to do that.
+	 */
+#if 0
+	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
+	wrpr	%g3, WSTATE_TRANSITION, %wstate
+#endif
+	/*
+	 * Set up window management registers for return.  If not all user
+	 * windows were spilled in the kernel %otherwin will be non-zero,
+	 * so we need to transfer it to %canrestore to correctly restore
+	 * those windows.  Otherwise everything gets set to zero and the
+	 * restore below will fill a window directly from the user stack.
+	 */
+	rdpr	%otherwin, %o0
+	wrpr	%o0, 0, %canrestore
+	wrpr	%g0, 0, %otherwin
+	wrpr	%o0, 0, %cleanwin
+
+	/*
+	 * Now do the restore.  If this instruction causes a fill trap which
+	 * fails to fill a window from the user stack, we will resume at
+	 * tl0_ret_fill_end and call back into the kernel.
+	 */
+	restore
+tl0_ret_fill:
+
+	/*
+	 * We made it.  We're back in the window that was active at the time
+	 * of the trap, and ready to return to usermode.
+	 */
+
+	/*
+	 * Restore %fprs.  This was saved in an alternate global above.
+	 */
+	wr	%g1, 0, %fprs
+
+	/*
+	 * Fix up %tstate so the saved %cwp points to the current window and
+	 * restore it.
+	 */
+	rdpr	%cwp, %g4
+	wrpr	%g2, %g4, %tstate
+
+	/*
+	 * Restore the user window state.  The transition bit was set above
+	 * for special handling of the restore; this clears it.
+	 */
+	wrpr	%g3, 0, %wstate
+
+#if KTR_COMPILE & KTR_TRAP
+	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
+	    , %g2, %g3, %g4, 7, 8, 9)
+	ldx	[PCPU(CURTHREAD)], %g3
+	stx	%g3, [%g2 + KTR_PARM1]
+	rdpr	%pil, %g3
+	stx	%g3, [%g2 + KTR_PARM2]
+	rdpr	%tpc, %g3
+	stx	%g3, [%g2 + KTR_PARM3]
+	rdpr	%tnpc, %g3
+	stx	%g3, [%g2 + KTR_PARM4]
+	stx	%sp, [%g2 + KTR_PARM5]
+9:
+#endif
+
+	/*
+	 * Return to usermode.
+	 */
+	retry
+tl0_ret_fill_end:
+
+#if KTR_COMPILE & KTR_TRAP
+	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
+	    , %l0, %l1, %l2, 7, 8, 9)
+	rdpr	%pstate, %l1
+	stx	%l1, [%l0 + KTR_PARM1]
+	stx	%l5, [%l0 + KTR_PARM2]
+	stx	%sp, [%l0 + KTR_PARM3]
+9:
+#endif
+
+	/*
+	 * The restore above caused a fill trap and the fill handler was
+	 * unable to fill a window from the user stack.  The special fill
+	 * handler recognized this and punted, sending us here.  We need
+	 * to carefully undo any state that was restored before the restore
+	 * was executed and call trap again.  Trap will copy in a window
+	 * from the user stack, which will fault in the page we need, so the
+	 * restore above will succeed when we try again.  If this fails
+	 * the process has trashed its stack, so we kill it.
+	 */
+
+	/*
+	 * Restore the kernel window state.  This was saved in %l6 above, and
+	 * since the restore failed we're back in the same window.
+	 */
+	wrpr	%l6, 0, %wstate
+
+#if 0
+	/*
+	 * Restore the normal globals which have predefined values in the
+	 * kernel.  We clobbered them above when restoring the user's globals,
+	 * so this is very important.
+	 * XXX PSTATE_ALT must already be set.
+	 */
+	wrpr	%g0, PSTATE_ALT, %pstate
+	mov	PCPU_REG, %o1
+	wrpr	%g0, PSTATE_NORMAL, %pstate
+	mov	%o0, PCB_REG
+	mov	%o1, PCPU_REG
+	wrpr	%g0, PSTATE_KERNEL, %pstate
+#endif
+	/*
+	 * Simulate a fill trap and then start the whole return sequence over
+	 * again.  This is special because it only copies in 1 window, not 2
+	 * as we would for a normal failed fill.  This may be the first time
+	 * the process has been run, so there may not be 2 windows worth of
+	 * stack to copyin.
+	 */
+	mov	T_FILL_RET, %o0
+	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
+	call	trap
+	 add	%sp, SPOFF + CCFSZ, %o0
+	ba,a	%xcc, tl0_ret
+	 nop
+END(tl0_ret)
+
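As a reading aid, the contract of fork_trampoline above can be modeled in C.
cpu_fork (not part of this change) leaves the three fork_exit() arguments in
%l0-%l2, and the trampoline only shuffles them into the out registers before
the call, then falls into tl0_ret.  The sketch below is illustrative only:
the trapframe struct and the printf-based callout are stand-ins, not the
kernel's types.

	#include <stdio.h>

	struct trapframe { long tf_type; };	/* simplified stand-in */

	/*
	 * Models fork_exit(callout, arg, frame): run the callout for a
	 * newly forked thread, then return to usermode (tl0_ret in the
	 * assembly above).
	 */
	static void
	fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
	    struct trapframe *frame)
	{
		callout(arg, frame);
	}

	static void
	child_main(void *arg, struct trapframe *frame)
	{
		printf("child runs: arg=%p tf_type=%ld\n", arg, frame->tf_type);
	}

	int
	main(void)
	{
		struct trapframe tf = { 0 };
		/* fork_trampoline: mov %l0->%o0, %l1->%o1, %l2->%o2; call */
		fork_exit(child_main, NULL, &tf);
		return (0);
	}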

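The ast-handling preamble of tl0_ret follows a retry pattern worth spelling
out: raise %pil so an interrupt cannot post an ast behind the test, check the
thread flags, and if work is pending drop %pil, run ast(), and restart the
entire return sequence.  A hedged, self-contained C model (the flag values
and helpers are stand-ins for illustration, not the kernel's):

	#include <stdio.h>

	#define TDF_ASTPENDING	0x01	/* illustrative values */
	#define TDF_NEEDRESCHED	0x02
	#define PIL_TICK	14

	static int pil;				/* models %pil */
	static int td_flags = TDF_ASTPENDING;	/* pretend one ast is posted */

	static void
	ast(void)
	{
		printf("ast handled at pil=%d\n", pil);
		td_flags = 0;
	}

	static void
	tl0_ret_model(void)
	{
		for (;;) {
			/*
			 * Raise %pil before testing so an interrupt cannot
			 * post an ast between the test and the final retry.
			 */
			pil = PIL_TICK;
			if (td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
				pil = 0;	/* re-enable for ast() */
				ast();
				continue;	/* restart tl0_ret */
			}
			break;		/* pil stays raised until retry */
		}
		printf("restore registers, retry to usermode (pil=%d)\n", pil);
	}

	int
	main(void)
	{
		tl0_ret_model();
		return (0);
	}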

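Likewise, the tl0_ret_fill/tl0_ret_fill_end pair encodes a fault-and-retry
protocol: if the final restore takes a fill trap that cannot be satisfied
from the user stack, the special fill handler resumes at tl0_ret_fill_end,
which re-enters trap() with T_FILL_RET to copy a single window in (faulting
the needed page), and the whole return sequence starts over; if even that
fails, the process has trashed its stack and is killed.  A schematic C model
of that loop (all names here are illustrative stand-ins):

	#include <stdbool.h>
	#include <stdio.h>

	static int fill_faults = 1;	/* pretend the first restore faults */

	static bool
	restore_window(void)
	{
		if (fill_faults > 0) {
			fill_faults--;
			return (false);	/* fill trap: user stack page gone */
		}
		return (true);
	}

	static bool
	trap_fill_ret(void)
	{
		/* trap(T_FILL_RET): copy one window in from the user stack. */
		printf("trap: T_FILL_RET, copy in one window\n");
		return (true);		/* false would mean a trashed stack */
	}

	int
	main(void)
	{
		for (;;) {
			if (restore_window())
				break;		/* restore succeeded */
			if (!trap_fill_ret())
				return (1);	/* kill the process */
			/* start the whole return sequence over */
		}
		printf("retry: back to usermode\n");
		return (0);
	}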