PERFORCE change 101196 for review

Oleksandr Tymoshenko gonzo at FreeBSD.org
Mon Jul 10 11:44:25 UTC 2006


http://perforce.freebsd.org/chv.cgi?CH=101196

Change 101196 by gonzo at gonzo_hq on 2006/07/10 11:44:05

	o genassym.c extended with register offsets in the frame struct, VM parameters, etc.
	o exception handling added.
	o fls(3) implementation added from libkern.
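
For reference, libkern's fls() is the classic linear find-last-set scan; the
sketch below reflects what sys/libkern/fls.c does (a sketch, not necessarily a
verbatim copy):

	/* Find last set bit; returns 0 for mask == 0, else a 1-based index. */
	int
	fls(int mask)
	{
		int bit;

		if (mask == 0)
			return (0);
		for (bit = 1; mask != 1; bit++)
			mask = (unsigned int)mask >> 1;
		return (bit);
	}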

Affected files ...

.. //depot/projects/mips2/src/sys/conf/files.mips#11 edit
.. //depot/projects/mips2/src/sys/mips/mips/cpu.c#7 edit
.. //depot/projects/mips2/src/sys/mips/mips/exception.S#2 edit
.. //depot/projects/mips2/src/sys/mips/mips/genassym.c#2 edit
.. //depot/projects/mips2/src/sys/mips/mips/intr_machdep.c#2 edit
.. //depot/projects/mips2/src/sys/mips/mips/trap.c#3 edit

Differences ...

==== //depot/projects/mips2/src/sys/conf/files.mips#11 (text+ko) ====

@@ -33,6 +33,7 @@
 libkern/divdi3.c		standard
 libkern/ffs.c			standard
 libkern/ffsl.c			standard
+libkern/fls.c			standard
 libkern/flsl.c			standard
 libkern/lshrdi3.c		standard
 libkern/moddi3.c		standard

==== //depot/projects/mips2/src/sys/mips/mips/cpu.c#7 (text+ko) ====

@@ -92,7 +92,7 @@
  * XXMIPS: This makes cooperation with the exception handler more convenient and
  * reduces hand-typing.  See mips_vector_init() in this file for an example of its use.
  */
-#define	VECI(vec, class)	mips_vector_install(MIPS_ ## vec ## _EXC_VEC, \
+#define	VECI(vec, class)	mips_vector_install(vec ## _EXC_VEC, \
 						    class ## Vector, \
 						    class ## VectorEnd)
 /*
@@ -124,10 +124,10 @@
 mips_vector_init(void)
 {
 
-	VECI(UTLB_MISS, TLBMiss);
-	VECI(XTLB_MISS, XTLBMiss);
-	VECI(CACHE_ERR, Cache);
-	VECI(GEN, Exception);
+	VECI(MIPS_UTLB_MISS, TLBMiss);
+	VECI(MIPS3_XTLB_MISS, XTLBMiss);
+	VECI(MIPS3_CACHE_ERR, Cache);
+	VECI(MIPS3_GEN, Exception);
 	
 	mips_wr_status(mips_rd_status() & ~MIPS_SR_BEV);
 }
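
With the MIPS_ prefix moved out of the VECI() macro and into its callers, the
vector constant is now spelled out in full at each call site; the first line of
mips_vector_init() above, for example, expands to:

	mips_vector_install(MIPS_UTLB_MISS_EXC_VEC, TLBMissVector, TLBMissVectorEnd);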

==== //depot/projects/mips2/src/sys/mips/mips/exception.S#2 (text+ko) ====

@@ -1,5 +1,5 @@
 /*-
- * Copyright (c) [year] [your name]
+ * Copyright (c) 2003-2004 Juli Mallett <jmallett at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -23,19 +23,468 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id$
+ * $P4: //depot/projects/mips2/src/sys/mips/mips/exception.S#2 $
  */
+
+#include "opt_ddb.h"
+
+#include <machine/asm.h>
+#include <machine/cache_r4k.h>
+#include <machine/cpuregs.h>
+#include <machine/param.h>
+#include <machine/pte.h>
+
 #include "assym.s"
 
-#include <machine/asm.h>
-#include <machine/asmacros.h>
-__FBSDID("$FreeBSD$");
+/*
+ * Mark the end of a vector.
+ */
+#define	_VEND(x)						\
+	GLOBAL(x ## End);					\
+	END(x);
+
+#define	VEND(x)							\
+	_VEND(x)
+
+ExceptionHandlerTable:
+	.dword	GenericException	/* Int */
+	.dword	GenericException	/* TLBMod */
+	.dword	TLBMissVector		/* TLBL */
+	.dword	TLBMissVector		/* TLBS */
+	.dword	GenericException	/* AdEL */
+	.dword	GenericException	/* AdES */
+	.dword	GenericException	/* IBE */
+	.dword	GenericException	/* DBE */
+	.dword	GenericException	/* Sys */
+	.dword	GenericException	/* Bp */
+	.dword	GenericException	/* RI */
+	.dword	GenericException	/* CpU */
+	.dword	GenericException	/* Ov */
+	.dword	GenericException	/* Tr */
+	.dword	GenericException	/* VCEI */
+	.dword	GenericException	/* FPE */
+	.dword	GenericException	/* Res (16) */
+	.dword	GenericException	/* Res (17) */
+	.dword	GenericException	/* Res (18) */
+	.dword	GenericException	/* Res (19) */
+	.dword	GenericException	/* Res (20) */
+	.dword	GenericException	/* Res (21) */
+	.dword	GenericException	/* Res (22) */
+	.dword	GenericException	/* WATCH */
+	.dword	GenericException	/* Res (24) */
+	.dword	GenericException	/* Res (25) */
+	.dword	GenericException	/* Res (26) */
+	.dword	GenericException	/* Res (27) */
+	.dword	GenericException	/* Res (28) */
+	.dword	GenericException	/* Res (29) */
+	.dword	GenericException	/* Res (30) */
+	.dword	VCED			/* VCED */
+
+	.text
+	.set noreorder
+
+/*
+ * Exception vector bodies, essentially here to save registers, jump
+ * to C, restore registers, and return from exception.  Some may do
+ * some fast servicing, where it makes sense to, but in general they
+ * are meant to be foolproof.  Some may jump to assembly herein, not
+ * to external C routines.
+ */
+LEAF(ExceptionVector)
+	.set noat
+
+	mfc0	k0, MIPS_COP_0_CAUSE
+	and	k0, MIPS3_CR_EXC_CODE
+	srl	k0, MIPS_CR_EXC_CODE_SHIFT
+	sll	k0, 3 /* Index 64-bits. */
+	la	k1, ExceptionHandlerTable
+	addu	k1, k0
+	lw	k1, 0(k1)
+	jr	k1
+	nop
+
+	.set at
+VEND(ExceptionVector)
+	.data
+1:	.asciiz "ExceptionHandlerTable"
+	.text
+
+LEAF(GenericException)
+	.set noat
+
+	subu	sp, sp, TF_SIZE
+	la	k0, 1f
+	j	exception_save_registers
+	move	k1, sp
+1:
+
+	/*
+	 * A generic exception may result in DDB being invoked.  If we
+	 * are using the kernel debugger, then set up its auxiliary
+	 * frame, so it has a full one.
+	 */
+#if	DDB
+	jal	save_kdbaux
+	nop
+#endif /* DDB */
+	li	t0, MIPS_SR_KX
+	mtc0	t0, MIPS_COP_0_STATUS
+
+	/*
+	 * If this is an interrupt, call cpu_intr() with the arguments:
+	 * trapframe
+	 *
+	 * Otherwise, go to trap().  Trapframe is set to a0 from k1 in the
+	 * BDslot here.
+	 */
+
+	mfc0	a1, MIPS_COP_0_CAUSE
+	and	k0, a1, MIPS3_CR_EXC_CODE
+	bnez	k0, 1f
+	move	a0, k1
+
+	jal	cpu_intr
+	nop
+
+	b	2f
+	nop
+
+	/*
+	 * Call trap() with arguments:
+	 * trapframe, cause, badvaddr.
+	 *
+	 * The trapframe is copied from k1 to a0 in the BDslot above.
+	 * Cause is set up above when computing the code.
+	 */
+1:
+	mfc0	a1, MIPS_COP_0_CAUSE
+	mfc0	a2, MIPS_COP_0_BAD_VADDR
+
+	jal	trap
+	nop
+2:
+
+	/*
+	 * Make sure interrupts are disabled for the purposes of
+	 * having a clean go at restoring.
+	 */
+	mtc0	zero, MIPS_COP_0_CAUSE
+	li	t0, MIPS_SR_KX
+	mtc0	t0, MIPS_COP_0_STATUS
+
+	move	k1, sp
+	jal	exception_restore_registers
+	nop
+
+	addu	sp, sp, TF_SIZE
+	eret
+
+	.set at
+END(GenericException)
+	.data
+3:	.asciiz "GenericException"
+	.text
+
+
+LEAF(CacheVector)
+	.set noat
+	la	k0, panic
+	mtc0	k0, MIPS_COP_0_EXC_PC
+	la	a0, 1f
+	eret
+	.set at
+VEND(CacheVector)
+	.data
+1:	.asciiz "Cache Vector"
+	.text
+
+
+/*
+ * XXX kernel only.  For now that makes sense.
+ */
+LEAF(TLBMissVector)
+	.set noat
+
+	mfc0	k0, MIPS_COP_0_BAD_VADDR
+	/*
+	 * Shift right logical to get a page index, but leaving
+	 * enough bits to index an array of 64 bit values, plus
+	 * align for the even/odd TLB stuff.
+	 */
+
+	/* VPN2 = (VA >> page_shift) / 2 */
+	la	k1, VM_MIN_ADDRESS
+	subu	k0, k0, k1
+	srl	k0, PAGE_SHIFT + 1
+	sll	k0, 2 + 1
+	la	k1, kptmap
+
+
+	/*
+	 * Find the page table, and index it.
+	 */
+	lw	k1, 0(k1)
+	addu	k1, k0
+	/*
+	 * Write the pair.
+	 */
+	lw	k0, 0(k1)	/* Even PTE. */
+	lw	k1, 4(k1)	/* Odd PTE. */
+	/*
+	 * Write TLB entry.
+	 */
+	mtc0	k0, MIPS_COP_0_TLB_LO0
+	mtc0	k1, MIPS_COP_0_TLB_LO1
+	nop
+
+	tlbp
+    
+	mfc0	k0, MIPS_COP_0_TLB_INDEX
+	nop
+	bltz	k0, 1f
+	nop
+
+	tlbwi
+
+	eret
+1:
+
+	tlbwr
+	eret
+
+	.set at
+VEND(TLBMissVector)
+	.data
+2:	.asciiz "TLBMissVector"
+	.text
+
+
+
+LEAF(XTLBMissVector)
+	.set noat
+	la	k0, panic
+	mtc0	k0, MIPS_COP_0_EXC_PC
+	la	a0, 1f
+	eret
+	.set at
+VEND(XTLBMissVector)
+	.data
+1:	.asciiz "64-bit TLB Miss Vector"
+	.text
+
+
+LEAF(VCED)
+	.set noat
+	la	k0, panic
+	mtc0	k0, MIPS_COP_0_EXC_PC
+	la	a0, 1f
+	eret
+	.set at
+VEND(VCED)
+	.data
+1:	.asciiz "VCED"
+	.text
+
+/*
+ * Handle a data-cache virtual coherency error.
+ */
+LEAF(VCEDX)
+	.set noat
+	mfc0	k0, MIPS_COP_0_BAD_VADDR
+	srl	k0, 3
+	sll	k0, 3
+	cache	(CACHE_R4K_SD | CACHEOP_R4K_HIT_WB_INV), 0(k0)
+	cache	(CACHE_R4K_D | CACHEOP_R4K_HIT_INV), 0(k0)
+	eret
+	.set at
+END(VCEDX)
+
+
+/*
+ * Restore registers from a trapframe pointed to in k1, returning to ra
+ * that is passed in, and kept in k0.
+ */
+LEAF(exception_restore_registers)
+	move	k0, ra
+	/*
+	 * Load extended registers into some GPRs, and start unrolling
+	 * the trapframe, putting back the extended registers just
+	 * before we need to restore those GPRs.  This doesn't do any
+	 * special ordering like the save_registers routine might,
+	 * but it keeps the flow relatively consistent.
+	 */
+
+
+	lw	a1, TF_REG_EPC(k1)
+	lw	a0, TF_REG_MULHI(k1)
+	lw	v1, TF_REG_MULLO(k1)
+#if 0
+	lw	v0, TF_REG_SR(k1)
+#endif
+
+	lw	ra, TF_REG_RA(k1)
+	lw	t9, TF_REG_T9(k1)
+	lw	t8, TF_REG_T8(k1)
+	lw	ta3, TF_REG_TA3(k1)
+	lw	ta2, TF_REG_TA2(k1)
+	lw	ta1, TF_REG_TA1(k1)
+	lw	ta0, TF_REG_TA0(k1)
+	lw	t3, TF_REG_T3(k1)
+	lw	t2, TF_REG_T2(k1)
+	lw	t1, TF_REG_T1(k1)
+	lw	t0, TF_REG_T0(k1)
+
+	/*
+	 * Brief interlude.
+	 */
+	mtc0	a1, MIPS_COP_0_EXC_PC
+	mthi	a0
+	mtlo	v1
+#if 0
+	mtc0	v0, MIPS_COP_0_STATUS
+#endif
+
+
+
+	/*
+	 * Now restore the registers we used for scratch, and nearby
+	 * GPRs.
+	 */
+	lw	a3, TF_REG_A3(k1)
+	lw	a2, TF_REG_A2(k1)
+	lw	a1, TF_REG_A1(k1)
+	lw	a0, TF_REG_A0(k1)
+	lw	v1, TF_REG_V1(k1)
+	lw	v0, TF_REG_V0(k1)
+
+
+
+	/*
+	 * Restore the stack minus TF_SIZE, to account for sp twiddle.
+	 */
+	lw	sp, TF_REG_SP(k1)
+	subu	sp, TF_SIZE
+
+
+	/*
+	 * We are done with the assembler temporary, restore it, and
+	 * return with it disabled, just in case.
+	 */
+	.set noat
+	lw	AT, TF_REG_AST(k1)
+#if 1
+	/* XXX
+	 * We wait until now so we don't interrupt ourselves.
+	 */
+	lw	k1, TF_REG_SR(k1)
+	jr	k0
+	mtc0	k1, MIPS_COP_0_STATUS
+#else
+	jr	k0
+	nop
+#endif
+
+	.set at
+END(exception_restore_registers)
+
+/*
+ * Save registers into a trapframe pointed to in k1, returning to k0.
+ */
+LEAF(exception_save_registers)
+	/*
+	 * Store the assembler temporary, and make it usable.
+	 */
+	.set noat
+	sw	AT, TF_REG_AST(k1)
+	.set at
+	/*
+	 * Store registers in order, clobbering the first with values
+	 * we need to have around later, and then storing those at the
+	 * end of the sequence, to avoid any possible issues with the
+	 * time it takes to load from those.
+	 */
+	sw	v0, TF_REG_V0(k1)
+	sw	v1, TF_REG_V1(k1)
+	sw	a0, TF_REG_A0(k1)
+	sw	a1, TF_REG_A1(k1)
+	sw	a2, TF_REG_A2(k1)
+	sw	a3, TF_REG_A3(k1)
+
+
+
+
+	/*
+	 * Brief interlude.
+	 */
+	mfc0	v0, MIPS_COP_0_STATUS
+	mflo	v1
+	mfhi	a0
+	mfc0	a1, MIPS_COP_0_EXC_PC
+
+	sw	t0, TF_REG_T0(k1)
+	sw	t1, TF_REG_T1(k1)
+	sw	t2, TF_REG_T2(k1)
+	sw	t3, TF_REG_T3(k1)
+	sw	ta0, TF_REG_TA0(k1)
+	sw	ta1, TF_REG_TA1(k1)
+	sw	ta2, TF_REG_TA2(k1)
+	sw	ta3, TF_REG_TA3(k1)
+	sw	t8, TF_REG_T8(k1)
+	sw	t9, TF_REG_T9(k1)
+	sw	ra, TF_REG_RA(k1)
+
+	/*
+	 * Now save the extended parts of the frame.
+	 */
+	sw	v0, TF_REG_SR(k1)
+	sw	v1, TF_REG_MULLO(k1)
+	sw	a0, TF_REG_MULHI(k1)
+	sw	a1, TF_REG_EPC(k1)
+
+	/*
+	 * When restore returns, TF_SIZE gets added to the SP for
+	 * return.  So in restore, we subtract TF_SIZE, which means
+	 * the value stored herein will be the value returned to.
+	 * To accommodate this, we add TF_SIZE, which makes up for the
+	 * initial subtraction for the trapframe.
+	 */
+	addu	t0, sp, TF_SIZE
+	sw	t0, TF_REG_SP(k1)
+
+
 
 
-ENTRY(reset_entry)
-ENTRY(swi_entry)
-ENTRY(prefetch_abort_entry)
-ENTRY(data_abort_entry)
-ENTRY(address_exception_entry)
-ENTRY(undefined_entry)
-ENTRY(undefinedinstruction_bounce)
+	/*
+	 * All done.
+	 */
+	jr	k0
+	nop
+END(exception_save_registers)
+
+#if	DDB
+/*
+ * Save the kdbaux structure for DDB.
+ */
+ENTRY(save_kdbaux)
+	/*
+	 * If we are using the kernel debugger, store registers that
+	 * the compiler normally saves in the place where it expects
+	 * to find them, to form a full frame.
+	 */
+	la	k0, kdbaux
+	sw	s0, 0x00(k0)
+	sw	s1, 0x08(k0)
+	sw	s2, 0x10(k0)
+	sw	s3, 0x18(k0)
+	sw	s4, 0x20(k0)
+	sw	s5, 0x28(k0)
+	sw	s6, 0x30(k0)
+	sw	s7, 0x38(k0)
+	sw	sp, 0x40(k0)
+	sw	s8, 0x48(k0)
+	sw	gp, 0x50(k0)
+
+	jr	ra
+	nop
+END(save_kdbaux)
+#endif /* DDB */
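
In C terms, the page-table walk done by TLBMissVector above amounts to the
sketch below (a rough rendering only: kptmap is taken to point at an array of
32-bit PTEs laid out as even/odd pairs per VPN2, and PAGE_SHIFT and
VM_MIN_ADDRESS come from the machine headers):

	/*
	 * Rough C equivalent of the TLBMissVector lookup; the real code runs
	 * in the exception vector using only k0/k1.
	 */
	static void
	tlbmiss_lookup(uint32_t badvaddr, const uint32_t *kptmap,
	    uint32_t *lo0, uint32_t *lo1)
	{
		uint32_t pair;

		/* Even/odd PTE pair (VPN2) index for the faulting address. */
		pair = (badvaddr - VM_MIN_ADDRESS) >> (PAGE_SHIFT + 1);
		*lo0 = kptmap[2 * pair];	/* even PTE -> EntryLo0 */
		*lo1 = kptmap[2 * pair + 1];	/* odd PTE -> EntryLo1 */
	}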

==== //depot/projects/mips2/src/sys/mips/mips/genassym.c#2 (text+ko) ====

@@ -64,4 +64,114 @@
 #include <nfsclient/nfs.h>
 #include <nfsclient/nfsdiskless.h>
 
+#include <machine/locore.h>
+#include <machine/pcb.h>
+#include <machine/reg.h>
+
 ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
+ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
+ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
+ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
+
+ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
+
+ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
+
+ASSYM(TD_FRAME, offsetof(struct thread, td_frame));
+ASSYM(TD_KSTACK, offsetof(struct thread, td_kstack));
+ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
+ASSYM(TD_PROC, offsetof(struct thread, td_proc));
+
+ASSYM(TD_MD_REGS, offsetof(struct thread, td_md.md_regs));
+
+ASSYM(FRAME_SIZ, sizeof(struct frame));
+ASSYM(FRAME_ZERO, offsetof(struct frame, f_regs[ZERO]));
+ASSYM(FRAME_AST, offsetof(struct frame, f_regs[AST]));
+ASSYM(FRAME_V0, offsetof(struct frame, f_regs[V0]));
+ASSYM(FRAME_V1, offsetof(struct frame, f_regs[V1]));
+ASSYM(FRAME_A0, offsetof(struct frame, f_regs[A0]));
+ASSYM(FRAME_A1, offsetof(struct frame, f_regs[A1]));
+ASSYM(FRAME_A2, offsetof(struct frame, f_regs[A2]));
+ASSYM(FRAME_A3, offsetof(struct frame, f_regs[A3]));
+ASSYM(FRAME_T0, offsetof(struct frame, f_regs[T0]));
+ASSYM(FRAME_T1, offsetof(struct frame, f_regs[T1]));
+ASSYM(FRAME_T2, offsetof(struct frame, f_regs[T2]));
+ASSYM(FRAME_T3, offsetof(struct frame, f_regs[T3]));
+
+ASSYM(FRAME_TA0, offsetof(struct frame, f_regs[TA0]));
+ASSYM(FRAME_TA1, offsetof(struct frame, f_regs[TA1]));
+ASSYM(FRAME_TA2, offsetof(struct frame, f_regs[TA2]));
+ASSYM(FRAME_TA3, offsetof(struct frame, f_regs[TA3]));
+
+ASSYM(FRAME_S0, offsetof(struct frame, f_regs[S0]));
+ASSYM(FRAME_S1, offsetof(struct frame, f_regs[S1]));
+ASSYM(FRAME_S2, offsetof(struct frame, f_regs[S2]));
+ASSYM(FRAME_S3, offsetof(struct frame, f_regs[S3]));
+ASSYM(FRAME_S4, offsetof(struct frame, f_regs[S4]));
+ASSYM(FRAME_S5, offsetof(struct frame, f_regs[S5]));
+ASSYM(FRAME_S6, offsetof(struct frame, f_regs[S6]));
+ASSYM(FRAME_S7, offsetof(struct frame, f_regs[S7]));
+ASSYM(FRAME_T8, offsetof(struct frame, f_regs[T8]));
+ASSYM(FRAME_T9, offsetof(struct frame, f_regs[T9]));
+ASSYM(FRAME_K0, offsetof(struct frame, f_regs[K0]));
+ASSYM(FRAME_K1, offsetof(struct frame, f_regs[K1]));
+ASSYM(FRAME_GP, offsetof(struct frame, f_regs[GP]));
+ASSYM(FRAME_SP, offsetof(struct frame, f_regs[SP]));
+ASSYM(FRAME_S8, offsetof(struct frame, f_regs[S8]));
+ASSYM(FRAME_RA, offsetof(struct frame, f_regs[RA]));
+ASSYM(FRAME_SR, offsetof(struct frame, f_regs[SR]));
+ASSYM(FRAME_MULLO, offsetof(struct frame, f_regs[MULLO]));
+ASSYM(FRAME_MULHI, offsetof(struct frame, f_regs[MULHI]));
+ASSYM(FRAME_BADVADDR, offsetof(struct frame, f_regs[BADVADDR]));
+ASSYM(FRAME_CAUSE, offsetof(struct frame, f_regs[CAUSE]));
+ASSYM(FRAME_EPC, offsetof(struct frame, f_regs[PC]));
+ASSYM(FRAME_PPL, offsetof(struct frame, f_ppl));
+
+ASSYM(TF_SIZE, sizeof(struct trapframe));
+ASSYM(TF_REG_AST, offsetof(struct trapframe, tf_regs[TF_AST]));
+ASSYM(TF_REG_V0, offsetof(struct trapframe, tf_regs[TF_V0]));
+ASSYM(TF_REG_V1, offsetof(struct trapframe, tf_regs[TF_V1]));
+ASSYM(TF_REG_A0, offsetof(struct trapframe, tf_regs[TF_A0]));
+ASSYM(TF_REG_A1, offsetof(struct trapframe, tf_regs[TF_A1]));
+ASSYM(TF_REG_A2, offsetof(struct trapframe, tf_regs[TF_A2]));
+ASSYM(TF_REG_A3, offsetof(struct trapframe, tf_regs[TF_A3]));
+ASSYM(TF_REG_T0, offsetof(struct trapframe, tf_regs[TF_T0]));
+ASSYM(TF_REG_T1, offsetof(struct trapframe, tf_regs[TF_T1]));
+ASSYM(TF_REG_T2, offsetof(struct trapframe, tf_regs[TF_T2]));
+ASSYM(TF_REG_T3, offsetof(struct trapframe, tf_regs[TF_T3]));
+
+ASSYM(TF_REG_TA0, offsetof(struct trapframe, tf_regs[TF_TA0]));
+ASSYM(TF_REG_TA1, offsetof(struct trapframe, tf_regs[TF_TA1]));
+ASSYM(TF_REG_TA2, offsetof(struct trapframe, tf_regs[TF_TA2]));
+ASSYM(TF_REG_TA3, offsetof(struct trapframe, tf_regs[TF_TA3]));
+
+ASSYM(TF_REG_T8, offsetof(struct trapframe, tf_regs[TF_T8]));
+ASSYM(TF_REG_T9, offsetof(struct trapframe, tf_regs[TF_T9]));
+ASSYM(TF_REG_RA, offsetof(struct trapframe, tf_regs[TF_RA]));
+ASSYM(TF_REG_SR, offsetof(struct trapframe, tf_regs[TF_SR]));
+ASSYM(TF_REG_MULLO, offsetof(struct trapframe, tf_regs[TF_MULLO]));
+ASSYM(TF_REG_MULHI, offsetof(struct trapframe, tf_regs[TF_MULHI]));
+ASSYM(TF_REG_EPC, offsetof(struct trapframe, tf_regs[TF_EPC]));
+ASSYM(TF_REG_SP, offsetof(struct trapframe, tf_regs[TF_SP]));
+ASSYM(TF_PPL, offsetof(struct trapframe, tf_ppl));
+
+ASSYM(PCB_REG_S0, offsetof(struct pcb, pcb_regs[PCB_REG_S0]));
+ASSYM(PCB_REG_S1, offsetof(struct pcb, pcb_regs[PCB_REG_S1]));
+ASSYM(PCB_REG_S2, offsetof(struct pcb, pcb_regs[PCB_REG_S2]));
+ASSYM(PCB_REG_S3, offsetof(struct pcb, pcb_regs[PCB_REG_S3]));
+ASSYM(PCB_REG_S4, offsetof(struct pcb, pcb_regs[PCB_REG_S4]));
+ASSYM(PCB_REG_S5, offsetof(struct pcb, pcb_regs[PCB_REG_S5]));
+ASSYM(PCB_REG_S6, offsetof(struct pcb, pcb_regs[PCB_REG_S6]));
+ASSYM(PCB_REG_S7, offsetof(struct pcb, pcb_regs[PCB_REG_S7]));
+ASSYM(PCB_REG_S8, offsetof(struct pcb, pcb_regs[PCB_REG_S8]));
+ASSYM(PCB_REG_SP, offsetof(struct pcb, pcb_regs[PCB_REG_SP]));
+ASSYM(PCB_REG_SR, offsetof(struct pcb, pcb_regs[PCB_REG_SR]));
+ASSYM(PCB_REG_RA, offsetof(struct pcb, pcb_regs[PCB_REG_RA]));
+
+ASSYM(VM_MIN_ADDRESS, VM_MIN_ADDRESS);
+ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
+ASSYM(VM_MAX_ADDRESS, VM_MAX_ADDRESS);
+ASSYM(VM_MIN_KERNEL_ADDRESS, VM_MIN_KERNEL_ADDRESS);
+ASSYM(VM_MAX_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS);
+
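
At build time these ASSYM() entries become plain constants in the generated
assym.s that exception.S includes, roughly of the form below (the offset values
here are purely illustrative; the real ones are computed by genassym):

	#define	TF_REG_EPC	0x54	/* illustrative value only */
	#define	TF_SIZE		0x60	/* illustrative value only */

which is what lets the handlers write 'lw a1, TF_REG_EPC(k1)' without
hand-maintained offsets.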

==== //depot/projects/mips2/src/sys/mips/mips/intr_machdep.c#2 (text+ko) ====

@@ -1,5 +1,6 @@
 /*-
  * Copyright (c) 2006 Fill this file and put your name here
+ * Copyright (c) 2002-2004 Juli Mallett <jmallett at FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,3 +28,102 @@
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#include <machine/cpuinfo.h>
+#include <machine/cpuregs.h>
+#include <machine/frame.h>
+#include <machine/intr_machdep.h>
+#include <machine/md_var.h>
+#include <machine/trap.h>
+
+struct intrhand {
+	void (*handler)(void *);
+	void *arg;
+};
+static struct intrhand intrhard[6];
+static struct intrhand intrsoft[2];
+
+void
+cpu_establish_hardintr(int intr, void (*handler)(void *), void *arg)
+{
+	struct intrhand *ih;
+
+	if (intr < 0 || intr > 5)
+		panic("%s called for unknown hard intr %d", __func__, intr);
+	ih = &intrhard[intr];
+	if (ih->handler != NULL && ih->handler != handler)
+		panic("%s cannot share hard intr %d", __func__, intr);
+	ih->handler = handler;
+	ih->arg = arg;
+
+	mips_wr_status(mips_rd_status() | (((1<< intr) << 8) << 2));
+}
+
+void
+cpu_establish_softintr(int intr, void (*handler)(void *), void *arg)
+{
+	struct intrhand *ih;
+
+	if (intr < 0 || intr > 1)
+		panic("%s called for unknown soft intr %d", __func__, intr);
+	ih = &intrsoft[intr];
+	if (ih->handler != NULL && ih->handler != handler)
+		panic("%s cannot share soft intr %d", __func__, intr);
+	ih->handler = handler;
+	ih->arg = arg;
+
+	mips_wr_status(mips_rd_status() | ((1 << intr) << 8));
+}
+
+void
+cpu_intr(struct trapframe *tf)
+{
+	struct intrhand *ih;
+	register_t cause;
+	int hard;
+	int intr;
+	int i;
+
+	critical_enter();
+
+	cause = mips_rd_cause();
+	intr = (cause & MIPS_INT_MASK) >> 8;
+	cause &= ~MIPS_INT_MASK;
+	mips_wr_cause(cause);
+
+	while ((i = fls(intr)) != 0) {
+		intr &= ~(1 << (i - 1));
+		switch (i) {
+		case 1: case 2:
+			/* Software interrupt. */
+			i--; /* Get a 0-offset interrupt. */
+			hard = 0;
+			ih = &intrsoft[i];
+			break;
+		default:
+			/* Hardware interrupt. */
+			i -= 2; /* Trim software interrupt bits. */
+			i--; /* Get a 0-offset interrupt. */
+			hard = 1;
+			ih = &intrhard[i];
+			break;
+		}
+		if (ih->handler != NULL) {
+			if (ih->arg == NULL)
+				(*ih->handler)(tf);
+			else
+				(*ih->handler)(ih->arg);
+		} else
+			printf("stray %s interrupt %d\n",
+			       hard ? "hard" : "soft", i);
+	}
+	KASSERT(i == 0, ("all interrupts handled"));
+
+	critical_exit();
+}
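
A consumer of the new interrupt interface would look roughly like the sketch
below; mydev_intr and its softc are hypothetical names, shown only to
illustrate cpu_establish_hardintr():

	/* Hypothetical driver interrupt handler; not part of this change. */
	static void
	mydev_intr(void *arg)
	{
		struct mydev_softc *sc = arg;

		/* Acknowledge and service the device here. */
		(void)sc;
	}

	/*
	 * In the driver attach path: hook hard interrupt line 2, which also
	 * enables IM bit 12 ((1 << 2) << 10) in the status register.
	 */
	cpu_establish_hardintr(2, mydev_intr, sc);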

==== //depot/projects/mips2/src/sys/mips/mips/trap.c#3 (text+ko) ====

@@ -107,7 +107,7 @@
 		tf = retf;
 	bcopy(retf, tf, sizeof *tf);
 
-	code = (cause & MIPS_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT;
+	code = (cause & MIPS3_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT;
 	kernelmode = (tf->tf_regs[TF_SR] & MIPS_SR_KSU_USER) == 0;
 
 	/*

