PERFORCE change 132750 for review
John Birrell
jb at FreeBSD.org
Mon Jan 7 14:25:28 PST 2008
http://perforce.freebsd.org/chv.cgi?CH=132750
Change 132750 by jb at jb_freebsd1 on 2008/01/07 22:25:11
Add the code to do fbt/sdt probe calls via dtrace_invop and then alternately
call the function (if an entry probe), or clean up after a return
probe.
Affected files ...
.. //depot/projects/dtrace/src/sys/cddl/dev/dtrace/amd64/dtrace_asm.S#3 edit
Differences ...
==== //depot/projects/dtrace/src/sys/cddl/dev/dtrace/amd64/dtrace_asm.S#3 (text+ko) ====
@@ -28,19 +28,129 @@
* Use is subject to license terms.
*/
-#include <machine/asm.h>
+#define _ASM
+
+#include <machine/asmacros.h>
#include <sys/cpuvar_defs.h>
+#include <sys/dtrace.h>
#include "assym.s"
+#define INTR_POP \
+ MEXITCOUNT; \
+ movq TF_RDI(%rsp),%rdi; \
+ movq TF_RSI(%rsp),%rsi; \
+ movq TF_RDX(%rsp),%rdx; \
+ movq TF_RCX(%rsp),%rcx; \
+ movq TF_R8(%rsp),%r8; \
+ movq TF_R9(%rsp),%r9; \
+ movq TF_RAX(%rsp),%rax; \
+ movq TF_RBX(%rsp),%rbx; \
+ movq TF_RBP(%rsp),%rbp; \
+ movq TF_R10(%rsp),%r10; \
+ movq TF_R11(%rsp),%r11; \
+ movq TF_R12(%rsp),%r12; \
+ movq TF_R13(%rsp),%r13; \
+ movq TF_R14(%rsp),%r14; \
+ movq TF_R15(%rsp),%r15; \
+ testb $SEL_RPL_MASK,TF_CS(%rsp); \
+ jz 1f; \
+ cli; \
+ swapgs; \
+1: addq $TF_RIP,%rsp;
+
+
.globl calltrap
.type calltrap,@function
ENTRY(dtrace_invop_start)
- /* XXX More code to go in here. :-) */
+ /*
+ * #BP traps with %rip set to the next address. We need to decrement
+ * the value to indicate the address of the int3 (0xcc) instruction
+ * that we substituted.
+ */
+ movq TF_RIP(%rsp), %rdi
+ decq %rdi
+ movq TF_RSP(%rsp), %rsi
+ movq TF_RAX(%rsp), %rdx
+ pushq (%rsi)
+ movq %rsp, %rsi
+ call dtrace_invop
+ addq $8, %rsp
+ cmpl $DTRACE_INVOP_PUSHL_EBP, %eax
+ je bp_push
+ cmpl $DTRACE_INVOP_LEAVE, %eax
+ je bp_leave
+ cmpl $DTRACE_INVOP_NOP, %eax
+ je bp_nop
+ cmpl $DTRACE_INVOP_RET, %eax
+ je bp_ret
/* When all else fails handle the trap in the usual way. */
jmpq *dtrace_invop_calltrap_addr
+
+bp_push:
+ /*
+ * We must emulate a "pushq %rbp". To do this, we pull the stack
+ * down 8 bytes, and then store the base pointer.
+ */
+ INTR_POP
+ subq $16, %rsp /* make room for %rbp */
+ pushq %rax /* push temp */
+ movq 24(%rsp), %rax /* load calling RIP */
+ movq %rax, 8(%rsp) /* store calling RIP */
+ movq 32(%rsp), %rax /* load calling CS */
+ movq %rax, 16(%rsp) /* store calling CS */
+ movq 40(%rsp), %rax /* load calling RFLAGS */
+ movq %rax, 24(%rsp) /* store calling RFLAGS */
+ movq 48(%rsp), %rax /* load calling RSP */
+ subq $8, %rax /* make room for %rbp */
+ movq %rax, 32(%rsp) /* store calling RSP */
+ movq 56(%rsp), %rax /* load calling SS */
+ movq %rax, 40(%rsp) /* store calling SS */
+ movq 32(%rsp), %rax /* reload calling RSP */
+ movq %rbp, (%rax) /* store %rbp there */
+ popq %rax /* pop off temp */
+ iretq /* return from interrupt */
+ /*NOTREACHED*/
+
+bp_leave:
+ /*
+ * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
+ * followed by a "popq %rbp". This is quite a bit simpler on amd64
+ * than it is on i386 -- we can exploit the fact that the %rsp is
+ * explicitly saved to effect the pop without having to reshuffle
+ * the other data pushed for the trap.
+ */
+ INTR_POP
+ pushq %rax /* push temp */
+ movq 8(%rsp), %rax /* load calling RIP */
+ movq %rax, 8(%rsp) /* store calling RIP */
+ movq (%rbp), %rax /* get new %rbp */
+ addq $8, %rbp /* adjust new %rsp */
+ movq %rbp, 32(%rsp) /* store new %rsp */
+ movq %rax, %rbp /* set new %rbp */
+ popq %rax /* pop off temp */
+ iretq /* return from interrupt */
+ /*NOTREACHED*/
+
+bp_nop:
+ /* We must emulate a "nop". */
+ INTR_POP
+ iretq
+ /*NOTREACHED*/
+
+bp_ret:
+ INTR_POP
+ pushq %rax /* push temp */
+ movq 32(%rsp), %rax /* load %rsp */
+ movq (%rax), %rax /* load calling RIP */
+ movq %rax, 8(%rsp) /* store calling RIP */
+ addq $8, 32(%rsp) /* adjust new %rsp */
+ popq %rax /* pop off temp */
+ iretq /* return from interrupt */
+ /*NOTREACHED*/
+
END(dtrace_invop_start)
/*
More information about the p4-projects
mailing list