svn commit: r334520 - in head/sys: amd64/amd64 i386/i386 i386/include libkern
Bruce Evans
bde at FreeBSD.org
Sat Jun 2 04:25:11 UTC 2018
Author: bde
Date: Sat Jun 2 04:25:09 2018
New Revision: 334520
URL: https://svnweb.freebsd.org/changeset/base/334520
Log:
Fix recent breakages of kernel profiling, mostly on i386 (high resolution
kernel profiling remains broken).
memmove() was broken using ALTENTRY(). ALTENTRY() is only different from
ENTRY() in the profiling case, and its use in that case was sort of
backwards. The backwardness magically turned memmove() into memcpy()
instead of completely breaking it. Only the high resolution parts of
profiling itself were broken. Use ordinary ENTRY() for memmove().
Turn bcopy() into a tail call to memmove() to reduce complications.
This gives slightly different pessimizations and profiling lossage.
The pessimizations are minimized by not using a frame pointer for
bcopy().
Calls to profiling functions from exception trampolines were not
relocated. This caused crashes on the first exception. Fix this using
function pointers.
Addresses of exception handlers in trampolines were not relocated. This
caused unknown offsets in the profiling data. Relocate by abusing
setidt_disp as for pmc although this is slower than necessary and
requires namespace pollution. pmc seems to be missing some relocations.
Stack traces and lots of other things in debuggers need similar relocations.
Most user addresses were misclassified as unknown kernel addresses and
then ignored. Treat all unknown addresses as user. Now only user
addresses in the kernel text range are significantly misclassified (as
known kernel addresses).
The ibrs functions didn't preserve enough registers. This is the only
recent breakage on amd64. Although these functions are written in
asm, in the profiling case they call profiling functions which are
mostly for the C ABI, so they only have to save call-used registers.
They also have to save arg and return registers in some cases and
actually save them in all cases to reduce complications. They end up
saving all registers except %ecx on i386 and %r10 and %r11 on amd64.
Saving these is only needed for 1 caller on each of amd64 and i386.
Save them there. This is slightly simpler.
Remove saving %ecx in handle_ibrs_exit on i386. Both handle_ibrs_entry
and handle_ibrs_exit use %ecx, but only the latter needed to or did
save it. But saving it there doesn't work for the profiling case.
amd64 has more automatic saving of the most common scratch registers
%rax, %rcx and %rdx (its complications for %r10 are from unusual use
of %r10 by SYSCALL). Thus profiling of handle_ibrs_exit_rs() was not
broken, and I didn't simplify the saving by moving the saving of these
registers from it to the caller.
Modified:
head/sys/amd64/amd64/exception.S
head/sys/i386/i386/exception.s
head/sys/i386/i386/prof_machdep.c
head/sys/i386/i386/support.s
head/sys/i386/include/asmacros.h
head/sys/i386/include/profile.h
head/sys/libkern/mcount.c
Modified: head/sys/amd64/amd64/exception.S
==============================================================================
--- head/sys/amd64/amd64/exception.S Sat Jun 2 04:20:42 2018 (r334519)
+++ head/sys/amd64/amd64/exception.S Sat Jun 2 04:25:09 2018 (r334520)
@@ -463,8 +463,16 @@ fast_syscall_common:
movq PCPU(SCRATCH_RSP),%r11 /* %r11 already saved */
movq %r11,TF_RSP(%rsp) /* user stack pointer */
movq PCPU(SCRATCH_RAX),%rax
+ /*
+ * Save a few arg registers early to free them for use in
+ * handle_ibrs_entry(). %r10 is especially tricky. It is not an
+ * arg register, but it holds the arg register %rcx. Profiling
+ * preserves %rcx, but may clobber %r10. Profiling may also
+ * clobber %r11, but %r11 (original %eflags) has been saved.
+ */
movq %rax,TF_RAX(%rsp) /* syscall number */
movq %rdx,TF_RDX(%rsp) /* arg 3 */
+ movq %r10,TF_RCX(%rsp) /* arg 4 */
SAVE_SEGS
call handle_ibrs_entry
movq PCPU(CURPCB),%r11
@@ -475,7 +483,6 @@ fast_syscall_common:
movq $2,TF_ERR(%rsp)
movq %rdi,TF_RDI(%rsp) /* arg 1 */
movq %rsi,TF_RSI(%rsp) /* arg 2 */
- movq %r10,TF_RCX(%rsp) /* arg 4 */
movq %r8,TF_R8(%rsp) /* arg 5 */
movq %r9,TF_R9(%rsp) /* arg 6 */
movq %rbx,TF_RBX(%rsp) /* C preserved */
Modified: head/sys/i386/i386/exception.s
==============================================================================
--- head/sys/i386/i386/exception.s Sat Jun 2 04:20:42 2018 (r334519)
+++ head/sys/i386/i386/exception.s Sat Jun 2 04:25:09 2018 (r334520)
@@ -516,7 +516,9 @@ doreti_exit:
1: testl $SEL_RPL_MASK, TF_CS(%esp)
jz doreti_popl_fs
2: movl $handle_ibrs_exit,%eax
+ pushl %ecx /* preserve enough call-used regs */
call *%eax
+ popl %ecx
movl %esp, %esi
movl PCPU(TRAMPSTK), %edx
subl %ecx, %edx
Modified: head/sys/i386/i386/prof_machdep.c
==============================================================================
--- head/sys/i386/i386/prof_machdep.c Sat Jun 2 04:20:42 2018 (r334519)
+++ head/sys/i386/i386/prof_machdep.c Sat Jun 2 04:25:09 2018 (r334520)
@@ -117,6 +117,9 @@ __mcount: \n\
.mcount_exit: \n\
ret $0 \n\
");
+
+void __mcount(void);
+void (*__mcountp)(void) = __mcount;
#else /* !__GNUCLIKE_ASM */
#error "this file needs to be ported to your compiler"
#endif /* __GNUCLIKE_ASM */
@@ -162,6 +165,9 @@ GMON_PROF_HIRES = 4 \n\
ret $0 \n\
");
#endif /* __GNUCLIKE_ASM */
+
+void __mexitcount(void);
+void (*__mexitcountp)(void) = __mexitcount;
/*
* Return the time elapsed since the last call. The units are machine-
Modified: head/sys/i386/i386/support.s
==============================================================================
--- head/sys/i386/i386/support.s Sat Jun 2 04:20:42 2018 (r334519)
+++ head/sys/i386/i386/support.s Sat Jun 2 04:25:09 2018 (r334520)
@@ -151,18 +151,19 @@ END(fillw)
* ws at tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
*/
ENTRY(bcopy)
+ movl 4(%esp),%eax
+ movl 8(%esp),%edx
+ movl %eax,8(%esp)
+ movl %edx,4(%esp)
+ MEXITCOUNT
+ jmp memmove
+END(bcopy)
+
+ENTRY(memmove)
pushl %ebp
movl %esp,%ebp
pushl %esi
pushl %edi
- movl 8(%ebp),%esi
- movl 12(%ebp),%edi
- jmp 1f
-ALTENTRY(memmove)
- pushl %ebp
- movl %esp,%ebp
- pushl %esi
- pushl %edi
movl 8(%ebp),%edi
movl 12(%ebp),%esi
1:
@@ -208,7 +209,7 @@ ALTENTRY(memmove)
movl 8(%ebp),%eax /* return dst for memmove */
popl %ebp
ret
-END(bcopy)
+END(memmove)
/*
* Note: memcpy does not support overlapping copies
@@ -463,13 +464,11 @@ END(handle_ibrs_entry)
ENTRY(handle_ibrs_exit)
cmpb $0,PCPU(IBPB_SET)
je 1f
- pushl %ecx
movl $MSR_IA32_SPEC_CTRL,%ecx
rdmsr
andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
wrmsr
- popl %ecx
movb $0,PCPU(IBPB_SET)
1: ret
END(handle_ibrs_exit)
Modified: head/sys/i386/include/asmacros.h
==============================================================================
--- head/sys/i386/include/asmacros.h Sat Jun 2 04:20:42 2018 (r334519)
+++ head/sys/i386/include/asmacros.h Sat Jun 2 04:25:09 2018 (r334520)
@@ -108,11 +108,11 @@
#define CROSSJUMPTARGET(label) \
ALIGN_TEXT; __CONCAT(to,label): ; MCOUNT; jmp label
#define ENTRY(name) GEN_ENTRY(name) ; 9: ; MCOUNT
-#define FAKE_MCOUNT(caller) pushl caller ; call __mcount ; popl %ecx
-#define MCOUNT call __mcount
+#define FAKE_MCOUNT(caller) pushl caller ; call *__mcountp ; popl %ecx
+#define MCOUNT call *__mcountp
#define MCOUNT_LABEL(name) GEN_ENTRY(name) ; nop ; ALIGN_TEXT
#ifdef GUPROF
-#define MEXITCOUNT call .mexitcount
+#define MEXITCOUNT call *__mexitcountp
#define ret MEXITCOUNT ; NON_GPROF_RET
#else
#define MEXITCOUNT
Modified: head/sys/i386/include/profile.h
==============================================================================
--- head/sys/i386/include/profile.h Sat Jun 2 04:20:42 2018 (r334519)
+++ head/sys/i386/include/profile.h Sat Jun 2 04:25:09 2018 (r334520)
@@ -92,15 +92,28 @@ extern int mcount_lock;
void bintr(void);
void btrap(void);
void eintr(void);
+#if 0
+void end_exceptions(void);
+void start_exceptions(void);
+#else
+#include <machine/pmc_mdep.h> /* XXX */
+#endif
void user(void);
-#define MCOUNT_FROMPC_USER(pc) \
- ((pc < (uintfptr_t)VM_MAXUSER_ADDRESS) ? (uintfptr_t)user : pc)
+#include <machine/md_var.h> /* XXX for setidt_disp */
+#define MCOUNT_DETRAMP(pc) do { \
+ if ((pc) >= (uintfptr_t)start_exceptions + setidt_disp && \
+ (pc) < (uintfptr_t)end_exceptions + setidt_disp) \
+ (pc) -= setidt_disp; \
+} while (0)
+
#define MCOUNT_FROMPC_INTR(pc) \
((pc >= (uintfptr_t)btrap && pc < (uintfptr_t)eintr) ? \
((pc >= (uintfptr_t)bintr) ? (uintfptr_t)bintr : \
(uintfptr_t)btrap) : ~0U)
+
+#define MCOUNT_USERPC ((uintfptr_t)user)
#else /* !_KERNEL */
Modified: head/sys/libkern/mcount.c
==============================================================================
--- head/sys/libkern/mcount.c Sat Jun 2 04:20:42 2018 (r334519)
+++ head/sys/libkern/mcount.c Sat Jun 2 04:25:09 2018 (r334520)
@@ -88,13 +88,28 @@ _MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc) /*
#endif
#ifdef _KERNEL
+ /* De-relocate any addresses in a (single) trampoline. */
+#ifdef MCOUNT_DETRAMP
+ MCOUNT_DETRAMP(frompc);
+ MCOUNT_DETRAMP(selfpc);
+#endif
/*
* When we are called from an exception handler, frompc may be
* a user address. Convert such frompc's to some representation
* in kernel address space.
*/
+#ifdef MCOUNT_FROMPC_USER
frompc = MCOUNT_FROMPC_USER(frompc);
+#elif defined(MCOUNT_USERPC)
+ /*
+ * For separate address spaces, we can only guess that addresses
+ * in the range known to us are actually kernel addresses. Outside
+ * of this range, converting to the user address is fail-safe.
+ */
+ if (frompc < p->lowpc || frompc - p->lowpc >= p->textsize)
+ frompc = MCOUNT_USERPC;
#endif
+#endif /* _KERNEL */
frompci = frompc - p->lowpc;
if (frompci >= p->textsize)
@@ -252,6 +267,9 @@ mexitcount(uintfptr_t selfpc)
uintfptr_t selfpcdiff;
p = &_gmonparam;
+#ifdef MCOUNT_DETRAMP
+ MCOUNT_DETRAMP(selfpc);
+#endif
selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
if (selfpcdiff < p->textsize) {
int delta;
More information about the svn-src-all
mailing list