svn commit: r192575 - in user/kmacy/releng_7_2_fcs: lib/libpmc
sys/amd64/amd64 sys/amd64/include sys/conf sys/dev/coretemp
sys/dev/hwpmc sys/i386/cpufreq sys/i386/i386 sys/i386/include
sys/kern sys...
Kip Macy
kmacy at FreeBSD.org
Fri May 22 04:50:00 UTC 2009
Author: kmacy
Date: Fri May 22 04:49:59 2009
New Revision: 192575
URL: http://svn.freebsd.org/changeset/base/192575
Log:
MFC callchain, core, and i7 support from HEAD
Added:
user/kmacy/releng_7_2_fcs/lib/libpmc/libpmcinternal.h
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_core.c
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_core.h
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_intel.c
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_tsc.c
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_tsc.h
Modified:
user/kmacy/releng_7_2_fcs/lib/libpmc/libpmc.c
user/kmacy/releng_7_2_fcs/lib/libpmc/pmclog.c
user/kmacy/releng_7_2_fcs/lib/libpmc/pmclog.h
user/kmacy/releng_7_2_fcs/sys/amd64/amd64/amd64_mem.c
user/kmacy/releng_7_2_fcs/sys/amd64/amd64/exception.S
user/kmacy/releng_7_2_fcs/sys/amd64/amd64/genassym.c
user/kmacy/releng_7_2_fcs/sys/amd64/amd64/identcpu.c
user/kmacy/releng_7_2_fcs/sys/amd64/amd64/initcpu.c
user/kmacy/releng_7_2_fcs/sys/amd64/amd64/local_apic.c
user/kmacy/releng_7_2_fcs/sys/amd64/amd64/mp_machdep.c
user/kmacy/releng_7_2_fcs/sys/amd64/amd64/msi.c
user/kmacy/releng_7_2_fcs/sys/amd64/amd64/trap.c
user/kmacy/releng_7_2_fcs/sys/amd64/include/cputypes.h
user/kmacy/releng_7_2_fcs/sys/amd64/include/md_var.h
user/kmacy/releng_7_2_fcs/sys/amd64/include/pmc_mdep.h
user/kmacy/releng_7_2_fcs/sys/amd64/include/specialreg.h
user/kmacy/releng_7_2_fcs/sys/conf/Makefile.amd64
user/kmacy/releng_7_2_fcs/sys/conf/files.amd64
user/kmacy/releng_7_2_fcs/sys/conf/files.i386
user/kmacy/releng_7_2_fcs/sys/conf/files.pc98
user/kmacy/releng_7_2_fcs/sys/dev/coretemp/coretemp.c
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_amd.c
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_amd.h
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_logging.c
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_mod.c
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_pentium.c
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_pentium.h
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_piv.c
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_piv.h
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_ppro.c
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_ppro.h
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/hwpmc_x86.c
user/kmacy/releng_7_2_fcs/sys/dev/hwpmc/pmc_events.h
user/kmacy/releng_7_2_fcs/sys/i386/cpufreq/est.c
user/kmacy/releng_7_2_fcs/sys/i386/i386/exception.s
user/kmacy/releng_7_2_fcs/sys/i386/i386/genassym.c
user/kmacy/releng_7_2_fcs/sys/i386/i386/i686_mem.c
user/kmacy/releng_7_2_fcs/sys/i386/i386/identcpu.c
user/kmacy/releng_7_2_fcs/sys/i386/i386/initcpu.c
user/kmacy/releng_7_2_fcs/sys/i386/i386/k6_mem.c
user/kmacy/releng_7_2_fcs/sys/i386/i386/local_apic.c
user/kmacy/releng_7_2_fcs/sys/i386/i386/longrun.c
user/kmacy/releng_7_2_fcs/sys/i386/i386/mp_machdep.c
user/kmacy/releng_7_2_fcs/sys/i386/i386/msi.c
user/kmacy/releng_7_2_fcs/sys/i386/i386/trap.c
user/kmacy/releng_7_2_fcs/sys/i386/include/cputypes.h
user/kmacy/releng_7_2_fcs/sys/i386/include/md_var.h
user/kmacy/releng_7_2_fcs/sys/i386/include/pmc_mdep.h
user/kmacy/releng_7_2_fcs/sys/i386/include/specialreg.h
user/kmacy/releng_7_2_fcs/sys/kern/kern_descrip.c
user/kmacy/releng_7_2_fcs/sys/kern/kern_pmc.c
user/kmacy/releng_7_2_fcs/sys/kern/subr_trap.c
user/kmacy/releng_7_2_fcs/sys/modules/hwpmc/Makefile
user/kmacy/releng_7_2_fcs/sys/sys/pmc.h
user/kmacy/releng_7_2_fcs/sys/sys/pmckern.h
user/kmacy/releng_7_2_fcs/sys/sys/pmclog.h
user/kmacy/releng_7_2_fcs/sys/sys/proc.h
user/kmacy/releng_7_2_fcs/usr.sbin/pmcstat/Makefile
user/kmacy/releng_7_2_fcs/usr.sbin/pmcstat/pmcstat.8
user/kmacy/releng_7_2_fcs/usr.sbin/pmcstat/pmcstat.c
user/kmacy/releng_7_2_fcs/usr.sbin/pmcstat/pmcstat.h
user/kmacy/releng_7_2_fcs/usr.sbin/pmcstat/pmcstat_log.c
Modified: user/kmacy/releng_7_2_fcs/lib/libpmc/libpmc.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/lib/libpmc/libpmc.c Fri May 22 01:15:07 2009 (r192574)
+++ user/kmacy/releng_7_2_fcs/lib/libpmc/libpmc.c Fri May 22 04:49:59 2009 (r192575)
@@ -42,12 +42,18 @@ __FBSDID("$FreeBSD$");
#include <strings.h>
#include <unistd.h>
+#include "libpmcinternal.h"
+
/* Function prototypes */
#if defined(__i386__)
static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
+static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+ struct pmc_op_pmcallocate *_pmc_config);
+static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+ struct pmc_op_pmcallocate *_pmc_config);
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
struct pmc_op_pmcallocate *_pmc_config);
static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
@@ -88,14 +94,131 @@ struct pmc_event_descr {
enum pmc_class pm_ev_class;
};
-static const struct pmc_event_descr
-pmc_event_table[] =
+/*
+ * The pmc_class_descr structure maps class name prefixes for
+ * event names to event tables and other PMC class data.
+ */
+struct pmc_class_descr {
+ const char *pm_evc_name;
+ size_t pm_evc_name_size;
+ enum pmc_class pm_evc_class;
+ const struct pmc_event_descr *pm_evc_event_table;
+ size_t pm_evc_event_table_size;
+ int (*pm_evc_allocate_pmc)(enum pmc_event _pe,
+ char *_ctrspec, struct pmc_op_pmcallocate *_pa);
+};
+
+#define PMC_TABLE_SIZE(N) (sizeof(N)/sizeof(N[0]))
+#define PMC_EVENT_TABLE_SIZE(N) PMC_TABLE_SIZE(N##_event_table)
+
+#undef __PMC_EV
+#define __PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
+
+/*
+ * PMC_CLASSDEP_TABLE(NAME, CLASS)
+ *
+ * Define a table mapping event names and aliases to HWPMC event IDs.
+ */
+#define PMC_CLASSDEP_TABLE(N, C) \
+ static const struct pmc_event_descr N##_event_table[] = \
+ { \
+ __PMC_EV_##C() \
+ }
+
+PMC_CLASSDEP_TABLE(iaf, IAF);
+PMC_CLASSDEP_TABLE(k7, K7);
+PMC_CLASSDEP_TABLE(k8, K8);
+PMC_CLASSDEP_TABLE(p4, P4);
+PMC_CLASSDEP_TABLE(p5, P5);
+PMC_CLASSDEP_TABLE(p6, P6);
+
+#undef __PMC_EV_ALIAS
+#define __PMC_EV_ALIAS(N,CODE) { N, PMC_EV_##CODE },
+
+static const struct pmc_event_descr atom_event_table[] =
+{
+ __PMC_EV_ALIAS_ATOM()
+};
+
+static const struct pmc_event_descr core_event_table[] =
+{
+ __PMC_EV_ALIAS_CORE()
+};
+
+
+static const struct pmc_event_descr core2_event_table[] =
+{
+ __PMC_EV_ALIAS_CORE2()
+};
+
+/*
+ * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
+ *
+ * Map a CPU to the PMC classes it supports.
+ */
+#define PMC_MDEP_TABLE(N,C,...) \
+ static const enum pmc_class N##_pmc_classes[] = { \
+ PMC_CLASS_##C, __VA_ARGS__ \
+ }
+
+PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
+PMC_MDEP_TABLE(core, IAP, PMC_CLASS_TSC);
+PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
+PMC_MDEP_TABLE(k7, K7, PMC_CLASS_TSC);
+PMC_MDEP_TABLE(k8, K8, PMC_CLASS_TSC);
+PMC_MDEP_TABLE(p4, P4, PMC_CLASS_TSC);
+PMC_MDEP_TABLE(p5, P5, PMC_CLASS_TSC);
+PMC_MDEP_TABLE(p6, P6, PMC_CLASS_TSC);
+
+static const struct pmc_event_descr tsc_event_table[] =
{
#undef __PMC_EV
#define __PMC_EV(C,N,EV) { #EV, PMC_EV_ ## C ## _ ## N, PMC_CLASS_ ## C },
__PMC_EVENTS()
};
+#undef PMC_CLASS_TABLE_DESC
+#define PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR) \
+static const struct pmc_class_descr NAME##_class_table_descr = \
+ { \
+ .pm_evc_name = #CLASS "-", \
+ .pm_evc_name_size = sizeof(#CLASS "-") - 1, \
+ .pm_evc_class = PMC_CLASS_##CLASS , \
+ .pm_evc_event_table = EVENTS##_event_table , \
+ .pm_evc_event_table_size = \
+ PMC_EVENT_TABLE_SIZE(EVENTS), \
+ .pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc \
+ }
+
+#if defined(__i386__) || defined(__amd64__)
+PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
+PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
+PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
+PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
+#endif
+#if defined(__i386__)
+PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
+#endif
+#if defined(__i386__) || defined(__amd64__)
+PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
+PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
+#endif
+#if defined(__i386__)
+PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
+PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
+#endif
+#if defined(__i386__) || defined(__amd64__)
+PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
+#endif
+
+#undef PMC_CLASS_TABLE_DESC
+
+static const struct pmc_class_descr **pmc_class_table;
+#define PMC_CLASS_TABLE_SIZE cpu_info.pm_nclass
+
+static const enum pmc_class *pmc_mdep_class_list;
+static size_t pmc_mdep_class_list_size;
+
/*
* Mapping tables, mapping enumeration values to human readable
* strings.
@@ -304,6 +427,238 @@ k7_allocate_pmc(enum pmc_event pe, char
#if defined(__amd64__) || defined(__i386__)
/*
+ * Intel Core (Family 6, Model E) PMCs.
+ */
+
+static struct pmc_event_alias core_aliases[] = {
+ EV_ALIAS("branches", "iap-br-instr-ret"),
+ EV_ALIAS("branch-mispredicts", "iap-br-mispred-ret"),
+ EV_ALIAS("cycles", "tsc-tsc"),
+ EV_ALIAS("ic-misses", "iap-icache-misses"),
+ EV_ALIAS("instructions", "iap-instr-ret"),
+ EV_ALIAS("interrupts", "iap-core-hw-int-rx"),
+ EV_ALIAS("unhalted-cycles", "iap-unhalted-core-cycles"),
+ EV_ALIAS(NULL, NULL)
+};
+
+/*
+ * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
+ * and Atom (Family 6, model 1CH) PMCs.
+ */
+
+static struct pmc_event_alias core2_aliases[] = {
+ EV_ALIAS("branches", "iap-br-inst-retired.any"),
+ EV_ALIAS("branch-mispredicts", "iap-br-inst-retired.mispred"),
+ EV_ALIAS("cycles", "tsc-tsc"),
+ EV_ALIAS("ic-misses", "iap-l1i-misses"),
+ EV_ALIAS("instructions", "iaf-instr-retired.any"),
+ EV_ALIAS("interrupts", "iap-hw-int-rcv"),
+ EV_ALIAS("unhalted-cycles", "iaf-cpu-clk-unhalted.core"),
+ EV_ALIAS(NULL, NULL)
+};
+#define atom_aliases core2_aliases
+
+#define IAF_KW_OS "os"
+#define IAF_KW_USR "usr"
+#define IAF_KW_ANYTHREAD "anythread"
+
+/*
+ * Parse an event specifier for Intel fixed function counters.
+ */
+static int
+iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
+ struct pmc_op_pmcallocate *pmc_config)
+{
+ char *p;
+
+ (void) pe;
+
+ pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
+ pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
+
+ while ((p = strsep(&ctrspec, ",")) != NULL) {
+ if (KWMATCH(p, IAF_KW_OS))
+ pmc_config->pm_caps |= PMC_CAP_SYSTEM;
+ else if (KWMATCH(p, IAF_KW_USR))
+ pmc_config->pm_caps |= PMC_CAP_USER;
+ else if (KWMATCH(p, IAF_KW_ANYTHREAD))
+ pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
+ else
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Core/Core2 support.
+ */
+
+#define IAP_KW_AGENT "agent"
+#define IAP_KW_ANYTHREAD "anythread"
+#define IAP_KW_CACHESTATE "cachestate"
+#define IAP_KW_CMASK "cmask"
+#define IAP_KW_CORE "core"
+#define IAP_KW_EDGE "edge"
+#define IAP_KW_INV "inv"
+#define IAP_KW_OS "os"
+#define IAP_KW_PREFETCH "prefetch"
+#define IAP_KW_SNOOPRESPONSE "snoopresponse"
+#define IAP_KW_SNOOPTYPE "snooptype"
+#define IAP_KW_TRANSITION "trans"
+#define IAP_KW_USR "usr"
+
+static struct pmc_masks iap_core_mask[] = {
+ PMCMASK(all, (0x3 << 14)),
+ PMCMASK(this, (0x1 << 14)),
+ NULLMASK
+};
+
+static struct pmc_masks iap_agent_mask[] = {
+ PMCMASK(this, 0),
+ PMCMASK(any, (0x1 << 13)),
+ NULLMASK
+};
+
+static struct pmc_masks iap_prefetch_mask[] = {
+ PMCMASK(both, (0x3 << 12)),
+ PMCMASK(only, (0x1 << 12)),
+ PMCMASK(exclude, 0),
+ NULLMASK
+};
+
+static struct pmc_masks iap_cachestate_mask[] = {
+ PMCMASK(i, (1 << 8)),
+ PMCMASK(s, (1 << 9)),
+ PMCMASK(e, (1 << 10)),
+ PMCMASK(m, (1 << 11)),
+ NULLMASK
+};
+
+static struct pmc_masks iap_snoopresponse_mask[] = {
+ PMCMASK(clean, (1 << 8)),
+ PMCMASK(hit, (1 << 9)),
+ PMCMASK(hitm, (1 << 11)),
+ NULLMASK
+};
+
+static struct pmc_masks iap_snooptype_mask[] = {
+ PMCMASK(cmp2s, (1 << 8)),
+ PMCMASK(cmp2i, (1 << 9)),
+ NULLMASK
+};
+
+static struct pmc_masks iap_transition_mask[] = {
+ PMCMASK(any, 0x00),
+ PMCMASK(frequency, 0x10),
+ NULLMASK
+};
+
+static int
+iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
+ struct pmc_op_pmcallocate *pmc_config)
+{
+ char *e, *p, *q;
+ uint32_t cachestate, evmask;
+ int count, n;
+
+ pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
+ PMC_CAP_QUALIFIER);
+ pmc_config->pm_md.pm_iap.pm_iap_config = 0;
+
+ cachestate = evmask = 0;
+
+ /* Parse additional modifiers if present */
+ while ((p = strsep(&ctrspec, ",")) != NULL) {
+
+ n = 0;
+ if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
+ q = strchr(p, '=');
+ if (*++q == '\0') /* skip '=' */
+ return (-1);
+ count = strtol(q, &e, 0);
+ if (e == q || *e != '\0')
+ return (-1);
+ pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
+ pmc_config->pm_md.pm_iap.pm_iap_config |=
+ IAP_CMASK(count);
+ } else if (KWMATCH(p, IAP_KW_EDGE)) {
+ pmc_config->pm_caps |= PMC_CAP_EDGE;
+ } else if (KWMATCH(p, IAP_KW_INV)) {
+ pmc_config->pm_caps |= PMC_CAP_INVERT;
+ } else if (KWMATCH(p, IAP_KW_OS)) {
+ pmc_config->pm_caps |= PMC_CAP_SYSTEM;
+ } else if (KWMATCH(p, IAP_KW_USR)) {
+ pmc_config->pm_caps |= PMC_CAP_USER;
+ } else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
+ pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
+ } else if (KWMATCH(p, IAP_KW_CORE)) {
+ n = pmc_parse_mask(iap_core_mask, p, &evmask);
+ if (n != 1)
+ return (-1);
+ } else if (KWMATCH(p, IAP_KW_AGENT)) {
+ n = pmc_parse_mask(iap_agent_mask, p, &evmask);
+ if (n != 1)
+ return (-1);
+ } else if (KWMATCH(p, IAP_KW_PREFETCH)) {
+ n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
+ if (n != 1)
+ return (-1);
+ } else if (KWMATCH(p, IAP_KW_CACHESTATE)) {
+ n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
+ } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
+ KWMATCH(p, IAP_KW_TRANSITION)) {
+ n = pmc_parse_mask(iap_transition_mask, p, &evmask);
+ if (n != 1)
+ return (-1);
+ } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
+ cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
+ cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME) {
+ if (KWMATCH(p, IAP_KW_SNOOPRESPONSE)) {
+ n = pmc_parse_mask(iap_snoopresponse_mask, p,
+ &evmask);
+ } else if (KWMATCH(p, IAP_KW_SNOOPTYPE)) {
+ n = pmc_parse_mask(iap_snooptype_mask, p,
+ &evmask);
+ } else
+ return (-1);
+ } else
+ return (-1);
+
+ if (n < 0) /* Parsing failed. */
+ return (-1);
+ }
+
+ pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;
+
+ /*
+ * If the event requires a 'cachestate' qualifier but was not
+ * specified by the user, use a sensible default.
+ */
+ switch (pe) {
+ case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
+ case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
+ case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
+ case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
+ case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
+ case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
+ case PMC_EV_IAP_EVENT_32H: /* Core */
+ case PMC_EV_IAP_EVENT_40H: /* Core */
+ case PMC_EV_IAP_EVENT_41H: /* Core */
+ case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
+ case PMC_EV_IAP_EVENT_77H: /* Core */
+ if (cachestate == 0)
+ cachestate = (0xF << 8);
+ default:
+ break;
+ }
+
+ pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
+
+ return (0);
+}
+
+/*
* AMD K8 PMCs.
*
* These are very similar to AMD K7 PMCs, but support more kinds of
@@ -1637,6 +1992,75 @@ p6_allocate_pmc(enum pmc_event pe, char
#endif
/*
+ * Match an event name `name' with its canonical form.
+ *
+ * Matches are case insensitive and spaces, periods, underscores and
+ * hyphen characters are considered to match each other.
+ *
+ * Returns 1 for a match, 0 otherwise.
+ */
+
+static int
+pmc_match_event_name(const char *name, const char *canonicalname)
+{
+ int cc, nc;
+ const unsigned char *c, *n;
+
+ c = (const unsigned char *) canonicalname;
+ n = (const unsigned char *) name;
+
+ for (; (nc = *n) && (cc = *c); n++, c++) {
+
+ if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
+ (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
+ continue;
+
+ if (toupper(nc) == toupper(cc))
+ continue;
+
+
+ return (0);
+ }
+
+ if (*n == '\0' && *c == '\0')
+ return (1);
+
+ return (0);
+}
+
+/*
+ * Match an event name against all the event named supported by a
+ * PMC class.
+ *
+ * Returns an event descriptor pointer on match or NULL otherwise.
+ */
+static const struct pmc_event_descr *
+pmc_match_event_class(const char *name,
+ const struct pmc_class_descr *pcd)
+{
+ size_t n;
+ const struct pmc_event_descr *ev;
+
+ ev = pcd->pm_evc_event_table;
+ for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
+ if (pmc_match_event_name(name, ev->pm_ev_name))
+ return (ev);
+
+ return (NULL);
+}
+
+static int
+pmc_mdep_is_compatible_class(enum pmc_class pc)
+{
+ size_t n;
+
+ for (n = 0; n < pmc_mdep_class_list_size; n++)
+ if (pmc_mdep_class_list[n] == pc)
+ return (1);
+ return (0);
+}
+
+/*
* API entry points
*/
@@ -1675,13 +2099,34 @@ pmc_allocate(const char *ctrspec, enum p
r = spec_copy;
ctrname = strsep(&r, ",");
- /* look for the given counter name */
-
- for (pe = PMC_EVENT_FIRST; pe < (PMC_EVENT_LAST+1); pe++)
- if (!strcmp(ctrname, pmc_event_table[pe].pm_ev_name))
+ /*
+ * If a explicit class prefix was given by the user, restrict the
+ * search for the event to the specified PMC class.
+ */
+ ev = NULL;
+ for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
+ pcd = pmc_class_table[n];
+ if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
+ strncasecmp(ctrname, pcd->pm_evc_name,
+ pcd->pm_evc_name_size) == 0) {
+ if ((ev = pmc_match_event_class(ctrname +
+ pcd->pm_evc_name_size, pcd)) == NULL) {
+ errno = EINVAL;
+ goto out;
+ }
+ break;
+ }
+ }
+
- if (pe > PMC_EVENT_LAST) {
+ /*
+ * Otherwise, search for this event in all compatible PMC
+ * classes.
+ */
+ for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
+ pcd = pmc_class_table[n];
+ if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
+ ev = pmc_match_event_class(ctrname, pcd);
+ }
+
+ if (ev == NULL) {
errno = EINVAL;
goto out;
}
@@ -1817,6 +2262,32 @@ pmc_event_names_of_class(enum pmc_class
switch (cl)
{
+ case PMC_CLASS_IAF:
+ ev = iaf_event_table;
+ count = PMC_EVENT_TABLE_SIZE(iaf);
+ break;
+ case PMC_CLASS_IAP:
+ /*
+ * Return the most appropriate set of event name
+ * spellings for the current CPU.
+ */
+ switch (cpu_info.pm_cputype) {
+ default:
+ case PMC_CPU_INTEL_ATOM:
+ ev = atom_event_table;
+ count = PMC_EVENT_TABLE_SIZE(atom);
+ break;
+ case PMC_CPU_INTEL_CORE:
+ ev = core_event_table;
+ count = PMC_EVENT_TABLE_SIZE(core);
+ break;
+ case PMC_CPU_INTEL_CORE2:
+ case PMC_CPU_INTEL_CORE2EXTREME:
+ ev = core2_event_table;
+ count = PMC_EVENT_TABLE_SIZE(core2);
+ break;
+ }
+ break;
case PMC_CLASS_TSC:
ev = &pmc_event_table[PMC_EV_TSC_TSC];
count = 1;
@@ -1939,16 +2410,42 @@ pmc_init(void)
for (n = 0; n < cpu_info.pm_nclass; n++)
cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];
- /* set parser pointer */
+ pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
+ sizeof(struct pmc_class_descr *));
+
+ if (pmc_class_table == NULL)
+ return (-1);
+
+
+ /*
+ * Fill in the class table.
+ */
+ n = 0;
+#if defined(__amd64__) || defined(__i386__)
+ pmc_class_table[n++] = &tsc_class_table_descr;
+#endif
+
+#define PMC_MDEP_INIT(C) do { \
+ pmc_mdep_event_aliases = C##_aliases; \
+ pmc_mdep_class_list = C##_pmc_classes; \
+ pmc_mdep_class_list_size = \
+ PMC_TABLE_SIZE(C##_pmc_classes); \
+ } while (0)
+
+ /* Configure the event name parser. */
switch (cpu_info.pm_cputype) {
#if defined(__i386__)
case PMC_CPU_AMD_K7:
pmc_mdep_event_aliases = k7_aliases;
pmc_mdep_allocate_pmc = k7_allocate_pmc;
+ PMC_MDEP_INIT(k7);
+ pmc_class_table[n] = &k7_class_table_descr;
break;
case PMC_CPU_INTEL_P5:
pmc_mdep_event_aliases = p5_aliases;
pmc_mdep_allocate_pmc = p5_allocate_pmc;
+ PMC_MDEP_INIT(p5);
+ pmc_class_table[n] = &p5_class_table_descr;
break;
case PMC_CPU_INTEL_P6: /* P6 ... Pentium M CPUs have */
case PMC_CPU_INTEL_PII: /* similar PMCs. */
@@ -1956,12 +2453,35 @@ pmc_init(void)
case PMC_CPU_INTEL_PM:
pmc_mdep_event_aliases = p6_aliases;
pmc_mdep_allocate_pmc = p6_allocate_pmc;
+ PMC_MDEP_INIT(p6);
+ pmc_class_table[n] = &p6_class_table_descr;
break;
#endif
#if defined(__amd64__) || defined(__i386__)
+ case PMC_CPU_AMD_K8:
+ PMC_MDEP_INIT(k8);
+ pmc_class_table[n] = &k8_class_table_descr;
+ break;
+ case PMC_CPU_INTEL_ATOM:
+ PMC_MDEP_INIT(atom);
+ pmc_class_table[n++] = &iaf_class_table_descr;
+ pmc_class_table[n] = &atom_class_table_descr;
+ break;
+ case PMC_CPU_INTEL_CORE:
+ PMC_MDEP_INIT(core);
+ pmc_class_table[n] = &core_class_table_descr;
+ break;
+ case PMC_CPU_INTEL_CORE2:
+ case PMC_CPU_INTEL_CORE2EXTREME:
+ PMC_MDEP_INIT(core2);
+ pmc_class_table[n++] = &iaf_class_table_descr;
+ pmc_class_table[n] = &core2_class_table_descr;
+ break;
case PMC_CPU_INTEL_PIV:
pmc_mdep_event_aliases = p4_aliases;
pmc_mdep_allocate_pmc = p4_allocate_pmc;
+ PMC_MDEP_INIT(p4);
+ pmc_class_table[n] = &p4_class_table_descr;
break;
- case PMC_CPU_AMD_K8:
- pmc_mdep_event_aliases = k8_aliases;
@@ -1969,6 +2489,7 @@ pmc_init(void)
break;
#endif
+
default:
/*
* Some kind of CPU this version of the library knows nothing
@@ -2036,12 +2557,81 @@ pmc_name_of_disposition(enum pmc_disp pd
}
const char *
-pmc_name_of_event(enum pmc_event pe)
+_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
{
if ((int) pe >= PMC_EVENT_FIRST &&
pe <= PMC_EVENT_LAST)
return pmc_event_table[pe].pm_ev_name;
+ const struct pmc_event_descr *ev, *evfence;
+
+ ev = evfence = NULL;
+ if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
+ ev = iaf_event_table;
+ evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
+ } else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
+ switch (cpu) {
+ case PMC_CPU_INTEL_ATOM:
+ ev = atom_event_table;
+ evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
+ break;
+ case PMC_CPU_INTEL_CORE:
+ ev = core_event_table;
+ evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
+ break;
+ case PMC_CPU_INTEL_CORE2:
+ case PMC_CPU_INTEL_CORE2EXTREME:
+ ev = core2_event_table;
+ evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
+ break;
+ default: /* Unknown CPU type. */
+ break;
+ }
+ } else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
+ ev = k7_event_table;
+ evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
+ } else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
+ ev = k8_event_table;
+ evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
+ } else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
+ ev = p4_event_table;
+ evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
+ } else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
+ ev = p5_event_table;
+ evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
+ } else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
+ ev = p6_event_table;
+ evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
+ } else if (pe == PMC_EV_TSC_TSC) {
+ ev = tsc_event_table;
+ evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
+ }
+
+ for (; ev != evfence; ev++)
+ if (pe == ev->pm_ev_code)
+ return (ev->pm_ev_name);
+
+ return (NULL);
+}
+
+const char *
+pmc_name_of_event(enum pmc_event pe)
+{
+ const char *n;
+
+ if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
+ return (n);
+
errno = EINVAL;
return NULL;
}
Added: user/kmacy/releng_7_2_fcs/lib/libpmc/libpmcinternal.h
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ user/kmacy/releng_7_2_fcs/lib/libpmc/libpmcinternal.h Fri May 22 04:49:59 2009 (r192575)
@@ -0,0 +1,37 @@
+/*-
+ * Copyright (c) 2008 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef LIBPMC_INTERNAL_H
+#define LIBPMC_INTERNAL_H 1
+
+/*
+ * Prototypes.
+ */
+const char *_pmc_name_of_event(enum pmc_event _ev, enum pmc_cputype _cpu);
+
+#endif /* LIBPMC_INTERNAL_H */
Modified: user/kmacy/releng_7_2_fcs/lib/libpmc/pmclog.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/lib/libpmc/pmclog.c Fri May 22 01:15:07 2009 (r192574)
+++ user/kmacy/releng_7_2_fcs/lib/libpmc/pmclog.c Fri May 22 04:49:59 2009 (r192575)
@@ -1,7 +1,11 @@
/*-
- * Copyright (c) 2005-2006 Joseph Koshy
+ * Copyright (c) 2005-2007 Joseph Koshy
+ * Copyright (c) 2007 The FreeBSD Foundation
* All rights reserved.
*
+ * Portions of this software were developed by A. Joseph Koshy under
+ * sponsorship from the FreeBSD Foundation and Google, Inc.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -43,6 +47,8 @@ __FBSDID("$FreeBSD$");
#include <machine/pmc_mdep.h>
+#include "libpmcinternal.h"
+
#define PMCLOG_BUFFER_SIZE 4096
/*
@@ -270,7 +276,7 @@ pmclog_get_event(void *cookie, char **da
struct pmclog_ev *ev)
{
int evlen, pathlen;
- uint32_t h, *le;
+ uint32_t h, *le, npc;
enum pmclog_parser_state e;
struct pmclog_parse_state *ps;
@@ -310,7 +316,22 @@ pmclog_get_event(void *cookie, char **da
goto error; \
} while (0)
+#define PMCLOG_GET_CALLCHAIN_SIZE(SZ,E) do { \
+ (SZ) = ((E) - offsetof(struct pmclog_callchain, pl_pc)) \
+ / sizeof(uintfptr_t); \
+ } while (0);
+
switch (ev->pl_type = PMCLOG_HEADER_TO_TYPE(h)) {
+ case PMCLOG_TYPE_CALLCHAIN:
+ PMCLOG_READ32(le,ev->pl_u.pl_cc.pl_pid);
+ PMCLOG_READ32(le,ev->pl_u.pl_cc.pl_pmcid);
+ PMCLOG_READ32(le,ev->pl_u.pl_cc.pl_cpuflags);
+ PMCLOG_GET_CALLCHAIN_SIZE(ev->pl_u.pl_cc.pl_npc,evlen);
+ for (npc = 0; npc < ev->pl_u.pl_cc.pl_npc; npc++)
+ PMCLOG_READADDR(le,ev->pl_u.pl_cc.pl_pc[npc]);
+ for (;npc < PMC_CALLCHAIN_DEPTH_MAX; npc++)
+ ev->pl_u.pl_cc.pl_pc[npc] = (uintfptr_t) 0;
+ break;
case PMCLOG_TYPE_CLOSELOG:
case PMCLOG_TYPE_DROPNOTIFY:
/* nothing to do */
@@ -344,7 +365,8 @@ pmclog_get_event(void *cookie, char **da
PMCLOG_READ32(le,ev->pl_u.pl_a.pl_event);
PMCLOG_READ32(le,ev->pl_u.pl_a.pl_flags);
if ((ev->pl_u.pl_a.pl_evname =
- pmc_name_of_event(ev->pl_u.pl_a.pl_event)) == NULL)
+ _pmc_name_of_event(ev->pl_u.pl_a.pl_event, ps->ps_arch))
+ == NULL)
goto error;
break;
case PMCLOG_TYPE_PMCATTACH:
@@ -387,7 +409,7 @@ pmclog_get_event(void *cookie, char **da
default: /* unknown record type */
ps->ps_state = PL_STATE_ERROR;
ev->pl_state = PMCLOG_ERROR;
- return -1;
+ return (-1);
}
ev->pl_offset = (ps->ps_offset += evlen);
Modified: user/kmacy/releng_7_2_fcs/lib/libpmc/pmclog.h
==============================================================================
--- user/kmacy/releng_7_2_fcs/lib/libpmc/pmclog.h Fri May 22 01:15:07 2009 (r192574)
+++ user/kmacy/releng_7_2_fcs/lib/libpmc/pmclog.h Fri May 22 04:49:59 2009 (r192575)
@@ -1,7 +1,11 @@
/*-
- * Copyright (c) 2005-2006 Joseph Koshy
+ * Copyright (c) 2005-2007 Joseph Koshy
+ * Copyright (c) 2007 The FreeBSD Foundation
* All rights reserved.
*
+ * Portions of this software were developed by A. Joseph Koshy under
+ * sponsorship from the FreeBSD Foundation and Google, Inc.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -38,6 +42,14 @@ enum pmclog_state {
PMCLOG_ERROR
};
+struct pmclog_ev_callchain {
+ uint32_t pl_pid;
+ uint32_t pl_pmcid;
+ uint32_t pl_cpuflags;
+ uint32_t pl_npc;
+ uintfptr_t pl_pc[PMC_CALLCHAIN_DEPTH_MAX];
+};
+
struct pmclog_ev_dropnotify {
};
@@ -125,6 +137,7 @@ struct pmclog_ev {
struct timespec pl_ts; /* log entry timestamp */
enum pmclog_type pl_type; /* type of log entry */
union { /* log entry data */
+ struct pmclog_ev_callchain pl_cc;
struct pmclog_ev_closelog pl_cl;
struct pmclog_ev_dropnotify pl_dn;
struct pmclog_ev_initialize pl_i;
Modified: user/kmacy/releng_7_2_fcs/sys/amd64/amd64/amd64_mem.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/amd64/amd64/amd64_mem.c Fri May 22 01:15:07 2009 (r192574)
+++ user/kmacy/releng_7_2_fcs/sys/amd64/amd64/amd64_mem.c Fri May 22 04:49:59 2009 (r192575)
@@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$");
#include <sys/smp.h>
#include <sys/sysctl.h>
+#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
@@ -677,8 +678,8 @@ amd64_mem_drvinit(void *unused)
return;
if ((cpu_id & 0xf00) != 0x600 && (cpu_id & 0xf00) != 0xf00)
return;
- if ((strcmp(cpu_vendor, "GenuineIntel") != 0) &&
- (strcmp(cpu_vendor, "AuthenticAMD") != 0))
+ if (cpu_vendor_id != CPU_VENDOR_INTEL &&
+ cpu_vendor_id != CPU_VENDOR_AMD)
return;
mem_range_softc.mr_op = &amd64_mrops;
}
Modified: user/kmacy/releng_7_2_fcs/sys/amd64/amd64/exception.S
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/amd64/amd64/exception.S Fri May 22 01:15:07 2009 (r192574)
+++ user/kmacy/releng_7_2_fcs/sys/amd64/amd64/exception.S Fri May 22 04:49:59 2009 (r192575)
@@ -1,8 +1,12 @@
/*-
* Copyright (c) 1989, 1990 William F. Jolitz.
* Copyright (c) 1990 The Regents of the University of California.
+ * Copyright (c) 2007 The FreeBSD Foundation
* All rights reserved.
*
+ * Portions of this software were developed by A. Joseph Koshy under
+ * sponsorship from the FreeBSD Foundation and Google, Inc.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -33,6 +37,7 @@
#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_kdtrace.h"
+#include "opt_hwpmc_hooks.h"
#include <machine/asmacros.h>
#include <machine/psl.h>
@@ -56,6 +61,9 @@ dtrace_invop_calltrap_addr:
.zero 8
#endif
.text
+#ifdef HWPMC_HOOKS
+ ENTRY(start_exceptions)
+#endif
/*****************************************************************************/
/* Trap handling */
@@ -388,6 +396,9 @@ IDTVEC(fast_syscall32)
* execute the NMI handler with interrupts disabled to prevent a
* nested interrupt from executing an 'iretq' instruction and
* inadvertently taking the processor out of NMI mode.
+ *
+ * Third, the NMI handler runs on its own stack (tss_ist1), shared
+ * with the double fault handler.
*/
IDTVEC(nmi)
@@ -426,6 +437,61 @@ nmi_calltrap:
movq %rsp, %rdi
call trap
MEXITCOUNT
+#ifdef HWPMC_HOOKS
+ /*
+ * Check if the current trap was from user mode and if so
+ * whether the current thread needs a user call chain to be
+ * captured. We are still in NMI mode at this point.
+ */
+ testb $SEL_RPL_MASK,TF_CS(%rsp)
+ jz nocallchain
+ movq PCPU(CURTHREAD),%rax /* curthread present? */
+ orq %rax,%rax
+ jz nocallchain
+ testl $TDP_CALLCHAIN,TD_PFLAGS(%rax) /* flagged for capture? */
+ jz nocallchain
+ /*
+ * A user callchain is to be captured, so:
+ * - Move execution to the regular kernel stack, to allow for
+ * nested NMI interrupts.
+ * - Take the processor out of "NMI" mode by faking an "iret".
+ * - Enable interrupts, so that copyin() can work.
+ */
+ movq %rsp,%rsi /* source stack pointer */
+ movq $TF_SIZE,%rcx
+ movq PCPU(RSP0),%rbx
+ subq %rcx,%rbx
+ movq %rbx,%rdi /* destination stack pointer */
+
+ shrq $3,%rcx /* trap frame size in long words */
+ cld
+ rep
+ movsq /* copy trapframe */
+
+ movl %ss,%eax
+ pushq %rax /* tf_ss */
+ pushq %rbx /* tf_rsp (on kernel stack) */
+ pushfq /* tf_rflags */
+ movl %cs,%eax
+ pushq %rax /* tf_cs */
+ pushq $outofnmi /* tf_rip */
+ iretq
+outofnmi:
+ /*
+ * At this point the processor has exited NMI mode and is running
+ * with interrupts turned off on the normal kernel stack.
+ * We turn interrupts back on, and take the usual 'doreti' exit
+ * path.
+ *
+ * If a pending NMI gets recognized at or after this point, it
+ * will cause a kernel callchain to be traced. Since this path
+ * is only taken for NMI interrupts from user space, our `swapgs'
+ * state is correct for taking the doreti path.
+ */
+ sti
+ jmp doreti
+nocallchain:
+#endif
testl %ebx,%ebx
jz nmi_restoreregs
swapgs
@@ -593,3 +659,6 @@ doreti_iret_fault:
movq $0,TF_ADDR(%rsp)
FAKE_MCOUNT(TF_RIP(%rsp))
jmp calltrap
+#ifdef HWPMC_HOOKS
+ ENTRY(end_exceptions)
+#endif
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
More information about the svn-src-user
mailing list