svn commit: r185363 - in head: lib/libpmc sys/amd64/include sys/conf sys/dev/hwpmc sys/i386/include sys/modules/hwpmc sys/sys

Joseph Koshy jkoshy at FreeBSD.org
Thu Nov 27 01:00:48 PST 2008


Author: jkoshy
Date: Thu Nov 27 09:00:47 2008
New Revision: 185363
URL: http://svn.freebsd.org/changeset/base/185363

Log:
  - Add support for PMCs in Intel CPUs of Family 6, model 0xE (Core Solo
    and Core Duo), model 0xF (Core2), model 0x17 (Core2Extreme) and
    model 0x1C (Atom).
  
    In these CPUs, the actual numbers, kinds and widths of the PMCs
    present need to be queried at run time.  Support for specific
    "architectural" events also needs to be queried at run time.  (An
    illustrative CPUID sketch follows this log message.)
  
    Model 0xE CPUs support programmable PMCs only; subsequent models
    additionally support "fixed-function" counters.
  
  - Use event names that are close to vendor documentation, taking into
    account that:
    - events with identical semantics on two or more CPUs in this family
      can have differing names in vendor documentation,
    - identical vendor event names may map to differing events across
      CPUs,
    - each type of CPU supports a different subset of measurable
      events.
  
    Fixed-function and programmable counters use the same vendor names
    for events.  A class-name prefix ("iaf-" or "iap-", respectively)
    permits the two to be distinguished.
  
  - In libpmc, refactor pmc_name_of_event() into a public interface
    and an internal helper function, for use by log handling code.
  
  - Minor code tweaks: staticize a global, freshen a few comments.
  
  Tested by:	gnn
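
  A note on the run-time discovery mentioned in the first item: the
  counter geometry comes from CPUID leaf 0AH.  The fragment below is a
  minimal sketch, not code from this commit; it assumes the bit-field
  layout documented in the Intel SDM and the do_cpuid() helper from
  <machine/cpufunc.h>.

	#include <sys/param.h>
	#include <sys/systm.h>		/* kernel printf() */

	#include <machine/cpufunc.h>	/* do_cpuid() */

	/*
	 * Sketch: decode CPUID leaf 0AH to find the number and width
	 * of programmable (IAP) and fixed-function (IAF) counters.
	 */
	static void
	query_core_pmc_geometry(void)
	{
		u_int regs[4];		/* EAX, EBX, ECX, EDX */

		do_cpuid(0xA, regs);

		/* EAX[15:8] = # of IAP PMCs, EAX[23:16] = their width. */
		printf("%u programmable counters, %u bits wide\n",
		    (regs[0] >> 8) & 0xFF, (regs[0] >> 16) & 0xFF);

		/* EDX[4:0] = # of IAF PMCs, EDX[12:5] = their width. */
		printf("%u fixed counters, %u bits wide\n",
		    regs[3] & 0x1F, (regs[3] >> 5) & 0xFF);

		/*
		 * EBX has a bit SET for each architectural event that
		 * is NOT available on this CPU.
		 */
	}

  Model 0xE (Core) parts report zero fixed counters here, which is
  consistent with the commit enabling the "iaf" class only on the
  later models.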

Added:
  head/lib/libpmc/libpmcinternal.h   (contents, props changed)
  head/sys/dev/hwpmc/hwpmc_core.c   (contents, props changed)
  head/sys/dev/hwpmc/hwpmc_core.h   (contents, props changed)
Modified:
  head/lib/libpmc/libpmc.c
  head/lib/libpmc/pmclog.c
  head/sys/amd64/include/pmc_mdep.h
  head/sys/conf/files.amd64
  head/sys/conf/files.i386
  head/sys/dev/hwpmc/hwpmc_intel.c
  head/sys/dev/hwpmc/hwpmc_logging.c
  head/sys/dev/hwpmc/hwpmc_mod.c
  head/sys/dev/hwpmc/pmc_events.h
  head/sys/i386/include/pmc_mdep.h
  head/sys/modules/hwpmc/Makefile
  head/sys/sys/param.h
  head/sys/sys/pmc.h
  head/sys/sys/pmclog.h

Modified: head/lib/libpmc/libpmc.c
==============================================================================
--- head/lib/libpmc/libpmc.c	Thu Nov 27 08:42:58 2008	(r185362)
+++ head/lib/libpmc/libpmc.c	Thu Nov 27 09:00:47 2008	(r185363)
@@ -42,12 +42,18 @@ __FBSDID("$FreeBSD$");
 #include <strings.h>
 #include <unistd.h>
 
+#include "libpmcinternal.h"
+
 /* Function prototypes */
 #if defined(__i386__)
 static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
     struct pmc_op_pmcallocate *_pmc_config);
 #endif
 #if defined(__amd64__) || defined(__i386__)
+static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+    struct pmc_op_pmcallocate *_pmc_config);
+static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+    struct pmc_op_pmcallocate *_pmc_config);
 static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
     struct pmc_op_pmcallocate *_pmc_config);
 static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
@@ -110,19 +116,55 @@ struct pmc_class_descr {
 #define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
 
 /*
- * PMC_MDEP_TABLE(NAME, CLASS, ADDITIONAL_CLASSES...)
+ * PMC_CLASSDEP_TABLE(NAME, CLASS)
  *
- * Build an event descriptor table and a list of valid PMC classes.
+ * Define a table mapping event names and aliases to HWPMC event IDs.
  */
-#define	PMC_MDEP_TABLE(N,C,...)				\
+#define	PMC_CLASSDEP_TABLE(N, C)				\
 	static const struct pmc_event_descr N##_event_table[] =	\
 	{							\
 		__PMC_EV_##C()					\
-	};							\
+	}
+
+PMC_CLASSDEP_TABLE(iaf, IAF);
+PMC_CLASSDEP_TABLE(k7, K7);
+PMC_CLASSDEP_TABLE(k8, K8);
+PMC_CLASSDEP_TABLE(p4, P4);
+PMC_CLASSDEP_TABLE(p5, P5);
+PMC_CLASSDEP_TABLE(p6, P6);
+
+#undef	__PMC_EV_ALIAS
+#define	__PMC_EV_ALIAS(N,CODE) 	{ N, PMC_EV_##CODE },
+
+static const struct pmc_event_descr atom_event_table[] =
+{
+	__PMC_EV_ALIAS_ATOM()
+};
+
+static const struct pmc_event_descr core_event_table[] =
+{
+	__PMC_EV_ALIAS_CORE()
+};
+
+
+static const struct pmc_event_descr core2_event_table[] =
+{
+	__PMC_EV_ALIAS_CORE2()
+};
+
+/*
+ * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
+ *
+ * Map a CPU to the PMC classes it supports.
+ */
+#define	PMC_MDEP_TABLE(N,C,...)				\
 	static const enum pmc_class N##_pmc_classes[] = {	\
 		PMC_CLASS_##C, __VA_ARGS__			\
 	}
 
+PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
+PMC_MDEP_TABLE(core, IAP, PMC_CLASS_TSC);
+PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
 PMC_MDEP_TABLE(k7, K7, PMC_CLASS_TSC);
 PMC_MDEP_TABLE(k8, K8, PMC_CLASS_TSC);
 PMC_MDEP_TABLE(p4, P4, PMC_CLASS_TSC);
@@ -135,39 +177,44 @@ static const struct pmc_event_descr tsc_
 };
 
 #undef	PMC_CLASS_TABLE_DESC
-#define	PMC_CLASS_TABLE_DESC(N, C)	{			\
-		.pm_evc_name  = #N "-",				\
-		.pm_evc_name_size = sizeof(#N "-") - 1,		\
-		.pm_evc_class = PMC_CLASS_##C ,			\
-		.pm_evc_event_table = N##_event_table ,		\
+#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
+static const struct pmc_class_descr NAME##_class_table_descr =	\
+	{							\
+		.pm_evc_name  = #CLASS "-",			\
+		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
+		.pm_evc_class = PMC_CLASS_##CLASS ,		\
+		.pm_evc_event_table = EVENTS##_event_table ,	\
 		.pm_evc_event_table_size = 			\
-			PMC_EVENT_TABLE_SIZE(N),		\
-		.pm_evc_allocate_pmc = N##_allocate_pmc		\
+			PMC_EVENT_TABLE_SIZE(EVENTS),		\
+		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
 	}
 
-static const struct pmc_class_descr pmc_class_table[] =
-{
+#if	defined(__i386__) || defined(__amd64__)
+PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
+PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
+PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
+PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
+#endif
 #if	defined(__i386__)
-	PMC_CLASS_TABLE_DESC(k7, K7),
+PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
 #endif
 #if	defined(__i386__) || defined(__amd64__)
-	PMC_CLASS_TABLE_DESC(k8, K8),
-	PMC_CLASS_TABLE_DESC(p4, P4),
+PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
+PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
 #endif
 #if	defined(__i386__)
-	PMC_CLASS_TABLE_DESC(p5, P5),
-	PMC_CLASS_TABLE_DESC(p6, P6),
+PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
+PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
 #endif
 #if	defined(__i386__) || defined(__amd64__)
-	PMC_CLASS_TABLE_DESC(tsc, TSC)
+PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
 #endif
-};
-
-static size_t pmc_event_class_table_size =
-    PMC_TABLE_SIZE(pmc_class_table);
 
 #undef	PMC_CLASS_TABLE_DESC
 
+static const struct pmc_class_descr **pmc_class_table;
+#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass
+
 static const enum pmc_class *pmc_mdep_class_list;
 static size_t pmc_mdep_class_list_size;
 
@@ -371,6 +418,237 @@ k7_allocate_pmc(enum pmc_event pe, char 
 #if defined(__amd64__) || defined(__i386__)
 
 /*
+ * Intel Core (Family 6, Model E) PMCs.
+ */
+
+static struct pmc_event_alias core_aliases[] = {
+	EV_ALIAS("branches",		"iap-br-instr-ret"),
+	EV_ALIAS("branch-mispredicts",	"iap-br-mispred-ret"),
+	EV_ALIAS("cycles",		"tsc-tsc"),
+	EV_ALIAS("ic-misses",		"iap-icache-misses"),
+	EV_ALIAS("instructions",	"iap-instr-ret"),
+	EV_ALIAS("interrupts",		"iap-core-hw-int-rx"),
+	EV_ALIAS("unhalted-cycles",	"iap-unhalted-core-cycles"),
+	EV_ALIAS(NULL, NULL)
+};
+
+/*
+ * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
+ * and Atom (Family 6, Model 1CH) PMCs.
+ */
+
+static struct pmc_event_alias core2_aliases[] = {
+	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
+	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
+	EV_ALIAS("cycles",		"tsc-tsc"),
+	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
+	EV_ALIAS("instructions",	"iaf-instr-retired.any"),
+	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
+	EV_ALIAS("unhalted-cycles",	"iaf-cpu-clk-unhalted.core"),
+	EV_ALIAS(NULL, NULL)
+};
+#define	atom_aliases	core2_aliases
+
+#define	IAF_KW_OS		"os"
+#define	IAF_KW_USR		"usr"
+#define	IAF_KW_ANYTHREAD	"anythread"
+
+/*
+ * Parse an event specifier for Intel fixed function counters.
+ */
+static int
+iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
+    struct pmc_op_pmcallocate *pmc_config)
+{
+	char *p;
+
+	(void) pe;
+
+	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
+	pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
+
+	while ((p = strsep(&ctrspec, ",")) != NULL) {
+		if (KWMATCH(p, IAF_KW_OS))
+			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
+		else if (KWMATCH(p, IAF_KW_USR))
+			pmc_config->pm_caps |= PMC_CAP_USER;
+		else if (KWMATCH(p, IAF_KW_ANYTHREAD))
+			pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
+		else
+			return (-1);
+	}
+
+	return (0);
+}
+
+/*
+ * Core/Core2 support.
+ */
+
+#define	IAP_KW_AGENT		"agent"
+#define	IAP_KW_ANYTHREAD	"anythread"
+#define	IAP_KW_CACHESTATE	"cachestate"
+#define	IAP_KW_CMASK		"cmask"
+#define	IAP_KW_CORE		"core"
+#define	IAP_KW_EDGE		"edge"
+#define	IAP_KW_INV		"inv"
+#define	IAP_KW_OS		"os"
+#define	IAP_KW_PREFETCH		"prefetch"
+#define	IAP_KW_SNOOPRESPONSE	"snoopresponse"
+#define	IAP_KW_SNOOPTYPE	"snooptype"
+#define	IAP_KW_TRANSITION	"trans"
+#define	IAP_KW_USR		"usr"
+
+static struct pmc_masks iap_core_mask[] = {
+	PMCMASK(all,	(0x3 << 14)),
+	PMCMASK(this,	(0x1 << 14)),
+	NULLMASK
+};
+
+static struct pmc_masks iap_agent_mask[] = {
+	PMCMASK(this,	0),
+	PMCMASK(any,	(0x1 << 13)),
+	NULLMASK
+};
+
+static struct pmc_masks iap_prefetch_mask[] = {
+	PMCMASK(both,		(0x3 << 12)),
+	PMCMASK(only,		(0x1 << 12)),
+	PMCMASK(exclude,	0),
+	NULLMASK
+};
+
+static struct pmc_masks iap_cachestate_mask[] = {
+	PMCMASK(i,		(1 <<  8)),
+	PMCMASK(s,		(1 <<  9)),
+	PMCMASK(e,		(1 << 10)),
+	PMCMASK(m,		(1 << 11)),
+	NULLMASK
+};
+
+static struct pmc_masks iap_snoopresponse_mask[] = {
+	PMCMASK(clean,		(1 << 8)),
+	PMCMASK(hit,		(1 << 9)),
+	PMCMASK(hitm,		(1 << 11)),
+	NULLMASK
+};
+
+static struct pmc_masks iap_snooptype_mask[] = {
+	PMCMASK(cmp2s,		(1 << 8)),
+	PMCMASK(cmp2i,		(1 << 9)),
+	NULLMASK
+};
+
+static struct pmc_masks iap_transition_mask[] = {
+	PMCMASK(any,		0x00),
+	PMCMASK(frequency,	0x10),
+	NULLMASK
+};
+
+static int
+iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
+    struct pmc_op_pmcallocate *pmc_config)
+{
+	char *e, *p, *q;
+	uint32_t cachestate, evmask;
+	int count, n;
+
+	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
+	    PMC_CAP_QUALIFIER);
+	pmc_config->pm_md.pm_iap.pm_iap_config = 0;
+
+	cachestate = evmask = 0;
+
+	/* Parse additional modifiers if present */
+	while ((p = strsep(&ctrspec, ",")) != NULL) {
+
+		n = 0;
+		if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
+			q = strchr(p, '=');
+			if (*++q == '\0') /* skip '=' */
+				return (-1);
+			count = strtol(q, &e, 0);
+			if (e == q || *e != '\0')
+				return (-1);
+			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
+			pmc_config->pm_md.pm_iap.pm_iap_config |=
+			    IAP_CMASK(count);
+		} else if (KWMATCH(p, IAP_KW_EDGE)) {
+			pmc_config->pm_caps |= PMC_CAP_EDGE;
+		} else if (KWMATCH(p, IAP_KW_INV)) {
+			pmc_config->pm_caps |= PMC_CAP_INVERT;
+		} else if (KWMATCH(p, IAP_KW_OS)) {
+			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
+		} else if (KWMATCH(p, IAP_KW_USR)) {
+			pmc_config->pm_caps |= PMC_CAP_USER;
+		} else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
+			pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
+		} else if (KWMATCH(p, IAP_KW_CORE)) {
+			n = pmc_parse_mask(iap_core_mask, p, &evmask);
+			if (n != 1)
+				return (-1);
+		} else if (KWMATCH(p, IAP_KW_AGENT)) {
+			n = pmc_parse_mask(iap_agent_mask, p, &evmask);
+			if (n != 1)
+				return (-1);
+		} else if (KWMATCH(p, IAP_KW_PREFETCH)) {
+			n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
+			if (n != 1)
+				return (-1);
+		} else if (KWMATCH(p, IAP_KW_CACHESTATE)) {
+			n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
+		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
+		    KWMATCH(p, IAP_KW_TRANSITION)) {
+			n = pmc_parse_mask(iap_transition_mask, p, &evmask);
+			if (n != 1)
+				return (-1);
+		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
+		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2) {
+			if (KWMATCH(p, IAP_KW_SNOOPRESPONSE)) {
+				n = pmc_parse_mask(iap_snoopresponse_mask, p,
+				    &evmask);
+			} else if (KWMATCH(p, IAP_KW_SNOOPTYPE)) {
+				n = pmc_parse_mask(iap_snooptype_mask, p,
+				    &evmask);
+			} else
+				return (-1);
+		} else
+			return (-1);
+
+		if (n < 0)	/* Parsing failed. */
+			return (-1);
+	}
+
+	pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;
+
+	/*
+	 * If the event requires a 'cachestate' qualifier that the user
+	 * did not specify, use a sensible default.
+	 */
+	switch (pe) {
+	case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
+	case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
+	case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
+	case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
+	case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
+	case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
+	case PMC_EV_IAP_EVENT_32H: /* Core */
+	case PMC_EV_IAP_EVENT_40H: /* Core */
+	case PMC_EV_IAP_EVENT_41H: /* Core */
+	case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
+	case PMC_EV_IAP_EVENT_77H: /* Core */
+		if (cachestate == 0)
+			cachestate = (0xF << 8);
+	default:
+		break;
+	}
+
+	pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
+
+	return (0);
+}
+
+/*
  * AMD K8 PMCs.
  *
  * These are very similar to AMD K7 PMCs, but support more kinds of
@@ -1704,9 +1982,9 @@ tsc_allocate_pmc(enum pmc_event pe, char
 
 /*
  * Match an event name `name' with its canonical form.
- * 
- * Matches are case insensitive and spaces, underscores and hyphen
- * characters are considered to match each other.
+ *
+ * Matches are case insensitive and spaces, periods, underscores and
+ * hyphen characters are considered to match each other.
  *
  * Returns 1 for a match, 0 otherwise.
  */
@@ -1722,13 +2000,14 @@ pmc_match_event_name(const char *name, c
 
 	for (; (nc = *n) && (cc = *c); n++, c++) {
 
-		if (toupper(nc) == cc)
+		if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
+		    (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
 			continue;
 
-		if ((nc == ' ' || nc == '_' || nc == '-') &&
-		    (cc == ' ' || cc == '_' || cc == '-'))
+		if (toupper(nc) == toupper(cc))
 			continue;
 
+
 		return (0);
 	}
 
@@ -1750,7 +2029,7 @@ pmc_match_event_class(const char *name,
 {
 	size_t n;
 	const struct pmc_event_descr *ev;
-	
+
 	ev = pcd->pm_evc_event_table;
 	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
 		if (pmc_match_event_name(name, ev->pm_ev_name))
@@ -1815,8 +2094,8 @@ pmc_allocate(const char *ctrspec, enum p
 	 * search for the event to the specified PMC class.
 	 */
 	ev = NULL;
-	for (n = 0; n < pmc_event_class_table_size; n++) {
-		pcd = &pmc_class_table[n];
+	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
+		pcd = pmc_class_table[n];
 		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
 		    strncasecmp(ctrname, pcd->pm_evc_name,
 				pcd->pm_evc_name_size) == 0) {
@@ -1833,8 +2112,8 @@ pmc_allocate(const char *ctrspec, enum p
 	 * Otherwise, search for this event in all compatible PMC
 	 * classes.
 	 */
-	for (n = 0; ev == NULL && n < pmc_event_class_table_size; n++) {
-		pcd = &pmc_class_table[n];
+	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
+		pcd = pmc_class_table[n];
 		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
 			ev = pmc_match_event_class(ctrname, pcd);
 	}
@@ -1974,6 +2253,31 @@ pmc_event_names_of_class(enum pmc_class 
 
 	switch (cl)
 	{
+	case PMC_CLASS_IAF:
+		ev = iaf_event_table;
+		count = PMC_EVENT_TABLE_SIZE(iaf);
+		break;
+	case PMC_CLASS_IAP:
+		/*
+		 * Return the most appropriate set of event name
+		 * spellings for the current CPU.
+		 */
+		switch (cpu_info.pm_cputype) {
+		default:
+		case PMC_CPU_INTEL_ATOM:
+			ev = atom_event_table;
+			count = PMC_EVENT_TABLE_SIZE(atom);
+			break;
+		case PMC_CPU_INTEL_CORE:
+			ev = core_event_table;
+			count = PMC_EVENT_TABLE_SIZE(core);
+			break;
+		case PMC_CPU_INTEL_CORE2:
+			ev = core2_event_table;
+			count = PMC_EVENT_TABLE_SIZE(core2);
+			break;
+		}
+		break;
 	case PMC_CLASS_TSC:
 		ev = tsc_event_table;
 		count = PMC_EVENT_TABLE_SIZE(tsc);
@@ -2095,6 +2399,21 @@ pmc_init(void)
 	for (n = 0; n < cpu_info.pm_nclass; n++)
 		cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];
 
+	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
+	    sizeof(struct pmc_class_descr *));
+
+	if (pmc_class_table == NULL)
+		return (-1);
+
+
+	/*
+	 * Fill in the class table.
+	 */
+	n = 0;
+#if defined(__amd64__) || defined(__i386__)
+	pmc_class_table[n++] = &tsc_class_table_descr;
+#endif
+
 #define	PMC_MDEP_INIT(C) do {					\
 		pmc_mdep_event_aliases    = C##_aliases;	\
 		pmc_mdep_class_list  = C##_pmc_classes;		\
@@ -2107,26 +2426,46 @@ pmc_init(void)
 #if defined(__i386__)
 	case PMC_CPU_AMD_K7:
 		PMC_MDEP_INIT(k7);
+		pmc_class_table[n] = &k7_class_table_descr;
 		break;
 	case PMC_CPU_INTEL_P5:
 		PMC_MDEP_INIT(p5);
+		pmc_class_table[n]  = &p5_class_table_descr;
 		break;
 	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
 	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
 	case PMC_CPU_INTEL_PIII:
 	case PMC_CPU_INTEL_PM:
 		PMC_MDEP_INIT(p6);
+		pmc_class_table[n] = &p6_class_table_descr;
 		break;
 #endif
 #if defined(__amd64__) || defined(__i386__)
 	case PMC_CPU_AMD_K8:
 		PMC_MDEP_INIT(k8);
+		pmc_class_table[n] = &k8_class_table_descr;
+		break;
+	case PMC_CPU_INTEL_ATOM:
+		PMC_MDEP_INIT(atom);
+		pmc_class_table[n++] = &iaf_class_table_descr;
+		pmc_class_table[n]   = &atom_class_table_descr;
+		break;
+	case PMC_CPU_INTEL_CORE:
+		PMC_MDEP_INIT(core);
+		pmc_class_table[n] = &core_class_table_descr;
+		break;
+	case PMC_CPU_INTEL_CORE2:
+		PMC_MDEP_INIT(core2);
+		pmc_class_table[n++] = &iaf_class_table_descr;
+		pmc_class_table[n]   = &core2_class_table_descr;
 		break;
 	case PMC_CPU_INTEL_PIV:
 		PMC_MDEP_INIT(p4);
+		pmc_class_table[n] = &p4_class_table_descr;
 		break;
 #endif
 
+
 	default:
 		/*
 		 * Some kind of CPU this version of the library knows nothing
@@ -2195,12 +2534,32 @@ pmc_name_of_disposition(enum pmc_disp pd
 }
 
 const char *
-pmc_name_of_event(enum pmc_event pe)
+_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
 {
 	const struct pmc_event_descr *ev, *evfence;
 
 	ev = evfence = NULL;
-	if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
+	if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
+		ev = iaf_event_table;
+		evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
+	} else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
+		switch (cpu) {
+		case PMC_CPU_INTEL_ATOM:
+			ev = atom_event_table;
+			evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
+			break;
+		case PMC_CPU_INTEL_CORE:
+			ev = core_event_table;
+			evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
+			break;
+		case PMC_CPU_INTEL_CORE2:
+			ev = core2_event_table;
+			evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
+			break;
+		default:	/* Unknown CPU type. */
+			break;
+		}
+	} else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
 		ev = k7_event_table;
 		evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
 	} else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
@@ -2224,6 +2583,17 @@ pmc_name_of_event(enum pmc_event pe)
 		if (pe == ev->pm_ev_code)
 			return (ev->pm_ev_name);
 
+	return (NULL);
+}
+
+const char *
+pmc_name_of_event(enum pmc_event pe)
+{
+	const char *n;
+
+	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
+		return (n);
+
 	errno = EINVAL;
 	return (NULL);
 }
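
A usage sketch for the new event classes (illustrative only, not part
of this commit): event specifiers carry the class prefix plus the
keywords parsed by iaf_allocate_pmc()/iap_allocate_pmc() above.  The
sketch assumes a Core2-class CPU, the five-argument pmc_allocate() of
this era, and linking with -lpmc.

	#include <sys/types.h>

	#include <err.h>
	#include <pmc.h>
	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		pmc_id_t pmcid;
		pmc_value_t v;

		if (pmc_init() < 0)
			err(1, "pmc_init");

		/* A fixed-function counter, counting user mode only. */
		if (pmc_allocate("iaf-instr-retired.any,usr",
		    PMC_MODE_TC, 0, PMC_CPU_ANY, &pmcid) < 0)
			err(1, "pmc_allocate");

		if (pmc_attach(pmcid, 0) < 0)	/* 0 => this process */
			err(1, "pmc_attach");

		pmc_start(pmcid);
		/* ... the work to be measured ... */
		pmc_stop(pmcid);

		if (pmc_read(pmcid, &v) < 0)
			err(1, "pmc_read");
		printf("instructions retired: %ju\n", (uintmax_t)v);

		pmc_release(pmcid);
		return (0);
	}

A programmable-counter specifier looks the same with the "iap-"
prefix, e.g. "iap-br-inst-retired.any,usr,cmask=2".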

Added: head/lib/libpmc/libpmcinternal.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/lib/libpmc/libpmcinternal.h	Thu Nov 27 09:00:47 2008	(r185363)
@@ -0,0 +1,37 @@
+/*-
+ * Copyright (c) 2008 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef	LIBPMC_INTERNAL_H
+#define	LIBPMC_INTERNAL_H	1
+
+/*
+ * Prototypes.
+ */
+const char *_pmc_name_of_event(enum pmc_event _ev, enum pmc_cputype _cpu);
+
+#endif	/* LIBPMC_INTERNAL_H */

Modified: head/lib/libpmc/pmclog.c
==============================================================================
--- head/lib/libpmc/pmclog.c	Thu Nov 27 08:42:58 2008	(r185362)
+++ head/lib/libpmc/pmclog.c	Thu Nov 27 09:00:47 2008	(r185363)
@@ -47,6 +47,8 @@ __FBSDID("$FreeBSD$");
 
 #include <machine/pmc_mdep.h>
 
+#include "libpmcinternal.h"
+
 #define	PMCLOG_BUFFER_SIZE			4096
 
 /*
@@ -363,7 +365,8 @@ pmclog_get_event(void *cookie, char **da
 		PMCLOG_READ32(le,ev->pl_u.pl_a.pl_event);
 		PMCLOG_READ32(le,ev->pl_u.pl_a.pl_flags);
 		if ((ev->pl_u.pl_a.pl_evname =
-		    pmc_name_of_event(ev->pl_u.pl_a.pl_event)) == NULL)
+		    _pmc_name_of_event(ev->pl_u.pl_a.pl_event, ps->ps_arch))
+		    == NULL)
 			goto error;
 		break;
 	case PMCLOG_TYPE_PMCATTACH:
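
Why pmclog.c switches to the internal helper: a log file may be
decoded on a machine other than the one that produced it, so event
names must be resolved against the CPU type recorded in the log
(ps_arch) rather than the decoding host's CPU.  A minimal consumer
sketch, assuming the pmclog_open()/pmclog_read() interfaces of
pmclog(3); fd and cookie cleanup is omitted:

	#include <fcntl.h>
	#include <stdio.h>

	#include <pmc.h>
	#include <pmclog.h>

	/* Sketch: print the event name in each PMCALLOCATE record. */
	static void
	dump_allocations(const char *path)
	{
		struct pmclog_ev ev;
		void *cookie;
		int fd;

		if ((fd = open(path, O_RDONLY)) < 0)
			return;
		if ((cookie = pmclog_open(fd)) == NULL)
			return;

		while (pmclog_read(cookie, &ev) == 0)
			if (ev.pl_type == PMCLOG_TYPE_PMCALLOCATE)
				printf("allocated: %s\n",
				    ev.pl_u.pl_a.pl_evname);

		pmclog_close(cookie);
	}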

Modified: head/sys/amd64/include/pmc_mdep.h
==============================================================================
--- head/sys/amd64/include/pmc_mdep.h	Thu Nov 27 08:42:58 2008	(r185362)
+++ head/sys/amd64/include/pmc_mdep.h	Thu Nov 27 09:00:47 2008	(r185363)
@@ -40,6 +40,7 @@ struct pmc_mdep;
 #endif
 
 #include <dev/hwpmc/hwpmc_amd.h>
+#include <dev/hwpmc/hwpmc_core.h>
 #include <dev/hwpmc/hwpmc_piv.h>
 #include <dev/hwpmc/hwpmc_tsc.h>
 
@@ -51,8 +52,8 @@ struct pmc_mdep;
 #define	PMC_MDEP_CLASS_INDEX_TSC	0
 #define	PMC_MDEP_CLASS_INDEX_K8		1
 #define	PMC_MDEP_CLASS_INDEX_P4		1
-#define	PMC_MDEP_CLASS_INDEX_IAF	1
-#define	PMC_MDEP_CLASS_INDEX_IAP	2
+#define	PMC_MDEP_CLASS_INDEX_IAP	1
+#define	PMC_MDEP_CLASS_INDEX_IAF	2
 
 /*
  * On the amd64 platform we support the following PMCs.
@@ -66,6 +67,8 @@ struct pmc_mdep;
 
 union pmc_md_op_pmcallocate  {
 	struct pmc_md_amd_op_pmcallocate	pm_amd;
+	struct pmc_md_iaf_op_pmcallocate	pm_iaf;
+	struct pmc_md_iap_op_pmcallocate	pm_iap;
 	struct pmc_md_p4_op_pmcallocate		pm_p4;
 	uint64_t				__pad[4];
 };
@@ -78,6 +81,8 @@ union pmc_md_op_pmcallocate  {
 
 union pmc_md_pmc {
 	struct pmc_md_amd_pmc	pm_amd;
+	struct pmc_md_iaf_pmc	pm_iaf;
+	struct pmc_md_iap_pmc	pm_iap;
 	struct pmc_md_p4_pmc	pm_p4;
 };
 

Modified: head/sys/conf/files.amd64
==============================================================================
--- head/sys/conf/files.amd64	Thu Nov 27 08:42:58 2008	(r185362)
+++ head/sys/conf/files.amd64	Thu Nov 27 09:00:47 2008	(r185363)
@@ -189,6 +189,7 @@ dev/hptrr/hptrr_osm_bsd.c	optional	hptrr
 dev/hptrr/hptrr_config.c	optional	hptrr
 dev/hwpmc/hwpmc_amd.c		optional	hwpmc
 dev/hwpmc/hwpmc_intel.c		optional	hwpmc
+dev/hwpmc/hwpmc_core.c		optional	hwpmc
 dev/hwpmc/hwpmc_piv.c		optional	hwpmc
 dev/hwpmc/hwpmc_tsc.c		optional	hwpmc
 dev/hwpmc/hwpmc_x86.c		optional	hwpmc

Modified: head/sys/conf/files.i386
==============================================================================
--- head/sys/conf/files.i386	Thu Nov 27 08:42:58 2008	(r185362)
+++ head/sys/conf/files.i386	Thu Nov 27 09:00:47 2008	(r185363)
@@ -187,6 +187,7 @@ dev/hptrr/hptrr_osm_bsd.c	optional hptrr
 dev/hptrr/hptrr_config.c	optional hptrr
 dev/hwpmc/hwpmc_amd.c		optional hwpmc
 dev/hwpmc/hwpmc_intel.c		optional hwpmc
+dev/hwpmc/hwpmc_core.c		optional hwpmc
 dev/hwpmc/hwpmc_pentium.c	optional hwpmc
 dev/hwpmc/hwpmc_piv.c		optional hwpmc
 dev/hwpmc/hwpmc_ppro.c		optional hwpmc

Added: head/sys/dev/hwpmc/hwpmc_core.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/dev/hwpmc/hwpmc_core.c	Thu Nov 27 09:00:47 2008	(r185363)
@@ -0,0 +1,1747 @@
+/*-
+ * Copyright (c) 2008 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Intel Core, Core 2 and Atom PMCs.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#include <machine/specialreg.h>
+
+#define	CORE_CPUID_REQUEST		0xA
+#define	CORE_CPUID_REQUEST_SIZE		0x4
+#define	CORE_CPUID_EAX			0x0
+#define	CORE_CPUID_EBX			0x1
+#define	CORE_CPUID_ECX			0x2
+#define	CORE_CPUID_EDX			0x3
+
+#define	IAF_PMC_CAPS			\
+	(PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INTERRUPT)
+#define	IAF_RI_TO_MSR(RI)		((RI) + (1 << 30))
+
+#define	IAP_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
+    PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE |	 \
+    PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)
+
+/*
+ * "Architectural" events defined by Intel.  The values of these
+ * symbols correspond to positions in the bitmask returned by
+ * the CPUID.0AH instruction.
+ */
+enum core_arch_events {
+	CORE_AE_BRANCH_INSTRUCTION_RETIRED	= 5,
+	CORE_AE_BRANCH_MISSES_RETIRED		= 6,
+	CORE_AE_INSTRUCTION_RETIRED		= 1,
+	CORE_AE_LLC_MISSES			= 4,
+	CORE_AE_LLC_REFERENCE			= 3,
+	CORE_AE_UNHALTED_REFERENCE_CYCLES	= 2,
+	CORE_AE_UNHALTED_CORE_CYCLES		= 0
+};
+
+static enum pmc_cputype	core_cputype;
+
+struct core_cpu {
+	volatile uint32_t	pc_resync;
+	volatile uint32_t	pc_iafctrl;	/* Fixed function control. */
+	volatile uint64_t	pc_globalctrl;	/* Global control register. */
+	struct pmc_hw		pc_corepmcs[];
+};
+
+static struct core_cpu **core_pcpu;
+
+static uint32_t core_architectural_events;
+static uint64_t core_pmcmask;
+
+static int core_iaf_ri;		/* relative index of fixed counters */
+static int core_iaf_width;
+static int core_iaf_npmc;
+
+static int core_iap_width;
+static int core_iap_npmc;
+
+static int
+core_pcpu_noop(struct pmc_mdep *md, int cpu)
+{
+	(void) md;
+	(void) cpu;
+	return (0);
+}
+
+static int
+core_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+	struct pmc_cpu *pc;
+	struct core_cpu *cc;
+	struct pmc_hw *phw;
+	int core_ri, n, npmc;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[iaf,%d] insane cpu number %d", __LINE__, cpu));
+
+	PMCDBG(MDP,INI,1,"core-init cpu=%d", cpu);
+
+	core_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_ri;
+	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_num;
+
+	if (core_cputype != PMC_CPU_INTEL_CORE)
+		npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF].pcd_num;
+
+	cc = malloc(sizeof(struct core_cpu) + npmc * sizeof(struct pmc_hw),
+	    M_PMC, M_WAITOK | M_ZERO);
+
+	core_pcpu[cpu] = cc;
+	pc = pmc_pcpu[cpu];
+
+	KASSERT(pc != NULL && cc != NULL,
+	    ("[core,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));
+
+	for (n = 0, phw = cc->pc_corepmcs; n < npmc; n++, phw++) {
+		phw->phw_state 	  = PMC_PHW_FLAG_IS_ENABLED |
+		    PMC_PHW_CPU_TO_STATE(cpu) |
+		    PMC_PHW_INDEX_TO_STATE(n + core_ri);
+		phw->phw_pmc	  = NULL;
+		pc->pc_hwpmcs[n + core_ri]  = phw;
+	}
+
+	return (0);
+}
+
+static int
+core_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+	int core_ri, n, npmc;
+	struct pmc_cpu *pc;
+	struct core_cpu *cc;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[core,%d] insane cpu number (%d)", __LINE__, cpu));
+
+	PMCDBG(MDP,INI,1,"core-pcpu-fini cpu=%d", cpu);
+
+	if ((cc = core_pcpu[cpu]) == NULL)
+		return (0);
+
+	core_pcpu[cpu] = NULL;
+
+	pc = pmc_pcpu[cpu];
+
+	KASSERT(pc != NULL, ("[core,%d] NULL per-cpu %d state", __LINE__,
+		cpu));
+
+	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_num;
+	core_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_ri;
+
+	for (n = 0; n < npmc; n++)
+		wrmsr(IAP_EVSEL0 + n, 0);
+
+	if (core_cputype != PMC_CPU_INTEL_CORE) {
+		wrmsr(IAF_CTRL, 0);
+		npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF].pcd_num;
+	}
+
+	for (n = 0; n < npmc; n++)
+		pc->pc_hwpmcs[n + core_ri] = NULL;
+
+	free(cc, M_PMC);
+
+	return (0);
+}
+
+/*
+ * Fixed function counters.
+ */
+
+static pmc_value_t
+iaf_perfctr_value_to_reload_count(pmc_value_t v)
+{
+	v &= (1ULL << core_iaf_width) - 1;
+	return (1ULL << core_iaf_width) - v;
+}
+
+static pmc_value_t
+iaf_reload_count_to_perfctr_value(pmc_value_t rlc)
+{
+	return (1ULL << core_iaf_width) - rlc;
+}
+
+static int
+iaf_allocate_pmc(int cpu, int ri, struct pmc *pm,
+    const struct pmc_op_pmcallocate *a)
+{
+	enum pmc_event ev;
+	uint32_t caps, flags, validflags;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[core,%d] illegal CPU %d", __LINE__, cpu));
+
+	PMCDBG(MDP,ALL,1, "iaf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);
+
+	if (ri < 0 || ri >= core_iaf_npmc)
+		return (EINVAL);
+
+	caps = a->pm_caps;
+
+	if (a->pm_class != PMC_CLASS_IAF ||
+	    (caps & IAF_PMC_CAPS) != caps)
+		return (EINVAL);
+
+	ev = pm->pm_event;
+	if (ev < PMC_EV_IAF_FIRST || ev > PMC_EV_IAF_LAST)
+		return (EINVAL);
+
+	if (ev == PMC_EV_IAF_INSTR_RETIRED_ANY && ri != 0)
+		return (EINVAL);
+	if (ev == PMC_EV_IAF_CPU_CLK_UNHALTED_CORE && ri != 1)
+		return (EINVAL);
+	if (ev == PMC_EV_IAF_CPU_CLK_UNHALTED_REF && ri != 2)
+		return (EINVAL);
+
+	flags = a->pm_md.pm_iaf.pm_iaf_flags;
+
+	validflags = IAF_MASK;
+
+	if (core_cputype != PMC_CPU_INTEL_ATOM)
+		validflags &= ~IAF_ANY;
+
+	if ((flags & ~validflags) != 0)
+		return (EINVAL);
+
+	if (caps & PMC_CAP_INTERRUPT)
+		flags |= IAF_PMI;
+	if (caps & PMC_CAP_SYSTEM)
+		flags |= IAF_OS;
+	if (caps & PMC_CAP_USER)
+		flags |= IAF_USR;
+	if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
+		flags |= (IAF_OS | IAF_USR);
+
+	pm->pm_md.pm_iaf.pm_iaf_ctrl = (flags << (ri * 4));
+
+	PMCDBG(MDP,ALL,2, "iaf-allocate config=0x%jx",
+	    (uintmax_t) pm->pm_md.pm_iaf.pm_iaf_ctrl);
+
+	return (0);
+}
+
+static int
+iaf_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[core,%d] illegal CPU %d", __LINE__, cpu));
+
+	KASSERT(ri >= 0 && ri < core_iaf_npmc,
+	    ("[core,%d] illegal row-index %d", __LINE__, ri));
+
+	PMCDBG(MDP,CFG,1, "iaf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+	KASSERT(core_pcpu[cpu] != NULL, ("[core,%d] null per-cpu %d", __LINE__,
+	    cpu));
+
+	core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc = pm;
+
+	return (0);
+}
+
+static int
+iaf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
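
The iaf_perfctr_value_to_reload_count()/iaf_reload_count_to_perfctr_value()
pair shown in hwpmc_core.c above encodes the usual sampling idiom: the
counter is programmed with 2^width - reload, so that it overflows (and
can raise an interrupt) after `reload' further events.  A standalone
restatement of that arithmetic, assuming a 40-bit counter width:

	#include <assert.h>
	#include <stdint.h>

	#define	CTR_WIDTH	40	/* assumed counter width in bits */

	/* Value to program so the counter overflows after 'rlc' events. */
	static uint64_t
	reload_to_perfctr(uint64_t rlc)
	{
		return ((1ULL << CTR_WIDTH) - rlc);
	}

	/* Recover the remaining reload count from a raw counter value. */
	static uint64_t
	perfctr_to_reload(uint64_t v)
	{
		v &= (1ULL << CTR_WIDTH) - 1;	/* counter is CTR_WIDTH bits */
		return ((1ULL << CTR_WIDTH) - v);
	}

	int
	main(void)
	{
		/* Round trip: a reload count survives the conversion. */
		assert(perfctr_to_reload(reload_to_perfctr(100000)) == 100000);
		return (0);
	}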

