svn commit: r228869 - in head: lib/libpmc sys/dev/hwpmc sys/powerpc/aim sys/powerpc/include sys/sys

Justin Hibbits jhibbits at FreeBSD.org
Sat Dec 24 19:34:53 UTC 2011


Author: jhibbits
Date: Sat Dec 24 19:34:52 2011
New Revision: 228869
URL: http://svn.freebsd.org/changeset/base/228869

Log:
  Implement counting PMC support in hwpmc(4) for PowerPC G4+ (MPC745x/MPC744x).
  Sampling support is still a work in progress.
  
  Approved by:	nwhitehorn (mentor)
  MFC after:	9.0-RELEASE
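
As a quick illustration of what the new support enables, here is a minimal
userland sketch (not part of this commit) that drives one of the new counting
PMCs through libpmc(3), using the "instructions" alias added below.  It assumes
hwpmc(4) is loaded on an MPC744x/745x machine and that the caller has the
required privilege; error handling is kept to a minimum.

#include <sys/types.h>
#include <err.h>
#include <pmc.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pmc_id_t pmcid;
	pmc_value_t v;

	if (pmc_init() < 0)
		err(1, "pmc_init");
	/* System-scope counting PMC on CPU 0, using the "instructions" alias. */
	if (pmc_allocate("instructions", PMC_MODE_SC, 0, 0, &pmcid) < 0)
		err(1, "pmc_allocate");
	if (pmc_start(pmcid) < 0)
		err(1, "pmc_start");
	sleep(1);		/* Let the counter run for a moment. */
	if (pmc_read(pmcid, &v) < 0)
		err(1, "pmc_read");
	printf("instructions completed: %ju\n", (uintmax_t)v);
	(void)pmc_release(pmcid);
	return (0);
}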

Modified:
  head/lib/libpmc/libpmc.c
  head/sys/dev/hwpmc/hwpmc_powerpc.c
  head/sys/dev/hwpmc/pmc_events.h
  head/sys/powerpc/aim/machdep.c
  head/sys/powerpc/aim/trap.c
  head/sys/powerpc/include/pmc_mdep.h
  head/sys/powerpc/include/spr.h
  head/sys/sys/pmc.h

Modified: head/lib/libpmc/libpmc.c
==============================================================================
--- head/lib/libpmc/libpmc.c	Sat Dec 24 19:01:31 2011	(r228868)
+++ head/lib/libpmc/libpmc.c	Sat Dec 24 19:34:52 2011	(r228869)
@@ -83,6 +83,10 @@ static int mips24k_allocate_pmc(enum pmc
 			     struct pmc_op_pmcallocate *_pmc_config);
 #endif /* __mips__ */
 
+#if defined(__powerpc__)
+static int ppc7450_allocate_pmc(enum pmc_event _pe, char* ctrspec,
+			     struct pmc_op_pmcallocate *_pmc_config);
+#endif /* __powerpc__ */
 
 #define PMC_CALL(cmd, params)				\
 	syscall(pmc_syscall, PMC_OP_##cmd, (params))
@@ -149,6 +153,7 @@ PMC_CLASSDEP_TABLE(p6, P6);
 PMC_CLASSDEP_TABLE(xscale, XSCALE);
 PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
 PMC_CLASSDEP_TABLE(ucf, UCF);
+PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
 
 #undef	__PMC_EV_ALIAS
 #define	__PMC_EV_ALIAS(N,CODE) 	{ N, PMC_EV_##CODE },
@@ -211,6 +216,7 @@ PMC_MDEP_TABLE(p5, P5, PMC_CLASS_TSC);
 PMC_MDEP_TABLE(p6, P6, PMC_CLASS_TSC);
 PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_XSCALE);
 PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_MIPS24K);
+PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_PPC7450);
 
 static const struct pmc_event_descr tsc_event_table[] =
 {
@@ -263,6 +269,10 @@ PMC_CLASS_TABLE_DESC(xscale, XSCALE, xsc
 PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips24k);
 #endif /* __mips__ */
 
+#if defined(__powerpc__)
+PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, ppc7450);
+#endif
+
 #undef	PMC_CLASS_TABLE_DESC
 
 static const struct pmc_class_descr **pmc_class_table;
@@ -2212,6 +2222,44 @@ mips24k_allocate_pmc(enum pmc_event pe, 
 }
 #endif /* __mips__ */
 
+#if defined(__powerpc__)
+
+static struct pmc_event_alias ppc7450_aliases[] = {
+	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
+	EV_ALIAS("branches",		"BRANCHES_COMPLETED"),
+	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCHES"),
+	EV_ALIAS(NULL, NULL)
+};
+
+#define	PPC7450_KW_OS		"os"
+#define	PPC7450_KW_USR		"usr"
+#define	PPC7450_KW_ANYTHREAD	"anythread"
+
+static int
+ppc7450_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
+		  struct pmc_op_pmcallocate *pmc_config __unused)
+{
+	char *p;
+
+	(void) pe;
+
+	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
+	
+	while ((p = strsep(&ctrspec, ",")) != NULL) {
+		if (KWMATCH(p, PPC7450_KW_OS))
+			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
+		else if (KWMATCH(p, PPC7450_KW_USR))
+			pmc_config->pm_caps |= PMC_CAP_USER;
+		else if (KWMATCH(p, PPC7450_KW_ANYTHREAD))
+			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
+		else
+			return (-1);
+	}
+
+	return (0);
+}
+#endif /* __powerpc__ */
+
 
 /*
  * Match an event name `name' with its canonical form.
@@ -2573,6 +2621,10 @@ pmc_event_names_of_class(enum pmc_class 
 		ev = mips24k_event_table;
 		count = PMC_EVENT_TABLE_SIZE(mips24k);
 		break;
+	case PMC_CLASS_PPC7450:
+		ev = ppc7450_event_table;
+		count = PMC_EVENT_TABLE_SIZE(ppc7450);
+		break;
 	default:
 		errno = EINVAL;
 		return (-1);
@@ -2784,6 +2836,12 @@ pmc_init(void)
 		pmc_class_table[n] = &mips24k_class_table_descr;
 		break;
 #endif /* __mips__ */
+#if defined(__powerpc__)
+	case PMC_CPU_PPC_7450:
+		PMC_MDEP_INIT(ppc7450);
+		pmc_class_table[n] = &ppc7450_class_table_descr;
+		break;
+#endif
 	default:
 		/*
 		 * Some kind of CPU this version of the library knows nothing
@@ -2924,6 +2982,10 @@ _pmc_name_of_event(enum pmc_event pe, en
 		ev = mips24k_event_table;
 		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
+	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
+		ev = ppc7450_event_table;
+		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
 	} else if (pe == PMC_EV_TSC_TSC) {
 		ev = tsc_event_table;
 		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);

Modified: head/sys/dev/hwpmc/hwpmc_powerpc.c
==============================================================================
--- head/sys/dev/hwpmc/hwpmc_powerpc.c	Sat Dec 24 19:01:31 2011	(r228868)
+++ head/sys/dev/hwpmc/hwpmc_powerpc.c	Sat Dec 24 19:34:52 2011	(r228869)
@@ -1,4 +1,5 @@
 /*-
+ * Copyright (c) 2011 Justin Hibbits
  * Copyright (c) 2005, Joseph Koshy
  * All rights reserved.
  *
@@ -30,20 +31,297 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
 
 #include <machine/pmc_mdep.h>
+#include <machine/spr.h>
+#include <machine/cpu.h>
 
-struct pmc_mdep *
-pmc_md_initialize()
-{
-	return NULL;
-}
+#define	POWERPC_PMC_CAPS	(PMC_CAP_INTERRUPT | PMC_CAP_USER |     \
+				 PMC_CAP_SYSTEM | PMC_CAP_EDGE |	\
+				 PMC_CAP_THRESHOLD | PMC_CAP_READ |	\
+				 PMC_CAP_WRITE | PMC_CAP_INVERT |	\
+				 PMC_CAP_QUALIFIER)
 
-void
-pmc_md_finalize(struct pmc_mdep *md)
-{
-	(void) md;
-}
+#define PPC_SET_PMC1SEL(r, x)	((r & ~(SPR_MMCR0_PMC1SEL(0x3f))) | SPR_MMCR0_PMC1SEL(x))
+#define PPC_SET_PMC2SEL(r, x)	((r & ~(SPR_MMCR0_PMC2SEL(0x3f))) | SPR_MMCR0_PMC2SEL(x))
+#define PPC_SET_PMC3SEL(r, x)	((r & ~(SPR_MMCR1_PMC3SEL(0x1f))) | SPR_MMCR1_PMC3SEL(x))
+#define PPC_SET_PMC4SEL(r, x)	((r & ~(SPR_MMCR1_PMC4SEL(0x1f))) | SPR_MMCR1_PMC4SEL(x))
+#define PPC_SET_PMC5SEL(r, x)	((r & ~(SPR_MMCR1_PMC5SEL(0x1f))) | SPR_MMCR1_PMC5SEL(x))
+#define PPC_SET_PMC6SEL(r, x)	((r & ~(SPR_MMCR1_PMC6SEL(0x3f))) | SPR_MMCR1_PMC6SEL(x))
+
+/* Change this when we support more than just the 7450. */
+#define PPC_MAX_PMCS	6
+
+#define POWERPC_PMC_KERNEL_ENABLE	(0x1 << 30)
+#define POWERPC_PMC_USER_ENABLE		(0x1 << 31)
+
+#define POWERPC_PMC_ENABLE		(POWERPC_PMC_KERNEL_ENABLE | POWERPC_PMC_USER_ENABLE)
+#define	POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(V)	(0x80000000-(V))
+#define	POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(P)	((P)-0x80000000)
+#define POWERPC_PMC_HAS_OVERFLOWED(x) (powerpc_pmcn_read(x) & (0x1 << 31))
+
+
+/*
+ * This should work for every 32-bit PowerPC implementation I know of (G3 and G4
+ * specifically).  PowerPC 970 will take more work.
+ */
+
+/*
+ * Per-processor information.
+ */
+struct powerpc_cpu {
+	struct pmc_hw   *pc_ppcpmcs;
+};
+
+static struct powerpc_cpu **powerpc_pcpu;
+
+struct powerpc_event_code_map {
+	enum pmc_event	pe_ev;       /* enum value */
+	uint8_t         pe_counter_mask;  /* Mask of PMCs this event can be counted on. */
+	uint8_t			pe_code;     /* numeric code */
+};
+
+#define PPC_PMC_MASK1	0
+#define PPC_PMC_MASK2	1
+#define PPC_PMC_MASK3	2
+#define PPC_PMC_MASK4	3
+#define PPC_PMC_MASK5	4
+#define PPC_PMC_MASK6	5
+#define PPC_PMC_MASK_ALL	0x3f
+
+#define PMC_POWERPC_EVENT(id, mask, number) \
+	{ .pe_ev = PMC_EV_PPC7450_##id, .pe_counter_mask = mask, .pe_code = number }
+
+static struct powerpc_event_code_map powerpc_event_codes[] = {
+	PMC_POWERPC_EVENT(CYCLE,PPC_PMC_MASK_ALL, 1),
+	PMC_POWERPC_EVENT(INSTR_COMPLETED, 0x0f, 2),
+	PMC_POWERPC_EVENT(TLB_BIT_TRANSITIONS, 0x0f, 3),
+	PMC_POWERPC_EVENT(INSTR_DISPATCHED, 0x0f, 4),
+	PMC_POWERPC_EVENT(PMON_EXCEPT, 0x0f, 5),
+	PMC_POWERPC_EVENT(PMON_SIG, 0x0f, 7),
+	PMC_POWERPC_EVENT(VPU_INSTR_COMPLETED, 0x03, 8),
+	PMC_POWERPC_EVENT(VFPU_INSTR_COMPLETED, 0x03, 9),
+	PMC_POWERPC_EVENT(VIU1_INSTR_COMPLETED, 0x03, 10),
+	PMC_POWERPC_EVENT(VIU2_INSTR_COMPLETED, 0x03, 11),
+	PMC_POWERPC_EVENT(MTVSCR_INSTR_COMPLETED, 0x03, 12),
+	PMC_POWERPC_EVENT(MTVRSAVE_INSTR_COMPLETED, 0x03, 13),
+	PMC_POWERPC_EVENT(VPU_INSTR_WAIT_CYCLES, 0x03, 14),
+	PMC_POWERPC_EVENT(VFPU_INSTR_WAIT_CYCLES, 0x03, 15),
+	PMC_POWERPC_EVENT(VIU1_INSTR_WAIT_CYCLES, 0x03, 16),
+	PMC_POWERPC_EVENT(VIU2_INSTR_WAIT_CYCLES, 0x03, 17),
+	PMC_POWERPC_EVENT(MFVSCR_SYNC_CYCLES, 0x03, 18),
+	PMC_POWERPC_EVENT(VSCR_SAT_SET, 0x03, 19),
+	PMC_POWERPC_EVENT(STORE_INSTR_COMPLETED, 0x03, 20),
+	PMC_POWERPC_EVENT(L1_INSTR_CACHE_MISSES, 0x03, 21),
+	PMC_POWERPC_EVENT(L1_DATA_SNOOPS, 0x03, 22),
+	PMC_POWERPC_EVENT(UNRESOLVED_BRANCHES, 0x01, 23),
+	PMC_POWERPC_EVENT(SPEC_BUFFER_CYCLES, 0x01, 24),
+	PMC_POWERPC_EVENT(BRANCH_UNIT_STALL_CYCLES, 0x01, 25),
+	PMC_POWERPC_EVENT(TRUE_BRANCH_TARGET_HITS, 0x01, 26),
+	PMC_POWERPC_EVENT(BRANCH_LINK_STAC_PREDICTED, 0x01, 27),
+	PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_DISPATCHES, 0x01, 28),
+	PMC_POWERPC_EVENT(CYCLES_THREE_INSTR_DISPATCHED, 0x01, 29),
+	PMC_POWERPC_EVENT(THRESHOLD_INSTR_QUEUE_ENTRIES_CYCLES, 0x01, 30),
+	PMC_POWERPC_EVENT(THRESHOLD_VEC_INSTR_QUEUE_ENTRIES_CYCLES, 0x01, 31),
+	PMC_POWERPC_EVENT(CYCLES_NO_COMPLETED_INSTRS, 0x01, 32),
+	PMC_POWERPC_EVENT(IU2_INSTR_COMPLETED, 0x01, 33),
+	PMC_POWERPC_EVENT(BRANCHES_COMPLETED, 0x01, 34),
+	PMC_POWERPC_EVENT(EIEIO_INSTR_COMPLETED, 0x01, 35),
+	PMC_POWERPC_EVENT(MTSPR_INSTR_COMPLETED, 0x01, 36),
+	PMC_POWERPC_EVENT(SC_INSTR_COMPLETED, 0x01, 37),
+	PMC_POWERPC_EVENT(LS_LM_COMPLETED, 0x01, 38),
+	PMC_POWERPC_EVENT(ITLB_HW_TABLE_SEARCH_CYCLES, 0x01, 39),
+	PMC_POWERPC_EVENT(DTLB_HW_SEARCH_CYCLES_OVER_THRESHOLD, 0x01, 40),
+	PMC_POWERPC_EVENT(L1_INSTR_CACHE_ACCESSES, 0x01, 41),
+	PMC_POWERPC_EVENT(INSTR_BKPT_MATCHES, 0x01, 42),
+	PMC_POWERPC_EVENT(L1_DATA_CACHE_LOAD_MISS_CYCLES_OVER_THRESHOLD, 0x01, 43),
+	PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_ON_MODIFIED, 0x01, 44),
+	PMC_POWERPC_EVENT(LOAD_MISS_ALIAS, 0x01, 45),
+	PMC_POWERPC_EVENT(LOAD_MISS_ALIAS_ON_TOUCH, 0x01, 46),
+	PMC_POWERPC_EVENT(TOUCH_ALIAS, 0x01, 47),
+	PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_CASTOUT_QUEUE, 0x01, 48),
+	PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_CASTOUT, 0x01, 49),
+	PMC_POWERPC_EVENT(L1_DATA_SNOOP_HITS, 0x01, 50),
+	PMC_POWERPC_EVENT(WRITE_THROUGH_STORES, 0x01, 51),
+	PMC_POWERPC_EVENT(CACHE_INHIBITED_STORES, 0x01, 52),
+	PMC_POWERPC_EVENT(L1_DATA_LOAD_HIT, 0x01, 53),
+	PMC_POWERPC_EVENT(L1_DATA_TOUCH_HIT, 0x01, 54),
+	PMC_POWERPC_EVENT(L1_DATA_STORE_HIT, 0x01, 55),
+	PMC_POWERPC_EVENT(L1_DATA_TOTAL_HITS, 0x01, 56),
+	PMC_POWERPC_EVENT(DST_INSTR_DISPATCHED, 0x01, 57),
+	PMC_POWERPC_EVENT(REFRESHED_DSTS, 0x01, 58),
+	PMC_POWERPC_EVENT(SUCCESSFUL_DST_TABLE_SEARCHES, 0x01, 59),
+	PMC_POWERPC_EVENT(DSS_INSTR_COMPLETED, 0x01, 60),
+	PMC_POWERPC_EVENT(DST_STREAM_0_CACHE_LINE_FETCHES, 0x01, 61),
+	PMC_POWERPC_EVENT(VTQ_SUSPENDS_DUE_TO_CTX_CHANGE, 0x01, 62),
+	PMC_POWERPC_EVENT(VTQ_LINE_FETCH_HIT, 0x01, 63),
+	PMC_POWERPC_EVENT(VEC_LOAD_INSTR_COMPLETED, 0x01, 64),
+	PMC_POWERPC_EVENT(FP_STORE_INSTR_COMPLETED_IN_LSU, 0x01, 65),
+	PMC_POWERPC_EVENT(FPU_RENORMALIZATION, 0x01, 66),
+	PMC_POWERPC_EVENT(FPU_DENORMALIZATION, 0x01, 67),
+	PMC_POWERPC_EVENT(FP_STORE_CAUSES_STALL_IN_LSU, 0x01, 68),
+	PMC_POWERPC_EVENT(LD_ST_TRUE_ALIAS_STALL, 0x01, 70),
+	PMC_POWERPC_EVENT(LSU_INDEXED_ALIAS_STALL, 0x01, 71),
+	PMC_POWERPC_EVENT(LSU_ALIAS_VS_FSQ_WB0_WB1, 0x01, 72),
+	PMC_POWERPC_EVENT(LSU_ALIAS_VS_CSQ, 0x01, 73),
+	PMC_POWERPC_EVENT(LSU_LOAD_HIT_LINE_ALIAS_VS_CSQ0, 0x01, 74),
+	PMC_POWERPC_EVENT(LSU_LOAD_MISS_LINE_ALIAS_VS_CSQ0, 0x01, 75),
+	PMC_POWERPC_EVENT(LSU_TOUCH_LINE_ALIAS_VS_FSQ_WB0_WB1, 0x01, 76),
+	PMC_POWERPC_EVENT(LSU_TOUCH_ALIAS_VS_CSQ, 0x01, 77),
+	PMC_POWERPC_EVENT(LSU_LMQ_FULL_STALL, 0x01, 78),
+	PMC_POWERPC_EVENT(FP_LOAD_INSTR_COMPLETED_IN_LSU, 0x01, 79),
+	PMC_POWERPC_EVENT(FP_LOAD_SINGLE_INSTR_COMPLETED_IN_LSU, 0x01, 80),
+	PMC_POWERPC_EVENT(FP_LOAD_DOUBLE_COMPLETED_IN_LSU, 0x01, 81),
+	PMC_POWERPC_EVENT(LSU_RA_LATCH_STALL, 0x01, 82),
+	PMC_POWERPC_EVENT(LSU_LOAD_VS_STORE_QUEUE_ALIAS_STALL, 0x01, 83),
+	PMC_POWERPC_EVENT(LSU_LMQ_INDEX_ALIAS, 0x01, 84),
+	PMC_POWERPC_EVENT(LSU_STORE_QUEUE_INDEX_ALIAS, 0x01, 85),
+	PMC_POWERPC_EVENT(LSU_CSQ_FORWARDING, 0x01, 86),
+	PMC_POWERPC_EVENT(LSU_MISALIGNED_LOAD_FINISH, 0x01, 87),
+	PMC_POWERPC_EVENT(LSU_MISALIGN_STORE_COMPLETED, 0x01, 88),
+	PMC_POWERPC_EVENT(LSU_MISALIGN_STALL, 0x01, 89),
+	PMC_POWERPC_EVENT(FP_ONE_QUARTER_FPSCR_RENAMES_BUSY, 0x01, 90),
+	PMC_POWERPC_EVENT(FP_ONE_HALF_FPSCR_RENAMES_BUSY, 0x01, 91),
+	PMC_POWERPC_EVENT(FP_THREE_QUARTERS_FPSCR_RENAMES_BUSY, 0x01, 92),
+	PMC_POWERPC_EVENT(FP_ALL_FPSCR_RENAMES_BUSY, 0x01, 93),
+	PMC_POWERPC_EVENT(FP_DENORMALIZED_RESULT, 0x01, 94),
+	PMC_POWERPC_EVENT(L1_DATA_TOTAL_MISSES, 0x02, 23),
+	PMC_POWERPC_EVENT(DISPATCHES_TO_FPR_ISSUE_QUEUE, 0x02, 24),
+	PMC_POWERPC_EVENT(LSU_INSTR_COMPLETED, 0x02, 25),
+	PMC_POWERPC_EVENT(LOAD_INSTR_COMPLETED, 0x02, 26),
+	PMC_POWERPC_EVENT(SS_SM_INSTR_COMPLETED, 0x02, 27),
+	PMC_POWERPC_EVENT(TLBIE_INSTR_COMPLETED, 0x02, 28),
+	PMC_POWERPC_EVENT(LWARX_INSTR_COMPLETED, 0x02, 29),
+	PMC_POWERPC_EVENT(MFSPR_INSTR_COMPLETED, 0x02, 30),
+	PMC_POWERPC_EVENT(REFETCH_SERIALIZATION, 0x02, 31),
+	PMC_POWERPC_EVENT(COMPLETION_QUEUE_ENTRIES_OVER_THRESHOLD, 0x02, 32),
+	PMC_POWERPC_EVENT(CYCLES_ONE_INSTR_DISPATCHED, 0x02, 33),
+	PMC_POWERPC_EVENT(CYCLES_TWO_INSTR_COMPLETED, 0x02, 34),
+	PMC_POWERPC_EVENT(ITLB_NON_SPECULATIVE_MISSES, 0x02, 35),
+	PMC_POWERPC_EVENT(CYCLES_WAITING_FROM_L1_INSTR_CACHE_MISS, 0x02, 36),
+	PMC_POWERPC_EVENT(L1_DATA_LOAD_ACCESS_MISS, 0x02, 37),
+	PMC_POWERPC_EVENT(L1_DATA_TOUCH_MISS, 0x02, 38),
+	PMC_POWERPC_EVENT(L1_DATA_STORE_MISS, 0x02, 39),
+	PMC_POWERPC_EVENT(L1_DATA_TOUCH_MISS_CYCLES, 0x02, 40),
+	PMC_POWERPC_EVENT(L1_DATA_CYCLES_USED, 0x02, 41),
+	PMC_POWERPC_EVENT(DST_STREAM_1_CACHE_LINE_FETCHES, 0x02, 42),
+	PMC_POWERPC_EVENT(VTQ_STREAM_CANCELED_PREMATURELY, 0x02, 43),
+	PMC_POWERPC_EVENT(VTQ_RESUMES_DUE_TO_CTX_CHANGE, 0x02, 44),
+	PMC_POWERPC_EVENT(VTQ_LINE_FETCH_MISS, 0x02, 45),
+	PMC_POWERPC_EVENT(VTQ_LINE_FETCH, 0x02, 46),
+	PMC_POWERPC_EVENT(TLBIE_SNOOPS, 0x02, 47),
+	PMC_POWERPC_EVENT(L1_INSTR_CACHE_RELOADS, 0x02, 48),
+	PMC_POWERPC_EVENT(L1_DATA_CACHE_RELOADS, 0x02, 49),
+	PMC_POWERPC_EVENT(L1_DATA_CACHE_CASTOUTS_TO_L2, 0x02, 50),
+	PMC_POWERPC_EVENT(STORE_MERGE_GATHER, 0x02, 51),
+	PMC_POWERPC_EVENT(CACHEABLE_STORE_MERGE_TO_32_BYTES, 0x02, 52),
+	PMC_POWERPC_EVENT(DATA_BKPT_MATCHES, 0x02, 53),
+	PMC_POWERPC_EVENT(FALL_THROUGH_BRANCHES_PROCESSED, 0x02, 54),
+	PMC_POWERPC_EVENT(FIRST_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x02, 55),
+	PMC_POWERPC_EVENT(SECOND_SPECULATION_BUFFER_ACTIVE, 0x02, 56),
+	PMC_POWERPC_EVENT(BPU_STALL_ON_LR_DEPENDENCY, 0x02, 57),
+	PMC_POWERPC_EVENT(BTIC_MISS, 0x02, 58),
+	PMC_POWERPC_EVENT(BRANCH_LINK_STACK_CORRECTLY_RESOLVED, 0x02, 59),
+	PMC_POWERPC_EVENT(FPR_ISSUE_STALLED, 0x02, 60),
+	PMC_POWERPC_EVENT(SWITCHES_BETWEEN_PRIV_USER, 0x02, 61),
+	PMC_POWERPC_EVENT(LSU_COMPLETES_FP_STORE_SINGLE, 0x02, 62),
+	PMC_POWERPC_EVENT(CYCLES_TWO_INSTR_COMPLETED, 0x04, 8),
+	PMC_POWERPC_EVENT(CYCLES_ONE_INSTR_DISPATCHED, 0x04, 9),
+	PMC_POWERPC_EVENT(VR_ISSUE_QUEUE_DISPATCHES, 0x04, 10),
+	PMC_POWERPC_EVENT(VR_STALLS, 0x04, 11),
+	PMC_POWERPC_EVENT(GPR_RENAME_BUFFER_ENTRIES_OVER_THRESHOLD, 0x04, 12),
+	PMC_POWERPC_EVENT(FPR_ISSUE_QUEUE_ENTRIES, 0x04, 13),
+	PMC_POWERPC_EVENT(FPU_INSTR_COMPLETED, 0x04, 14),
+	PMC_POWERPC_EVENT(STWCX_INSTR_COMPLETED, 0x04, 15),
+	PMC_POWERPC_EVENT(LS_LM_INSTR_PIECES, 0x04, 16),
+	PMC_POWERPC_EVENT(ITLB_HW_SEARCH_CYCLES_OVER_THRESHOLD, 0x04, 17),
+	PMC_POWERPC_EVENT(DTLB_MISSES, 0x04, 18),
+	PMC_POWERPC_EVENT(CANCELLED_L1_INSTR_CACHE_MISSES, 0x04, 19),
+	PMC_POWERPC_EVENT(L1_DATA_CACHE_OP_HIT, 0x04, 20),
+	PMC_POWERPC_EVENT(L1_DATA_LOAD_MISS_CYCLES, 0x04, 21),
+	PMC_POWERPC_EVENT(L1_DATA_PUSHES, 0x04, 22),
+	PMC_POWERPC_EVENT(L1_DATA_TOTAL_MISS, 0x04, 23),
+	PMC_POWERPC_EVENT(VT2_FETCHES, 0x04, 24),
+	PMC_POWERPC_EVENT(TAKEN_BRANCHES_PROCESSED, 0x04, 25),
+	PMC_POWERPC_EVENT(BRANCH_FLUSHES, 0x04, 26),
+	PMC_POWERPC_EVENT(SECOND_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x04, 27),
+	PMC_POWERPC_EVENT(THIRD_SPECULATION_BUFFER_ACTIVE, 0x04, 28),
+	PMC_POWERPC_EVENT(BRANCH_UNIT_STALL_ON_CTR_DEPENDENCY, 0x04, 29),
+	PMC_POWERPC_EVENT(FAST_BTIC_HIT, 0x04, 30),
+	PMC_POWERPC_EVENT(BRANCH_LINK_STACK_MISPREDICTED, 0x04, 31),
+	PMC_POWERPC_EVENT(CYCLES_THREE_INSTR_COMPLETED, 0x08, 14),
+	PMC_POWERPC_EVENT(CYCLES_NO_INSTR_DISPATCHED, 0x08, 15),
+	PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_ENTRIES_OVER_THRESHOLD, 0x08, 16),
+	PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_STALLED, 0x08, 17),
+	PMC_POWERPC_EVENT(IU1_INSTR_COMPLETED, 0x08, 18),
+	PMC_POWERPC_EVENT(DSSALL_INSTR_COMPLETED, 0x08, 19),
+	PMC_POWERPC_EVENT(TLBSYNC_INSTR_COMPLETED, 0x08, 20),
+	PMC_POWERPC_EVENT(SYNC_INSTR_COMPLETED, 0x08, 21),
+	PMC_POWERPC_EVENT(SS_SM_INSTR_PIECES, 0x08, 22),
+	PMC_POWERPC_EVENT(DTLB_HW_SEARCH_CYCLES, 0x08, 23),
+	PMC_POWERPC_EVENT(SNOOP_RETRIES, 0x08, 24),
+	PMC_POWERPC_EVENT(SUCCESSFUL_STWCX, 0x08, 25),
+	PMC_POWERPC_EVENT(DST_STREAM_3_CACHE_LINE_FETCHES, 0x08, 26),
+	PMC_POWERPC_EVENT(THIRD_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x08, 27),
+	PMC_POWERPC_EVENT(MISPREDICTED_BRANCHES, 0x08, 28),
+	PMC_POWERPC_EVENT(FOLDED_BRANCHES, 0x08, 29),
+	PMC_POWERPC_EVENT(FP_STORE_DOUBLE_COMPLETES_IN_LSU, 0x08, 30),
+	PMC_POWERPC_EVENT(L2_CACHE_HITS, 0x30, 2),
+	PMC_POWERPC_EVENT(L3_CACHE_HITS, 0x30, 3),
+	PMC_POWERPC_EVENT(L2_INSTR_CACHE_MISSES, 0x30, 4),
+	PMC_POWERPC_EVENT(L3_INSTR_CACHE_MISSES, 0x30, 5),
+	PMC_POWERPC_EVENT(L2_DATA_CACHE_MISSES, 0x30, 6),
+	PMC_POWERPC_EVENT(L3_DATA_CACHE_MISSES, 0x30, 7),
+	PMC_POWERPC_EVENT(L2_LOAD_HITS, 0x10, 8),
+	PMC_POWERPC_EVENT(L2_STORE_HITS, 0x10, 9),
+	PMC_POWERPC_EVENT(L3_LOAD_HITS, 0x10, 10),
+	PMC_POWERPC_EVENT(L3_STORE_HITS, 0x10, 11),
+	PMC_POWERPC_EVENT(L2_TOUCH_HITS, 0x30, 13),
+	PMC_POWERPC_EVENT(L3_TOUCH_HITS, 0x30, 14),
+	PMC_POWERPC_EVENT(SNOOP_RETRIES, 0x30, 15),
+	PMC_POWERPC_EVENT(SNOOP_MODIFIED, 0x10, 16),
+	PMC_POWERPC_EVENT(SNOOP_VALID, 0x10, 17),
+	PMC_POWERPC_EVENT(INTERVENTION, 0x30, 18),
+	PMC_POWERPC_EVENT(L2_CACHE_MISSES, 0x10, 19),
+	PMC_POWERPC_EVENT(L3_CACHE_MISSES, 0x10, 20),
+	PMC_POWERPC_EVENT(L2_CACHE_CASTOUTS, 0x20, 8),
+	PMC_POWERPC_EVENT(L3_CACHE_CASTOUTS, 0x20, 9),
+	PMC_POWERPC_EVENT(L2SQ_FULL_CYCLES, 0x20, 10),
+	PMC_POWERPC_EVENT(L3SQ_FULL_CYCLES, 0x20, 11),
+	PMC_POWERPC_EVENT(RAQ_FULL_CYCLES, 0x20, 16),
+	PMC_POWERPC_EVENT(WAQ_FULL_CYCLES, 0x20, 17),
+	PMC_POWERPC_EVENT(L1_EXTERNAL_INTERVENTIONS, 0x20, 19),
+	PMC_POWERPC_EVENT(L2_EXTERNAL_INTERVENTIONS, 0x20, 20),
+	PMC_POWERPC_EVENT(L3_EXTERNAL_INTERVENTIONS, 0x20, 21),
+	PMC_POWERPC_EVENT(EXTERNAL_INTERVENTIONS, 0x20, 22),
+	PMC_POWERPC_EVENT(EXTERNAL_PUSHES, 0x20, 23),
+	PMC_POWERPC_EVENT(EXTERNAL_SNOOP_RETRY, 0x20, 24),
+	PMC_POWERPC_EVENT(DTQ_FULL_CYCLES, 0x20, 25),
+	PMC_POWERPC_EVENT(BUS_RETRY, 0x20, 26),
+	PMC_POWERPC_EVENT(L2_VALID_REQUEST, 0x20, 27),
+	PMC_POWERPC_EVENT(BORDQ_FULL, 0x20, 28),
+	PMC_POWERPC_EVENT(BUS_TAS_FOR_READS, 0x20, 42),
+	PMC_POWERPC_EVENT(BUS_TAS_FOR_WRITES, 0x20, 43),
+	PMC_POWERPC_EVENT(BUS_READS_NOT_RETRIED, 0x20, 44),
+	PMC_POWERPC_EVENT(BUS_WRITES_NOT_RETRIED, 0x20, 45),
+	PMC_POWERPC_EVENT(BUS_READS_WRITES_NOT_RETRIED, 0x20, 46),
+	PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_L1_RETRY, 0x20, 47),
+	PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_PREVIOUS_ADJACENT, 0x20, 48),
+	PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_COLLISION, 0x20, 49),
+	PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_INTERVENTION_ORDERING, 0x20, 50),
+	PMC_POWERPC_EVENT(SNOOP_REQUESTS, 0x20, 51),
+	PMC_POWERPC_EVENT(PREFETCH_ENGINE_REQUEST, 0x20, 52),
+	PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_LOAD, 0x20, 53),
+	PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_STORE, 0x20, 54),
+	PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_INSTR_FETCH, 0x20, 55),
+	PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_LOAD_STORE_INSTR_FETCH, 0x20, 56),
+	PMC_POWERPC_EVENT(PREFETCH_ENGINE_FULL, 0x20, 57)
+};
+
+const size_t powerpc_event_codes_size = 
+	sizeof(powerpc_event_codes) / sizeof(powerpc_event_codes[0]);
 
 int
 pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
@@ -55,6 +333,520 @@ pmc_save_kernel_callchain(uintptr_t *cc,
 	return (0);
 }
 
+static pmc_value_t
+powerpc_pmcn_read(unsigned int pmc)
+{
+	switch (pmc) {
+		case 0:
+			return mfspr(SPR_PMC1);
+			break;
+		case 1:
+			return mfspr(SPR_PMC2);
+			break;
+		case 2:
+			return mfspr(SPR_PMC3);
+			break;
+		case 3:
+			return mfspr(SPR_PMC4);
+			break;
+		case 4:
+			return mfspr(SPR_PMC5);
+			break;
+		case 5:
+			return mfspr(SPR_PMC6);
+		default:
+			panic("Invalid PMC number: %d\n", pmc);
+	}
+}
+
+static void
+powerpc_pmcn_write(unsigned int pmc, uint32_t val)
+{
+	switch (pmc) {
+		case 0:
+			mtspr(SPR_PMC1, val);
+			break;
+		case 1:
+			mtspr(SPR_PMC2, val);
+			break;
+		case 2:
+			mtspr(SPR_PMC3, val);
+			break;
+		case 3:
+			mtspr(SPR_PMC4, val);
+			break;
+		case 4:
+			mtspr(SPR_PMC5, val);
+			break;
+		case 5:
+			mtspr(SPR_PMC6, val);
+			break;
+		default:
+			panic("Invalid PMC number: %d\n", pmc);
+	}
+}
+
+static int
+powerpc_allocate_pmc(int cpu, int ri, struct pmc *pm,
+  const struct pmc_op_pmcallocate *a)
+{
+	enum pmc_event pe;
+	uint32_t caps, config, counter;
+	int i;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+	    ("[powerpc,%d] illegal row index %d", __LINE__, ri));
+
+	caps = a->pm_caps;
+
+	/*
+	 * TODO: Check actual class for different generations.
+	 */
+	if (a->pm_class != PMC_CLASS_PPC7450)
+		return (EINVAL);
+	pe = a->pm_ev;
+	for (i = 0; i < powerpc_event_codes_size; i++) {
+		if (powerpc_event_codes[i].pe_ev == pe) {
+			config = powerpc_event_codes[i].pe_code;
+			counter =  powerpc_event_codes[i].pe_counter_mask;
+			break;
+		}
+	}
+	if (i == powerpc_event_codes_size)
+		return (EINVAL);
+
+	if ((counter & (1 << ri)) == 0)
+		return (EINVAL);
+
+	if (caps & PMC_CAP_SYSTEM)
+		config |= POWERPC_PMC_KERNEL_ENABLE;
+	if (caps & PMC_CAP_USER)
+		config |= POWERPC_PMC_USER_ENABLE;
+	if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
+		config |= POWERPC_PMC_ENABLE;
+
+	pm->pm_md.pm_powerpc.pm_powerpc_evsel = config;
+
+	PMCDBG(MDP,ALL,2,"powerpc-allocate ri=%d -> config=0x%x", ri, config);
+
+	return 0;
+}
+
+static int
+powerpc_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+	struct pmc *pm;
+	pmc_value_t tmp;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+	    ("[powerpc,%d] illegal row index %d", __LINE__, ri));
+
+	pm  = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
+	tmp = powerpc_pmcn_read(ri);
+	PMCDBG(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp);
+	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+		*v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
+	else
+		*v = tmp;
+
+	return 0;
+}
+
+static int
+powerpc_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+	struct pmc *pm;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+
+	pm  = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
+
+	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+		v = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
+	
+	PMCDBG(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+	powerpc_pmcn_write(ri, v);
+
+	return 0;
+}
+
+static int
+powerpc_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+	struct pmc_hw *phw;
+
+	PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+
+	phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+
+	KASSERT(pm == NULL || phw->phw_pmc == NULL,
+	    ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
+	    __LINE__, pm, phw->phw_pmc));
+
+	phw->phw_pmc = pm;
+
+	return 0;
+}
+
+static int
+powerpc_start_pmc(int cpu, int ri)
+{
+	uint32_t config;
+        struct pmc *pm;
+        struct pmc_hw *phw;
+	register_t pmc_mmcr;
+
+	phw    = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+	pm     = phw->phw_pmc;
+	config = pm->pm_md.pm_powerpc.pm_powerpc_evsel & ~POWERPC_PMC_ENABLE;
+
+	/* Enable the PMC. */
+	switch (ri) {
+	case 0:
+		pmc_mmcr = mfspr(SPR_MMCR0);
+		pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, config);
+		mtspr(SPR_MMCR0, pmc_mmcr);
+		break;
+	case 1:
+		pmc_mmcr = mfspr(SPR_MMCR0);
+		pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, config);
+		mtspr(SPR_MMCR0, pmc_mmcr);
+		break;
+	case 2:
+		pmc_mmcr = mfspr(SPR_MMCR1);
+		pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, config);
+		mtspr(SPR_MMCR1, pmc_mmcr);
+		break;
+	case 3:
+		pmc_mmcr = mfspr(SPR_MMCR0);
+		pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, config);
+		mtspr(SPR_MMCR0, pmc_mmcr);
+		break;
+	case 4:
+		pmc_mmcr = mfspr(SPR_MMCR1);
+		pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, config);
+		mtspr(SPR_MMCR1, pmc_mmcr);
+		break;
+	case 5:
+		pmc_mmcr = mfspr(SPR_MMCR1);
+		pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, config);
+		mtspr(SPR_MMCR1, pmc_mmcr);
+		break;
+	default:
+		break;
+	}
+	
+	/*
+	 * The mask is inverted (enable is 1) compared to the flags in MMCR0,
+	 * which are freeze flags.
+	 */
+	config = ~pm->pm_md.pm_powerpc.pm_powerpc_evsel & POWERPC_PMC_ENABLE;
+
+	pmc_mmcr = mfspr(SPR_MMCR0);
+	pmc_mmcr &= ~SPR_MMCR0_FC;
+	pmc_mmcr |= config;
+	mtspr(SPR_MMCR0, pmc_mmcr);
+
+	return 0;
+}
+
+static int
+powerpc_stop_pmc(int cpu, int ri)
+{
+        struct pmc *pm;
+        struct pmc_hw *phw;
+        register_t pmc_mmcr;
+
+	phw    = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+	pm     = phw->phw_pmc;
+
+	/*
+	 * Disable the PMCs.
+	 */
+	switch (ri) {
+	case 0:
+		pmc_mmcr = mfspr(SPR_MMCR0);
+		pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, 0);
+		mtspr(SPR_MMCR0, pmc_mmcr);
+		break;
+	case 1:
+		pmc_mmcr = mfspr(SPR_MMCR0);
+		pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, 0);
+		mtspr(SPR_MMCR0, pmc_mmcr);
+		break;
+	case 2:
+		pmc_mmcr = mfspr(SPR_MMCR1);
+		pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, 0);
+		mtspr(SPR_MMCR1, pmc_mmcr);
+		break;
+	case 3:
+		pmc_mmcr = mfspr(SPR_MMCR0);
+		pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, 0);
+		mtspr(SPR_MMCR0, pmc_mmcr);
+		break;
+	case 4:
+		pmc_mmcr = mfspr(SPR_MMCR1);
+		pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, 0);
+		mtspr(SPR_MMCR1, pmc_mmcr);
+		break;
+	case 5:
+		pmc_mmcr = mfspr(SPR_MMCR1);
+		pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, 0);
+		mtspr(SPR_MMCR1, pmc_mmcr);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int
+powerpc_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+	struct pmc_hw *phw;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+
+	phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+	KASSERT(phw->phw_pmc == NULL,
+	    ("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+	return 0;
+}
+
+static int
+powerpc_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+	return 0;
+}
+
+static int
+powerpc_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+	return 0;
+}
+
+static int
+powerpc_intr(int cpu, struct trapframe *tf)
+{
+	int i, error, retval;
+	uint32_t config;
+	struct pmc *pm;
+	struct powerpc_cpu *pac;
+	pmc_value_t v;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[powerpc,%d] out of range CPU %d", __LINE__, cpu));
+
+	PMCDBG(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
+	    TRAPF_USERMODE(tf));
+
+	retval = 0;
+
+	pac = powerpc_pcpu[cpu];
+
+	/*
+	 * look for all PMCs that have interrupted:
+	 * - look for a running, sampling PMC which has overflowed
+	 *   and which has a valid 'struct pmc' association
+	 *
+	 * If found, we call a helper to process the interrupt.
+	 */
+
+	for (i = 0; i < PPC_MAX_PMCS; i++) {
+		if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL ||
+		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
+			continue;
+		}
+
+		if (!POWERPC_PMC_HAS_OVERFLOWED(i))
+			continue;
+
+		retval = 1;	/* Found an interrupting PMC. */
+
+		if (pm->pm_state != PMC_STATE_RUNNING)
+			continue;
+
+		/* Stop the PMC, reload count. */
+		v       = pm->pm_sc.pm_reloadcount;
+		config  = mfspr(SPR_MMCR0);
+
+		KASSERT((config & ~POWERPC_PMC_ENABLE) ==
+		    (pm->pm_md.pm_powerpc.pm_powerpc_evsel & ~POWERPC_PMC_ENABLE),
+		    ("[powerpc,%d] config mismatch reg=0x%x pm=0x%x", __LINE__,
+			config, pm->pm_md.pm_powerpc.pm_powerpc_evsel));
+
+		mtspr(SPR_MMCR0, config | SPR_MMCR0_FC);
+		powerpc_pmcn_write(i, v);
+
+		/* Restart the counter if logging succeeded. */
+		error = pmc_process_interrupt(cpu, pm, tf, TRAPF_USERMODE(tf));
+		mtspr(SPR_MMCR0, config);
+		if (error != 0)
+			powerpc_stop_pmc(cpu, i);
+		atomic_add_int(retval ? &pmc_stats.pm_intr_processed :
+				&pmc_stats.pm_intr_ignored, 1);
+
+	}
+
+	/* Re-enable PERF exceptions. */
+	mtspr(SPR_MMCR0, mfspr(SPR_MMCR0) | SPR_MMCR0_PMXE);
+
+	return (retval);
+}
+
+static int
+powerpc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+	int error;
+	struct pmc_hw *phw;
+	char powerpc_name[PMC_NAME_MAX];
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[powerpc,%d], illegal CPU %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+	    ("[powerpc,%d] row-index %d out of range", __LINE__, ri));
+
+	phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+	snprintf(powerpc_name, sizeof(powerpc_name), "POWERPC-%d", ri);
+	if ((error = copystr(powerpc_name, pi->pm_name, PMC_NAME_MAX,
+	    NULL)) != 0)
+		return error;
+	pi->pm_class = PMC_CLASS_PPC7450;
+	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+		pi->pm_enabled = TRUE;
+		*ppmc          = phw->phw_pmc;
+	} else {
+		pi->pm_enabled = FALSE;
+		*ppmc	       = NULL;
+	}
+
+	return (0);
+}
+
+static int
+powerpc_get_config(int cpu, int ri, struct pmc **ppm)
+{
+	*ppm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
+
+	return 0;
+}
+
+static int
+powerpc_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+	int first_ri, i;
+	struct pmc_cpu *pc;
+	struct powerpc_cpu *pac;
+	struct pmc_hw  *phw;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
+	PMCDBG(MDP,INI,1,"powerpc-init cpu=%d", cpu);
+
+	powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC,
+	    M_WAITOK|M_ZERO);
+	pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * PPC_MAX_PMCS,
+	    M_PMC, M_WAITOK|M_ZERO);
+	pc = pmc_pcpu[cpu];
+	first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_PPC7450].pcd_ri;
+	KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));
+
+	for (i = 0, phw = pac->pc_ppcpmcs; i < PPC_MAX_PMCS; i++, phw++) {
+		phw->phw_state    = PMC_PHW_FLAG_IS_ENABLED |
+		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
+		phw->phw_pmc      = NULL;
+		pc->pc_hwpmcs[i + first_ri] = phw;
+	}
+
+	/* Freeze all counters (FC), enable PM exceptions and overflow conditions, and clear MMCR1. */
+	mtspr(SPR_MMCR0, SPR_MMCR0_FC | SPR_MMCR0_PMXE | SPR_MMCR0_PMC1CE | SPR_MMCR0_PMCNCE);
+	mtspr(SPR_MMCR1, 0);
+
+	return 0;
+}
+
+static int
+powerpc_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+	uint32_t mmcr0 = mfspr(SPR_MMCR0);
+
+	mmcr0 |= SPR_MMCR0_FC;
+	mtspr(SPR_MMCR0, mmcr0);
+	free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC);
+	free(powerpc_pcpu[cpu], M_PMC);
+	return 0;
+}
+
+struct pmc_mdep *
+pmc_md_initialize()
+{
+	struct pmc_mdep *pmc_mdep;
+	struct pmc_classdep *pcd;
+	
+	/*
+	 * Allocate space for pointers to PMC HW descriptors and for
+	 * the MDEP structure used by MI code.
+	 */
+	powerpc_pcpu = malloc(sizeof(struct powerpc_cpu *) * pmc_cpu_max(), M_PMC,
+			   M_WAITOK|M_ZERO);
+
+	/* Just one class */
+	pmc_mdep = malloc(sizeof(struct pmc_mdep) + sizeof(struct pmc_classdep),
+			  M_PMC, M_WAITOK|M_ZERO);
+
+	pmc_mdep->pmd_cputype = PMC_CPU_PPC_7450;
+	pmc_mdep->pmd_nclass  = 1;
+
+	pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_PPC7450];
+	pcd->pcd_caps  = POWERPC_PMC_CAPS;
+	pcd->pcd_class = PMC_CLASS_PPC7450;
+	pcd->pcd_num   = PPC_MAX_PMCS;
+	pcd->pcd_ri    = pmc_mdep->pmd_npmc;
+	pcd->pcd_width = 32;	/* All PMCs, even in ppc970, are 32-bit */
+
+	pcd->pcd_allocate_pmc   = powerpc_allocate_pmc;
+	pcd->pcd_config_pmc     = powerpc_config_pmc;
+	pcd->pcd_pcpu_fini      = powerpc_pcpu_fini;
+	pcd->pcd_pcpu_init      = powerpc_pcpu_init;
+	pcd->pcd_describe       = powerpc_describe;
+	pcd->pcd_get_config	= powerpc_get_config;
+	pcd->pcd_read_pmc       = powerpc_read_pmc;
+	pcd->pcd_release_pmc    = powerpc_release_pmc;
+	pcd->pcd_start_pmc      = powerpc_start_pmc;
+	pcd->pcd_stop_pmc       = powerpc_stop_pmc;
+ 	pcd->pcd_write_pmc      = powerpc_write_pmc;
+
+	pmc_mdep->pmd_intr       = powerpc_intr;
+	pmc_mdep->pmd_switch_in  = powerpc_switch_in;
+	pmc_mdep->pmd_switch_out = powerpc_switch_out;
+	
+	pmc_mdep->pmd_npmc   += PPC_MAX_PMCS;
+
+	return (pmc_mdep);
+}
+
+void
+pmc_md_finalize(struct pmc_mdep *md)
+{
+	free(md, M_PMC);
+}
+
 int
 pmc_save_user_callchain(uintptr_t *cc, int maxsamples,
     struct trapframe *tf)

Modified: head/sys/dev/hwpmc/pmc_events.h
==============================================================================
--- head/sys/dev/hwpmc/pmc_events.h	Sat Dec 24 19:01:31 2011	(r228868)
+++ head/sys/dev/hwpmc/pmc_events.h	Sat Dec 24 19:34:52 2011	(r228869)
@@ -3093,6 +3093,231 @@ __PMC_EV_ALIAS("CYCLES_UNHALTED_L3_FLL_D
 #define	PMC_EV_MIPS24K_FIRST	PMC_EV_MIPS24K_CYCLE
 #define	PMC_EV_MIPS24K_LAST	PMC_EV_MIPS24K_WBB_FULL_PIPELINE_STALLS
 
+#define __PMC_EV_PPC7450() \
+	__PMC_EV(PPC7450, CYCLE)	\
+	__PMC_EV(PPC7450, INSTR_COMPLETED)	\
+	__PMC_EV(PPC7450, TLB_BIT_TRANSITIONS)	\
+	__PMC_EV(PPC7450, INSTR_DISPATCHED)	\
+	__PMC_EV(PPC7450, PMON_EXCEPT) \
+	__PMC_EV(PPC7450, PMON_SIG) \
+	__PMC_EV(PPC7450, VPU_INSTR_COMPLETED)	\
+	__PMC_EV(PPC7450, VFPU_INSTR_COMPLETED)	\
+	__PMC_EV(PPC7450, VIU1_INSTR_COMPLETED)	\
+	__PMC_EV(PPC7450, VIU2_INSTR_COMPLETED)	\
+	__PMC_EV(PPC7450, MTVSCR_INSTR_COMPLETED)	\
+	__PMC_EV(PPC7450, MTVRSAVE_INSTR_COMPLETED)	\
+	__PMC_EV(PPC7450, VPU_INSTR_WAIT_CYCLES)	\
+	__PMC_EV(PPC7450, VFPU_INSTR_WAIT_CYCLES)	\
+	__PMC_EV(PPC7450, VIU1_INSTR_WAIT_CYCLES)	\
+	__PMC_EV(PPC7450, VIU2_INSTR_WAIT_CYCLES)	\
+	__PMC_EV(PPC7450, MFVSCR_SYNC_CYCLES)	\
+	__PMC_EV(PPC7450, VSCR_SAT_SET)	\
+	__PMC_EV(PPC7450, STORE_INSTR_COMPLETED)	\
+	__PMC_EV(PPC7450, L1_INSTR_CACHE_MISSES)	\
+	__PMC_EV(PPC7450, L1_DATA_SNOOPS)	\
+	__PMC_EV(PPC7450, UNRESOLVED_BRANCHES)	\
+	__PMC_EV(PPC7450, SPEC_BUFFER_CYCLES)	\
+	__PMC_EV(PPC7450, BRANCH_UNIT_STALL_CYCLES)	\
+	__PMC_EV(PPC7450, TRUE_BRANCH_TARGET_HITS)	\
+	__PMC_EV(PPC7450, BRANCH_LINK_STAC_PREDICTED)	\
+	__PMC_EV(PPC7450, GPR_ISSUE_QUEUE_DISPATCHES)	\
+	__PMC_EV(PPC7450, CYCLES_THREE_INSTR_DISPATCHED)	\
+	__PMC_EV(PPC7450, THRESHOLD_INSTR_QUEUE_ENTRIES_CYCLES)	\
+	__PMC_EV(PPC7450, THRESHOLD_VEC_INSTR_QUEUE_ENTRIES_CYCLES)	\
+	__PMC_EV(PPC7450, CYCLES_NO_COMPLETED_INSTRS)	\
+	__PMC_EV(PPC7450, IU2_INSTR_COMPLETED)	\
+	__PMC_EV(PPC7450, BRANCHES_COMPLETED)	\
+	__PMC_EV(PPC7450, EIEIO_INSTR_COMPLETED)	\
+	__PMC_EV(PPC7450, MTSPR_INSTR_COMPLETED)	\
+	__PMC_EV(PPC7450, SC_INSTR_COMPLETED)	\

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
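
Not part of the commit, but a small sketch of how the libpmc changes above can
be exercised from userland: enumerating the PPC7450 event names that
pmc_event_names_of_class() returns for PMC_CLASS_PPC7450 after this change.
Only the static event table added in libpmc.c should be consulted for this
class, per the switch case in the diff above.

#include <sys/types.h>
#include <err.h>
#include <pmc.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const char **names;
	int i, nevents;

	/* Enumerate the PPC7450 event names exported by libpmc. */
	if (pmc_event_names_of_class(PMC_CLASS_PPC7450, &names, &nevents) < 0)
		err(1, "pmc_event_names_of_class");
	for (i = 0; i < nevents; i++)
		printf("%s\n", names[i]);
	free(names);
	return (0);
}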

