// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter support for e500 family processors.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 * Copyright 2010 Freescale Semiconductor, Inc.
 */
#include <linux/string.h>
#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>

/*
 * Map of generic hardware event types to hardware events
 * Zero if unsupported
 */
static int e500_generic_events[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = 1,
        [PERF_COUNT_HW_INSTRUCTIONS] = 2,
        [PERF_COUNT_HW_CACHE_MISSES] = 41,              /* Data L1 cache reloads */
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
        [PERF_COUNT_HW_BRANCH_MISSES] = 15,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 18,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 19,
};
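/*
 * The values above are raw performance-monitor event-select codes as
 * documented in the e500 core reference manuals; generic events left
 * at zero are rejected as unsupported by the common fsl_emb code.
 */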

#define C(x) PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        /*
         * D-cache misses are not split into read/write/prefetch;
         * use raw event 41.
         */
        [C(L1D)] = {            /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        27,             0       },
                [C(OP_WRITE)] = {       28,             0       },
                [C(OP_PREFETCH)] = {    29,             0       },
        },
        [C(L1I)] = {            /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        2,              60      },
                [C(OP_WRITE)] = {       -1,             -1      },
                [C(OP_PREFETCH)] = {    0,              0       },
        },
        /*
         * Assuming LL means L2, it's not a good match for this model.
         * It allocates only on L1 castout or explicit prefetch, and
         * does not have separate read/write events (but it does have
         * separate instruction/data events).
         */
        [C(LL)] = {             /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        0,              0       },
                [C(OP_WRITE)] = {       0,              0       },
                [C(OP_PREFETCH)] = {    0,              0       },
        },
        /*
         * There are data/instruction MMU misses, but that's a miss on
         * the chip's internal level-one TLB which is probably not
         * what the user wants.  Instead, unified level-two TLB misses
         * are reported here.
         */
        [C(DTLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        26,             66      },
                [C(OP_WRITE)] = {       -1,             -1      },
                [C(OP_PREFETCH)] = {    -1,             -1      },
        },
        [C(BPU)] = {            /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        12,             15      },
                [C(OP_WRITE)] = {       -1,             -1      },
                [C(OP_PREFETCH)] = {    -1,             -1      },
        },
        [C(NODE)] = {           /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        -1,             -1      },
                [C(OP_WRITE)] = {       -1,             -1      },
                [C(OP_PREFETCH)] = {    -1,             -1      },
        },
};

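/*
 * Number of valid event-select codes on the running core.  e500v1/v2
 * use the default of 128; e500mc/e5500 are bumped to 256 by the PVR
 * check in init_e500_pmu().
 */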
static int num_events = 128;

/* Upper half of event id is PMLCb, for threshold events */
static u64 e500_xlate_event(u64 event_id)
{
        u32 event_low = (u32)event_id;
        u64 ret;

        if (event_low >= num_events)
                return 0;

        ret = FSL_EMB_EVENT_VALID;

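        /*
         * Events 76-81 are restricted-counter events; they are also
         * the only events for which the PMLCb threshold fields in the
         * upper half of the event id are honoured.  Threshold bits on
         * any other event make the event id invalid.
         */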
        if (event_low >= 76 && event_low <= 81) {
                ret |= FSL_EMB_EVENT_RESTRICTED;
                ret |= event_id &
                       (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH);
        } else if (event_id &
                   (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)) {
                /* Threshold requested on non-threshold event */
                return 0;
        }

        return ret;
}

static struct fsl_emb_pmu e500_pmu = {
        .name                   = "e500 family",
        .n_counter              = 4,
        .n_restricted           = 2,
        .xlate_event            = e500_xlate_event,
        .n_generic              = ARRAY_SIZE(e500_generic_events),
        .generic_events         = e500_generic_events,
        .cache_events           = &e500_cache_events,
};

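/*
 * Identify the running core from the PVR and register the PMU.
 * Cores other than e500v1/v2, e500mc and e5500 are not handled here.
 */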
static int init_e500_pmu(void)
{
        unsigned int pvr = mfspr(SPRN_PVR);

        /* e500mc and e5500 implement 256 events */
        if (PVR_VER(pvr) == PVR_VER_E500MC || PVR_VER(pvr) == PVR_VER_E5500)
                num_events = 256;
        /* e500v1/v2 keep the default 128 events; anything else is unsupported */
        else if (PVR_VER(pvr) != PVR_VER_E500V1 && PVR_VER(pvr) != PVR_VER_E500V2)
                return -ENODEV;

        return register_fsl_emb_pmu(&e500_pmu);
}

early_initcall(init_e500_pmu);