// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter support for POWER10 processors.
 *
 * Copyright 2020 Madhavan Srinivasan, IBM Corporation.
 * Copyright 2020 Athira Rajeev, IBM Corporation.
 */

#define pr_fmt(fmt) "power10-pmu: " fmt

#include "isa207-common.h"

/*
 * Raw event encoding for Power10:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | | [ ]   [ src_match ] [  src_mask ]   | [ ] [ l2l3_sel ]  [  thresh_ctl   ]
 *   | |  |                                  |  |                       |
 *   | |  *- IFM (Linux)                     |  |    thresh start/stop -*
 *   | *- BHRB (Linux)                       |  src_sel
 *   *- EBB (Linux)                          *invert_bit
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [ ] [ ]   [ pmc ]   [unit ]   [ ] | m   [    pmcxsel    ]
 *     |        |         |   |                         |  | |
 *     |        |         |   |                         |  | *- mark
 *     |        |         |   *- L1/L2/L3 cache_sel     |  |*-radix_scope_qual
 *     |        |         sdar_mode                     |
 *     |        *- sampling mode for marked events      *- combine
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit (PMCxUNIT)
 * MMCR1[24] = pmc1combine[0]
 * MMCR1[25] = pmc1combine[1]
 * MMCR1[26] = pmc2combine[0]
 * MMCR1[27] = pmc2combine[1]
 * MMCR1[28] = pmc3combine[0]
 * MMCR1[29] = pmc3combine[1]
 * MMCR1[30] = pmc4combine[0]
 * MMCR1[31] = pmc4combine[1]
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *        MMCR1[20:27] = thresh_ctl
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *        MMCR1[20:27] = thresh_ctl
 * else
 *        MMCRA[48:55] = thresh_ctl (THRESH START/END)
 *
 * if thresh_sel:
 *        MMCRA[45:47] = thresh_sel
 *
 * if l2l3_sel:
 *        MMCR2[56:60] = l2l3_sel[0:4]
 *
 * MMCR1[16] = cache_sel[0]
 * MMCR1[17] = cache_sel[1]
 * MMCR1[18] = radix_scope_qual
 *
 * if mark:
 *        MMCRA[63] = 1 (SAMPLE_ENABLE)
 *        MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
 *        MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
 *
 * if EBB and BHRB:
 *        MMCRA[32:33] = IFM
 *
 * MMCRA[SDAR_MODE] = sdar_mode[0:1]
 */
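
/*
 * Worked example (illustrative only; 0x23054 is a made-up code, not taken
 * from power10-events-list.h): a raw event of 0x23054 decodes, per the
 * layout above, as pmcxsel = 0x54 (config:0-7), unit = 3 (config:12-15)
 * and pmc = 2 (config:16-19), with every other field zero, i.e. "count
 * event select 0x54 on PMC2 against unit 3". From userspace this would be
 * requested as:
 *
 *        perf stat -e r23054 -- true
 */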

/*
 * Some power10 event codes.
 */
#define EVENT(_name, _code) enum{_name = _code}

#include "power10-events-list.h"

#undef EVENT
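
/*
 * With the definition above, a (hypothetical) list entry such as
 * EVENT(PM_FOO, 0x12345) expands to:
 *
 *        enum { PM_FOO = 0x12345 };
 *
 * so every event name in power10-events-list.h becomes a compile-time
 * constant with no storage behind it.
 */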

/* MMCRA IFM bits - POWER10 */
#define POWER10_MMCRA_IFM1        0x0000000040000000UL
#define POWER10_MMCRA_IFM2        0x0000000080000000UL
#define POWER10_MMCRA_IFM3        0x00000000C0000000UL
#define POWER10_MMCRA_BHRB_MASK   0x00000000C0000000UL
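
/*
 * The IFM values select the BHRB instruction filtering mode; see
 * power10_bhrb_filter_map() below for the mapping from perf branch
 * sample types (IFM1: any call, IFM2: indirect call, IFM3: conditional).
 */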

extern u64 PERF_REG_EXTENDED_MASK;

/* Table of alternatives, sorted by column 0 */
static const unsigned int power10_event_alternatives[][MAX_ALT] = {
        { PM_INST_CMPL_ALT, PM_INST_CMPL },
        { PM_CYC_ALT, PM_CYC },
};

static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
        int num_alt = 0;

        num_alt = isa207_get_alternatives(event, alt,
                                          ARRAY_SIZE(power10_event_alternatives), flags,
                                          power10_event_alternatives);

        return num_alt;
}

static int power10_check_attr_config(struct perf_event *ev)
{
        u64 val;
        u64 event = ev->attr.config;

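        /*
         * A sampling-mode value of 0x10 is presumably a reserved
         * encoding on Power10; reject it, along with anything the
         * common ISA v3.x check rejects.
         */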
        val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
        if (val == 0x10 || isa3XX_check_attr_config(ev))
                return -EINVAL;

        return 0;
}

GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL);
GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
GENERIC_EVENT_ATTR(mem-loads, MEM_LOADS);
GENERIC_EVENT_ATTR(mem-stores, MEM_STORES);
GENERIC_EVENT_ATTR(branch-instructions, PM_BR_FIN);
GENERIC_EVENT_ATTR(branch-misses, PM_MPRED_BR_FIN);
GENERIC_EVENT_ATTR(cache-misses, PM_LD_DEMAND_MISS_L1_FIN);
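
/*
 * The duplicated generic names above are intentional: the PM_BR_CMPL,
 * PM_BR_MPRED_CMPL and PM_LD_MISS_L1 encodings back the DD1 attribute
 * group, while the PM_BR_FIN, PM_MPRED_BR_FIN and
 * PM_LD_DEMAND_MISS_L1_FIN encodings back the group used on later
 * revisions; see power10_events_attr_dd1[] and power10_events_attr[].
 */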

CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_LD_PREFETCH_CACHE_LINE_MISS);
CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_REQ);
CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PF_MISS_L3);
CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);

static struct attribute *power10_events_attr_dd1[] = {
        GENERIC_EVENT_PTR(PM_CYC),
        GENERIC_EVENT_PTR(PM_INST_CMPL),
        GENERIC_EVENT_PTR(PM_BR_CMPL),
        GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
        GENERIC_EVENT_PTR(PM_LD_REF_L1),
        GENERIC_EVENT_PTR(PM_LD_MISS_L1),
        GENERIC_EVENT_PTR(MEM_LOADS),
        GENERIC_EVENT_PTR(MEM_STORES),
        CACHE_EVENT_PTR(PM_LD_MISS_L1),
        CACHE_EVENT_PTR(PM_LD_REF_L1),
        CACHE_EVENT_PTR(PM_LD_PREFETCH_CACHE_LINE_MISS),
        CACHE_EVENT_PTR(PM_ST_MISS_L1),
        CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
        CACHE_EVENT_PTR(PM_INST_FROM_L1),
        CACHE_EVENT_PTR(PM_IC_PREF_REQ),
        CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
        CACHE_EVENT_PTR(PM_DATA_FROM_L3),
        CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
        CACHE_EVENT_PTR(PM_BR_CMPL),
        CACHE_EVENT_PTR(PM_DTLB_MISS),
        CACHE_EVENT_PTR(PM_ITLB_MISS),
        NULL
};

static struct attribute *power10_events_attr[] = {
        GENERIC_EVENT_PTR(PM_CYC),
        GENERIC_EVENT_PTR(PM_INST_CMPL),
        GENERIC_EVENT_PTR(PM_BR_FIN),
        GENERIC_EVENT_PTR(PM_MPRED_BR_FIN),
        GENERIC_EVENT_PTR(PM_LD_REF_L1),
        GENERIC_EVENT_PTR(PM_LD_DEMAND_MISS_L1_FIN),
        GENERIC_EVENT_PTR(MEM_LOADS),
        GENERIC_EVENT_PTR(MEM_STORES),
        CACHE_EVENT_PTR(PM_LD_MISS_L1),
        CACHE_EVENT_PTR(PM_LD_REF_L1),
        CACHE_EVENT_PTR(PM_LD_PREFETCH_CACHE_LINE_MISS),
        CACHE_EVENT_PTR(PM_ST_MISS_L1),
        CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
        CACHE_EVENT_PTR(PM_INST_FROM_L1),
        CACHE_EVENT_PTR(PM_IC_PREF_REQ),
        CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
        CACHE_EVENT_PTR(PM_DATA_FROM_L3),
        CACHE_EVENT_PTR(PM_L3_PF_MISS_L3),
        CACHE_EVENT_PTR(PM_L2_ST_MISS),
        CACHE_EVENT_PTR(PM_L2_ST),
        CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
        CACHE_EVENT_PTR(PM_BR_CMPL),
        CACHE_EVENT_PTR(PM_DTLB_MISS),
        CACHE_EVENT_PTR(PM_ITLB_MISS),
        NULL
};

static const struct attribute_group power10_pmu_events_group_dd1 = {
        .name = "events",
        .attrs = power10_events_attr_dd1,
};

static const struct attribute_group power10_pmu_events_group = {
        .name = "events",
        .attrs = power10_events_attr,
};

PMU_FORMAT_ATTR(event, "config:0-59");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
PMU_FORMAT_ATTR(combine, "config:10-11");
PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
PMU_FORMAT_ATTR(cache_sel, "config:20-21");
PMU_FORMAT_ATTR(sdar_mode, "config:22-23");
PMU_FORMAT_ATTR(sample_mode, "config:24-28");
PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
PMU_FORMAT_ATTR(thresh_start, "config:36-39");
PMU_FORMAT_ATTR(l2l3_sel, "config:40-44");
PMU_FORMAT_ATTR(src_sel, "config:45-46");
PMU_FORMAT_ATTR(invert_bit, "config:47");
PMU_FORMAT_ATTR(src_mask, "config:48-53");
PMU_FORMAT_ATTR(src_match, "config:54-59");
PMU_FORMAT_ATTR(radix_scope, "config:9");
PMU_FORMAT_ATTR(thresh_cmp, "config1:0-17");
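
/*
 * These format attributes let userspace build a raw event field by
 * field instead of hand-computing the encoding, e.g. (illustrative
 * values, matching the worked example at the top of this file):
 *
 *        perf stat -e cpu/pmcxsel=0x54,unit=3,pmc=2/ -- true
 */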

static struct attribute *power10_pmu_format_attr[] = {
        &format_attr_event.attr,
        &format_attr_pmcxsel.attr,
        &format_attr_mark.attr,
        &format_attr_combine.attr,
        &format_attr_unit.attr,
        &format_attr_pmc.attr,
        &format_attr_cache_sel.attr,
        &format_attr_sdar_mode.attr,
        &format_attr_sample_mode.attr,
        &format_attr_thresh_sel.attr,
        &format_attr_thresh_stop.attr,
        &format_attr_thresh_start.attr,
        &format_attr_l2l3_sel.attr,
        &format_attr_src_sel.attr,
        &format_attr_invert_bit.attr,
        &format_attr_src_mask.attr,
        &format_attr_src_match.attr,
        &format_attr_radix_scope.attr,
        &format_attr_thresh_cmp.attr,
        NULL,
};

static const struct attribute_group power10_pmu_format_group = {
        .name = "format",
        .attrs = power10_pmu_format_attr,
};

static struct attribute *power10_pmu_caps_attrs[] = {
        NULL
};

static struct attribute_group power10_pmu_caps_group = {
        .name = "caps",
        .attrs = power10_pmu_caps_attrs,
};

static const struct attribute_group *power10_pmu_attr_groups_dd1[] = {
        &power10_pmu_format_group,
        &power10_pmu_events_group_dd1,
        &power10_pmu_caps_group,
        NULL,
};

static const struct attribute_group *power10_pmu_attr_groups[] = {
        &power10_pmu_format_group,
        &power10_pmu_events_group,
        &power10_pmu_caps_group,
        NULL,
};

static int power10_generic_events_dd1[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
        [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL,
        [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
        [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
        [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
};

static int power10_generic_events[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
        [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_FIN,
        [PERF_COUNT_HW_BRANCH_MISSES] = PM_MPRED_BR_FIN,
        [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
        [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_DEMAND_MISS_L1_FIN,
};

static u64 power10_bhrb_filter_map(u64 branch_sample_type)
{
        u64 pmu_bhrb_filter = 0;

        /*
         * BHRB and regular PMU events share the same privilege state
         * filter configuration. BHRB is always recorded along with a
         * regular PMU event. As the privilege state filter is handled
         * in the basic PMC configuration of the accompanying regular
         * PMU event, we ignore any separate BHRB specific request.
         */

        /* No branch filter requested */
        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
                return pmu_bhrb_filter;

        /* Invalid branch filter options - HW does not support */
        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
                return -1;

        if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL) {
                pmu_bhrb_filter |= POWER10_MMCRA_IFM2;
                return pmu_bhrb_filter;
        }

        if (branch_sample_type & PERF_SAMPLE_BRANCH_COND) {
                pmu_bhrb_filter |= POWER10_MMCRA_IFM3;
                return pmu_bhrb_filter;
        }

        if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
                return -1;

        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
                pmu_bhrb_filter |= POWER10_MMCRA_IFM1;
                return pmu_bhrb_filter;
        }

        /* Everything else is unsupported */
        return -1;
}
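
/*
 * For example, "perf record -j any_call,u ..." arrives here with
 * PERF_SAMPLE_BRANCH_ANY_CALL set (plus a privilege bit, which is
 * ignored as described above) and selects the IFM1 filter.
 */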

static void power10_config_bhrb(u64 pmu_bhrb_filter)
{
        pmu_bhrb_filter &= POWER10_MMCRA_BHRB_MASK;

        /* Enable BHRB filter in PMU */
        mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

#define C(x) PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static u64 power10_cache_events_dd1[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = PM_LD_REF_L1,
                        [C(RESULT_MISS)] = PM_LD_MISS_L1,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = 0,
                        [C(RESULT_MISS)] = PM_ST_MISS_L1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = PM_LD_PREFETCH_CACHE_LINE_MISS,
                        [C(RESULT_MISS)] = 0,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = PM_INST_FROM_L1,
                        [C(RESULT_MISS)] = PM_L1_ICACHE_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = PM_INST_FROM_L1MISS,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = PM_IC_PREF_REQ,
                        [C(RESULT_MISS)] = 0,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = PM_DATA_FROM_L3,
                        [C(RESULT_MISS)] = PM_DATA_FROM_L3MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = 0,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = 0,
                        [C(RESULT_MISS)] = PM_DTLB_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = 0,
                        [C(RESULT_MISS)] = PM_ITLB_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = PM_BR_CMPL,
                        [C(RESULT_MISS)] = PM_BR_MPRED_CMPL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
        },
};

static u64 power10_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = PM_LD_REF_L1,
                        [C(RESULT_MISS)] = PM_LD_MISS_L1,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = 0,
                        [C(RESULT_MISS)] = PM_ST_MISS_L1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = PM_LD_PREFETCH_CACHE_LINE_MISS,
                        [C(RESULT_MISS)] = 0,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = PM_INST_FROM_L1,
                        [C(RESULT_MISS)] = PM_L1_ICACHE_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = PM_INST_FROM_L1MISS,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = PM_IC_PREF_REQ,
                        [C(RESULT_MISS)] = 0,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = PM_DATA_FROM_L3,
                        [C(RESULT_MISS)] = PM_DATA_FROM_L3MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = PM_L2_ST,
                        [C(RESULT_MISS)] = PM_L2_ST_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = PM_L3_PF_MISS_L3,
                        [C(RESULT_MISS)] = 0,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = 0,
                        [C(RESULT_MISS)] = PM_DTLB_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = 0,
                        [C(RESULT_MISS)] = PM_ITLB_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = PM_BR_CMPL,
                        [C(RESULT_MISS)] = PM_BR_MPRED_CMPL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = -1,
                        [C(RESULT_MISS)] = -1,
                },
        },
};

#undef C

/*
 * Set the MMCR0[CC56RUN] bit to enable counting for
 * PMC5 and PMC6 regardless of the state of CTRL[RUN],
 * so that we can use counters 5 and 6 as PM_INST_CMPL and
 * PM_CYC.
 */
static int power10_compute_mmcr(u64 event[], int n_ev,
                                unsigned int hwc[], struct mmcr_regs *mmcr,
                                struct perf_event *pevents[], u32 flags)
{
        int ret;

        ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags);
        if (!ret)
                mmcr->mmcr0 |= MMCR0_C56RUN;
        return ret;
}

static struct power_pmu power10_pmu = {
        .name = "POWER10",
        .n_counter = MAX_PMU_COUNTERS,
        .add_fields = ISA207_ADD_FIELDS,
        .test_adder = ISA207_TEST_ADDER,
        .group_constraint_mask = CNST_CACHE_PMC4_MASK,
        .group_constraint_val = CNST_CACHE_PMC4_VAL,
        .compute_mmcr = power10_compute_mmcr,
        .config_bhrb = power10_config_bhrb,
        .bhrb_filter_map = power10_bhrb_filter_map,
        .get_constraint = isa207_get_constraint,
        .get_alternatives = power10_get_alternatives,
        .get_mem_data_src = isa207_get_mem_data_src,
        .get_mem_weight = isa207_get_mem_weight,
        .disable_pmc = isa207_disable_pmc,
        .flags = PPMU_HAS_SIER | PPMU_ARCH_207S |
                 PPMU_ARCH_31 | PPMU_HAS_ATTR_CONFIG1 |
                 PPMU_P10,
        .n_generic = ARRAY_SIZE(power10_generic_events),
        .generic_events = power10_generic_events,
        .cache_events = &power10_cache_events,
        .attr_groups = power10_pmu_attr_groups,
        .bhrb_nr = 32,
        .capabilities = PERF_PMU_CAP_EXTENDED_REGS,
        .check_attr_config = power10_check_attr_config,
};

int __init init_power10_pmu(void)
{
        unsigned int pvr;
        int rc;

        pvr = mfspr(SPRN_PVR);
        if (PVR_VER(pvr) != PVR_POWER10)
                return -ENODEV;

        /* Add the ppmu flag for power10 DD1 */
        if (PVR_CFG(pvr) == 1)
                power10_pmu.flags |= PPMU_P10_DD1;

        /* Set the PERF_REG_EXTENDED_MASK here */
        PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31;

        if (PVR_CFG(pvr) == 1) {
                power10_pmu.generic_events = power10_generic_events_dd1;
                power10_pmu.attr_groups = power10_pmu_attr_groups_dd1;
                power10_pmu.cache_events = &power10_cache_events_dd1;
        }

        rc = register_power_pmu(&power10_pmu);
        if (rc)
                return rc;

        /* Tell userspace that EBB is supported */
        cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

        return 0;
}

static struct power_pmu power11_pmu;

int __init init_power11_pmu(void)
{
        unsigned int pvr;
        int rc;

        pvr = mfspr(SPRN_PVR);
        if (PVR_VER(pvr) != PVR_POWER11)
                return -ENODEV;

        /* Set the PERF_REG_EXTENDED_MASK here */
        PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31;

        power11_pmu = power10_pmu;
        power11_pmu.name = "Power11";

        rc = register_power_pmu(&power11_pmu);
        if (rc)
                return rc;

        /* Tell userspace that EBB is supported */
        cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

        return 0;
}