/*
 * Performance events x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/static_call.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/msr.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/unwind.h>
#include <asm/uprobes.h>
#include <asm/ibt.h>

#include "perf_event.h"

struct x86_pmu x86_pmu __read_mostly;
static struct pmu pmu;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
	.pmu = &pmu,
};

DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
DEFINE_STATIC_KEY_FALSE(perf_is_hybrid);

/*
 * This here uses DEFINE_STATIC_CALL_NULL() to get a static_call defined
 * from just a typename, as opposed to an actual function.
 */
DEFINE_STATIC_CALL_NULL(x86_pmu_handle_irq, *x86_pmu.handle_irq);
DEFINE_STATIC_CALL_NULL(x86_pmu_disable_all, *x86_pmu.disable_all);
DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all, *x86_pmu.enable_all);
DEFINE_STATIC_CALL_NULL(x86_pmu_enable, *x86_pmu.enable);
DEFINE_STATIC_CALL_NULL(x86_pmu_disable, *x86_pmu.disable);

DEFINE_STATIC_CALL_NULL(x86_pmu_assign, *x86_pmu.assign);

DEFINE_STATIC_CALL_NULL(x86_pmu_add, *x86_pmu.add);
DEFINE_STATIC_CALL_NULL(x86_pmu_del, *x86_pmu.del);
DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);

DEFINE_STATIC_CALL_NULL(x86_pmu_set_period, *x86_pmu.set_period);
DEFINE_STATIC_CALL_NULL(x86_pmu_update, *x86_pmu.update);
DEFINE_STATIC_CALL_NULL(x86_pmu_limit_period, *x86_pmu.limit_period);

DEFINE_STATIC_CALL_NULL(x86_pmu_schedule_events, *x86_pmu.schedule_events);
DEFINE_STATIC_CALL_NULL(x86_pmu_get_event_constraints, *x86_pmu.get_event_constraints);
DEFINE_STATIC_CALL_NULL(x86_pmu_put_event_constraints, *x86_pmu.put_event_constraints);

DEFINE_STATIC_CALL_NULL(x86_pmu_start_scheduling, *x86_pmu.start_scheduling);
DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling, *x86_pmu.stop_scheduling);

DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task, *x86_pmu.sched_task);

DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);

DEFINE_STATIC_CALL_NULL(x86_pmu_filter, *x86_pmu.filter);

DEFINE_STATIC_CALL_NULL(x86_pmu_late_setup, *x86_pmu.late_setup);

DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_enable, *x86_pmu.pebs_enable);
DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_disable, *x86_pmu.pebs_disable);
DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_enable_all, *x86_pmu.pebs_enable_all);
DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_disable_all, *x86_pmu.pebs_disable_all);

/*
 * This one is magic, it will get called even when PMU init fails (because
 * there is no PMU), in which case it should simply return NULL.
 */
DEFINE_STATIC_CALL_RET0(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);

u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the new raw count.
 */
u64 x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	u64 delta;

	if (unlikely(!hwc->event_base))
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	do {
		new_raw_count = rdpmc(hwc->event_base_rdpmc);
	} while (!local64_try_cmpxchg(&hwc->prev_count,
				      &prev_raw_count, new_raw_count));

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;
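	/*
	 * Illustrative example (editorial note, not from the original
	 * source): with 48-bit counters, shift = 16. If the counter
	 * wrapped from prev_raw_count = 0xfffffffffffe to
	 * new_raw_count = 0x2, then (0x2 << 16) - (0xfffffffffffe << 16)
	 * wraps modulo 2^64 to 0x40000, and >> 16 yields the expected
	 * delta of 4 counts.
	 */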

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
	struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
	struct hw_perf_event_extra *reg;
	struct extra_reg *er;

	reg = &event->hw.extra_reg;

	if (!extra_regs)
		return 0;

	for (er = extra_regs; er->msr; er++) {
		if (er->event != (config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;
		/* Check if the extra MSRs can be safely accessed */
		if (!er->extra_msr_access)
			return -ENXIO;

		reg->idx = er->idx;
		reg->config = event->attr.config1;
		reg->reg = er->msr;
		break;
	}
	return 0;
}

static atomic_t active_events;
static atomic_t pmc_refcount;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static inline u64 get_possible_counter_mask(void)
{
	u64 cntr_mask = x86_pmu.cntr_mask64;
	int i;

	if (!is_hybrid())
		return cntr_mask;

	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++)
		cntr_mask |= x86_pmu.hybrid_pmu[i].cntr_mask64;

	return cntr_mask;
}

static bool reserve_pmc_hardware(void)
{
	u64 cntr_mask = get_possible_counter_mask();
	int i, end;

	for_each_set_bit(i, (unsigned long *)&cntr_mask, X86_PMC_IDX_MAX) {
		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
			goto perfctr_fail;
	}

	for_each_set_bit(i, (unsigned long *)&cntr_mask, X86_PMC_IDX_MAX) {
		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	end = i;
	for_each_set_bit(i, (unsigned long *)&cntr_mask, end)
		release_evntsel_nmi(x86_pmu_config_addr(i));
	i = X86_PMC_IDX_MAX;
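	/*
	 * Clarifying note: by the time eventsel reservation starts, every
	 * perfctr in the mask was reserved successfully, so setting i to
	 * X86_PMC_IDX_MAX makes the fall-through below release all of them.
	 */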

perfctr_fail:
	end = i;
	for_each_set_bit(i, (unsigned long *)&cntr_mask, end)
		release_perfctr_nmi(x86_pmu_event_addr(i));

	return false;
}

static void release_pmc_hardware(void)
{
	u64 cntr_mask = get_possible_counter_mask();
	int i;

	for_each_set_bit(i, (unsigned long *)&cntr_mask, X86_PMC_IDX_MAX) {
		release_perfctr_nmi(x86_pmu_event_addr(i));
		release_evntsel_nmi(x86_pmu_config_addr(i));
	}
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
		     unsigned long *fixed_cntr_mask)
{
	u64 val, val_fail = -1, val_new = ~0;
	int i, reg, reg_fail = -1, ret = 0;
	int bios_fail = 0;
	int reg_safe = -1;

	/*
	 * Check to see if the BIOS enabled any of the counters, if so
	 * complain and bail.
	 */
	for_each_set_bit(i, cntr_mask, X86_PMC_IDX_MAX) {
		reg = x86_pmu_config_addr(i);
		ret = rdmsrq_safe(reg, &val);
		if (ret)
			goto msr_fail;
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
			bios_fail = 1;
			val_fail = val;
			reg_fail = reg;
		} else {
			reg_safe = i;
		}
	}

	if (*(u64 *)fixed_cntr_mask) {
		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		ret = rdmsrq_safe(reg, &val);
		if (ret)
			goto msr_fail;
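		/*
		 * Clarifying note: FIXED_CTR_CTRL packs one 4-bit control
		 * field per fixed counter; bits 0-1 of each field are the
		 * ring-0/ring-3 enable bits, hence the 0x3 test below.
		 */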
		for_each_set_bit(i, fixed_cntr_mask, X86_PMC_IDX_MAX) {
			if (fixed_counter_disabled(i, pmu))
				continue;
			if (val & (0x03ULL << i*4)) {
				bios_fail = 1;
				val_fail = val;
				reg_fail = reg;
			}
		}
	}

	/*
	 * If all the counters are enabled, the below test will always
	 * fail. The tools will also become useless in this scenario.
	 * Just fail and disable the hardware counters.
	 */

	if (reg_safe == -1) {
		reg = reg_safe;
		goto msr_fail;
	}

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches, this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	reg = x86_pmu_event_addr(reg_safe);
	if (rdmsrq_safe(reg, &val))
		goto msr_fail;
	val ^= 0xffffUL;
	ret = wrmsrq_safe(reg, val);
	ret |= rdmsrq_safe(reg, &val_new);
	if (ret || val != val_new)
		goto msr_fail;

	/*
	 * We still allow the PMU driver to operate:
	 */
	if (bios_fail) {
		pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
		pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
		       reg_fail, val_fail);
	}

	return true;

msr_fail:
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		pr_cont("PMU not available due to virtualization, using software events only.\n");
	} else {
		pr_cont("Broken PMU hardware detected, using software events only.\n");
		pr_err("Failed to access perfctr msr (MSR %x is %Lx)\n",
		       reg, val_new);
	}

	return false;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	atomic_dec(&active_events);
}

void hw_perf_lbr_event_destroy(struct perf_event *event)
{
	hw_perf_event_destroy(event);

	/* undo the lbr/bts event accounting */
	x86_del_exclusive(x86_lbr_exclusive_lbr);
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;
	cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);

	val = hybrid_var(event->pmu, hw_cache_event_ids)[cache_type][cache_op][cache_result];
	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;
	attr->config1 = hybrid_var(event->pmu, hw_cache_extra_regs)[cache_type][cache_op][cache_result];
	return x86_pmu_extra_regs(val, event);
}

int x86_reserve_hardware(void)
{
	int err = 0;

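	/*
	 * Clarifying note: classic two-stage refcount. The lockless
	 * atomic_inc_not_zero() succeeds while the PMC hardware is
	 * already reserved; only the 0 -> 1 transition takes the mutex
	 * and performs the actual reservation.
	 */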
	if (!atomic_inc_not_zero(&pmc_refcount)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&pmc_refcount) == 0) {
			if (!reserve_pmc_hardware()) {
				err = -EBUSY;
			} else {
				reserve_ds_buffers();
				reserve_lbr_buffers();
			}
		}
		if (!err)
			atomic_inc(&pmc_refcount);
		mutex_unlock(&pmc_reserve_mutex);
	}

	return err;
}

void x86_release_hardware(void)
{
	if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		release_lbr_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Check if we can create an event of a certain type (i.e. that no
 * conflicting events are present).
 */
int x86_add_exclusive(unsigned int what)
{
	int i;

	/*
	 * When lbr_pt_coexist we allow PT to coexist with either LBR or BTS.
	 * LBR and BTS are still mutually exclusive.
	 */
	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
		goto out;

	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
		mutex_lock(&pmc_reserve_mutex);
		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
				goto fail_unlock;
		}
		atomic_inc(&x86_pmu.lbr_exclusive[what]);
		mutex_unlock(&pmc_reserve_mutex);
	}

out:
	atomic_inc(&active_events);
	return 0;

fail_unlock:
	mutex_unlock(&pmc_reserve_mutex);
	return -EBUSY;
}

void x86_del_exclusive(unsigned int what)
{
	atomic_dec(&active_events);

	/*
	 * See the comment in x86_add_exclusive().
	 */
	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
		return;

	atomic_dec(&x86_pmu.lbr_exclusive[what]);
}

int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!is_sampling_event(event)) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (attr->type == event->pmu->type)
		return x86_pmu_extra_regs(event->attr.config, event);

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, event);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	hwc->config |= config;

	return 0;
}

/*
 * Check that branch_sample_type is compatible with the settings needed
 * for precise_ip > 1, which implies using the LBR to capture ALL taken
 * branches at the priv levels of the measurement.
 */
static inline int precise_br_compat(struct perf_event *event)
{
	u64 m = event->attr.branch_sample_type;
	u64 b = 0;

	/* must capture all branches */
	if (!(m & PERF_SAMPLE_BRANCH_ANY))
		return 0;

	m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_user)
		b |= PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_kernel)
		b |= PERF_SAMPLE_BRANCH_KERNEL;

	/*
	 * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
	 */

	return m == b;
}

int x86_pmu_max_precise(void)
{
	int precise = 0;

	/* Support for constant skid */
	if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
		precise++;

		/* Support for IP fixup */
		if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
			precise++;

		if (x86_pmu.pebs_prec_dist)
			precise++;
	}
	return precise;
}

int x86_pmu_hw_config(struct perf_event *event)
{
	if (event->attr.precise_ip) {
		int precise = x86_pmu_max_precise();

		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;

		/* There's no sense in having PEBS for non-sampling events: */
		if (!is_sampling_event(event))
			return -EINVAL;
	}
	/*
	 * check that PEBS LBR correction does not conflict with
	 * whatever the user is asking with attr->branch_sample_type
	 */
	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
		u64 *br_type = &event->attr.branch_sample_type;

		if (has_branch_stack(event)) {
			if (!precise_br_compat(event))
				return -EOPNOTSUPP;

			/* branch_sample_type is compatible */

		} else {
			/*
			 * user did not specify branch_sample_type
			 *
			 * For PEBS fixups, we capture all
			 * the branches at the priv level of the
			 * event.
			 */
			*br_type = PERF_SAMPLE_BRANCH_ANY;

			if (!event->attr.exclude_user)
				*br_type |= PERF_SAMPLE_BRANCH_USER;

			if (!event->attr.exclude_kernel)
				*br_type |= PERF_SAMPLE_BRANCH_KERNEL;
		}
	}

	if (branch_sample_call_stack(event))
		event->attach_state |= PERF_ATTACH_TASK_DATA;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == event->pmu->type)
		event->hw.config |= x86_pmu_get_event_config(event);

	if (is_sampling_event(event) && !event->attr.freq && x86_pmu.limit_period) {
		s64 left = event->attr.sample_period;
		x86_pmu.limit_period(event, &left);
		if (left > event->attr.sample_period)
			return -EINVAL;
	}

	/* sample_regs_user never supports XMM registers */
	if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK))
		return -EINVAL;
	/*
	 * Besides the general purpose registers, XMM registers may
	 * be collected in PEBS on some platforms, e.g. Icelake
	 */
	if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) {
		if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS))
			return -EINVAL;

		if (!event->attr.precise_ip)
			return -EINVAL;
	}

	return x86_setup_perfctr(event);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = x86_reserve_hardware();
	if (err)
		return err;

	atomic_inc(&active_events);
	event->destroy = hw_perf_event_destroy;

	event->hw.idx = -1;
	event->hw.last_cpu = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.dyn_constraint = ~0ULL;

	/* mark unused */
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	return x86_pmu.hw_config(event);
}

void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrq(x86_pmu_config_addr(idx), val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrq(x86_pmu_config_addr(idx), val);
		if (is_counter_pair(hwc))
			wrmsrq(x86_pmu_config_addr(idx + 1), 0);
	}
}

struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data)
{
	return static_call(x86_pmu_guest_get_msrs)(nr, data);
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);

/*
 * A PMI may land after enabled=0, either before or after disable_all.
 *
 * If the PMI hits before disable_all, the PMU will be disabled in the NMI
 * handler. It will not be re-enabled in the NMI handler again, because
 * enabled=0. After handling the NMI, disable_all will be called, which
 * will not change the state either. If the PMI hits after disable_all,
 * the PMU is already disabled before entering the NMI handler. The NMI
 * handler will not change the state either.
 *
 * So either situation is harmless.
 */
static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	static_call(x86_pmu_disable_all)();
}

void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}

int is_x86_event(struct perf_event *event)
{
	/*
	 * For non-hybrid platforms, the type of the x86 PMU is
	 * always PERF_TYPE_RAW.
	 * For hybrid platforms, the PERF_PMU_CAP_EXTENDED_HW_TYPE
	 * capability is unique to the x86 PMU.
	 * Use these to detect an x86 event.
	 */
	if (event->pmu->type == PERF_TYPE_RAW ||
	    event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE)
		return true;

	return false;
}

struct pmu *x86_get_pmu(unsigned int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	/*
	 * All CPUs of this hybrid type have gone offline;
	 * x86_get_pmu() should not be invoked.
	 */
	if (WARN_ON_ONCE(!cpuc->pmu))
		return &pmu;

	return cpuc->pmu;
}
/*
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with events with least weights first. Keep the current iterator
 * state in struct sched_state.
 */
struct sched_state {
	int	weight;
	int	event;		/* event index */
	int	counter;	/* counter index */
	int	unassigned;	/* number of events to be assigned left */
	int	nr_gp;		/* number of GP counters used */
	u64	used;
};

/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
#define SCHED_STATES_MAX	2

struct perf_sched {
	int			max_weight;
	int			max_events;
	int			max_gp;
	int			saved_states;
	struct event_constraint	**constraints;
	struct sched_state	state;
	struct sched_state	saved[SCHED_STATES_MAX];
};
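
/*
 * Clarifying note: scheduling is a constraint-satisfaction search.
 * Events are visited in order of increasing constraint weight (the
 * number of counters an event may use), so the most constrained events
 * grab counters first. For example, an event restricted to counter 0
 * (weight 1) is placed before one usable on counters 0-3 (weight 4),
 * which could otherwise sit on counter 0 and force a failure.
 */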

/*
 * Initialize iterator that runs through all events and counters.
 */
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
			    int num, int wmin, int wmax, int gpmax)
{
	int idx;

	memset(sched, 0, sizeof(*sched));
	sched->max_events = num;
	sched->max_weight = wmax;
	sched->max_gp = gpmax;
	sched->constraints = constraints;

	for (idx = 0; idx < num; idx++) {
		if (constraints[idx]->weight == wmin)
			break;
	}

	sched->state.event = idx;	/* start with min weight */
	sched->state.weight = wmin;
	sched->state.unassigned = num;
}

static void perf_sched_save_state(struct perf_sched *sched)
{
	if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
		return;

	sched->saved[sched->saved_states] = sched->state;
	sched->saved_states++;
}

static bool perf_sched_restore_state(struct perf_sched *sched)
{
	if (!sched->saved_states)
		return false;

	sched->saved_states--;
	sched->state = sched->saved[sched->saved_states];

	/* this assignment didn't work out */
	/* XXX broken vs EVENT_PAIR */
	sched->state.used &= ~BIT_ULL(sched->state.counter);

	/* try the next one */
	sched->state.counter++;

	return true;
}

/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
static bool __perf_sched_find_counter(struct perf_sched *sched)
{
	struct event_constraint *c;
	int idx;

	if (!sched->state.unassigned)
		return false;

	if (sched->state.event >= sched->max_events)
		return false;

	c = sched->constraints[sched->state.event];
	/* Prefer fixed purpose counters */
	if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
		idx = INTEL_PMC_IDX_FIXED;
		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
			u64 mask = BIT_ULL(idx);

			if (sched->state.used & mask)
				continue;

			sched->state.used |= mask;
			goto done;
		}
	}

	/* Grab the first unused counter starting with idx */
	idx = sched->state.counter;
	for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
		u64 mask = BIT_ULL(idx);

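		/*
		 * Clarifying note: large-increment ("paired") events occupy
		 * two adjacent counters -- the odd one holds the Merge
		 * event -- so both bits must be free and both get claimed.
		 */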
		if (c->flags & PERF_X86_EVENT_PAIR)
			mask |= mask << 1;

		if (sched->state.used & mask)
			continue;

		if (sched->state.nr_gp++ >= sched->max_gp)
			return false;

		sched->state.used |= mask;
		goto done;
	}

	return false;

done:
	sched->state.counter = idx;

	if (c->overlap)
		perf_sched_save_state(sched);

	return true;
}

static bool perf_sched_find_counter(struct perf_sched *sched)
{
	while (!__perf_sched_find_counter(sched)) {
		if (!perf_sched_restore_state(sched))
			return false;
	}

	return true;
}

/*
 * Go through all unassigned events and find the next one to schedule.
 * Take events with the least weight first. Return true on success.
 */
static bool perf_sched_next_event(struct perf_sched *sched)
{
	struct event_constraint *c;

	if (!sched->state.unassigned || !--sched->state.unassigned)
		return false;

	do {
		/* next event */
		sched->state.event++;
		if (sched->state.event >= sched->max_events) {
			/* next weight */
			sched->state.event = 0;
			sched->state.weight++;
			if (sched->state.weight > sched->max_weight)
				return false;
		}
		c = sched->constraints[sched->state.event];
	} while (c->weight != sched->state.weight);

	sched->state.counter = 0;	/* start with first counter */

	return true;
}

/*
 * Assign a counter for each event.
 */
int perf_assign_events(struct event_constraint **constraints, int n,
		       int wmin, int wmax, int gpmax, int *assign)
{
	struct perf_sched sched;

	perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);

	do {
		if (!perf_sched_find_counter(&sched))
			break;	/* failed */
		if (assign)
			assign[sched.state.event] = sched.state.counter;
	} while (perf_sched_next_event(&sched));

	return sched.state.unassigned;
}
EXPORT_SYMBOL_GPL(perf_assign_events);

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c;
	struct perf_event *e;
	int n0, i, wmin, wmax, unsched = 0;
	struct hw_perf_event *hwc;
	u64 used_mask = 0;

	/*
	 * Compute the number of events already present; see x86_pmu_add(),
	 * validate_group() and x86_pmu_commit_txn(). For the former two
	 * cpuc->n_events hasn't been updated yet, while for the latter
	 * cpuc->n_txn contains the number of events added in the current
	 * transaction.
	 */
	n0 = cpuc->n_events;
	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		n0 -= cpuc->n_txn;

	static_call_cond(x86_pmu_start_scheduling)(cpuc);

	for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = cpuc->event_constraint[i];

		/*
		 * Previously scheduled events should have a cached constraint,
		 * while new events should not have one.
		 */
		WARN_ON_ONCE((c && i >= n0) || (!c && i < n0));

		/*
		 * Request constraints for new events; or for those events that
		 * have a dynamic constraint -- for those the constraint can
		 * change due to external factors (sibling state, allow_tfa).
		 */
		if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) {
			c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]);
			cpuc->event_constraint[i] = c;
		}

		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		u64 mask;

		hwc = &cpuc->event_list[i]->hw;
		c = cpuc->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		mask = BIT_ULL(hwc->idx);
		if (is_counter_pair(hwc))
			mask |= mask << 1;

		/* not already used */
		if (used_mask & mask)
			break;

		used_mask |= mask;

		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path */
	if (i != n) {
		int gpmax = x86_pmu_max_num_counters(cpuc->pmu);

		/*
		 * Do not allow scheduling of more than half the available
		 * generic counters.
		 *
		 * This helps avoid counter starvation of sibling thread by
		 * ensuring at most half the counters cannot be in exclusive
		 * mode. There are no designated counters for the limits; any
		 * N/2 counters can be used. This helps with events with
		 * specific counter constraints.
		 */
		if (is_ht_workaround_enabled() && !cpuc->is_fake &&
		    READ_ONCE(cpuc->excl_cntrs->exclusive_present))
			gpmax /= 2;

		/*
		 * Reduce the amount of available counters to allow fitting
		 * the extra Merge events needed by large increment events.
		 */
		if (x86_pmu.flags & PMU_FL_PAIR) {
			gpmax -= cpuc->n_pair;
			WARN_ON(gpmax <= 0);
		}

		unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
					     wmax, gpmax, assign);
	}

	/*
	 * In case of success (unsched = 0), mark events as committed,
	 * so we do not put_constraint() in case new events are added
	 * and fail to be scheduled
	 *
	 * We invoke the lower level commit callback to lock the resource
	 *
	 * We do not need to do all of this in case we are called to
	 * validate an event group (assign == NULL)
	 */
	if (!unsched && assign) {
		for (i = 0; i < n; i++)
			static_call_cond(x86_pmu_commit_scheduling)(cpuc, i, assign[i]);
	} else {
		for (i = n0; i < n; i++) {
			e = cpuc->event_list[i];

			/*
			 * release events that failed scheduling
			 */
			static_call_cond(x86_pmu_put_event_constraints)(cpuc, e);

			cpuc->event_constraint[i] = NULL;
		}
	}

	static_call_cond(x86_pmu_stop_scheduling)(cpuc);

	return unsched ? -EINVAL : 0;
}

static int add_nr_metric_event(struct cpu_hw_events *cpuc,
			       struct perf_event *event)
{
	if (is_metric_event(event)) {
		if (cpuc->n_metric == INTEL_TD_METRIC_NUM)
			return -EINVAL;
		cpuc->n_metric++;
		cpuc->n_txn_metric++;
	}

	return 0;
}

static void del_nr_metric_event(struct cpu_hw_events *cpuc,
				struct perf_event *event)
{
	if (is_metric_event(event))
		cpuc->n_metric--;
}

static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
			 int max_count, int n)
{
	union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap);

	if (intel_cap.perf_metrics && add_nr_metric_event(cpuc, event))
		return -EINVAL;

	if (n >= max_count + cpuc->n_metric)
		return -EINVAL;

	cpuc->event_list[n] = event;
	if (is_counter_pair(&event->hw)) {
		cpuc->n_pair++;
		cpuc->n_txn_pair++;
	}

	return 0;
}

/*
 * dogrp: true if we must collect sibling events (i.e. a group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu_num_counters(cpuc->pmu) + x86_pmu_num_counters_fixed(cpuc->pmu);

	/* current number of events already accepted */
	n = cpuc->n_events;
	if (!cpuc->n_events)
		cpuc->pebs_output = 0;

	if (!cpuc->is_fake && leader->attr.precise_ip) {
		/*
		 * For PEBS->PT, if !aux_event, the group leader (PT) went
		 * away, the group was broken down and this singleton event
		 * can't schedule any more.
		 */
		if (is_pebs_pt(leader) && !leader->aux_event)
			return -EINVAL;

		/*
		 * pebs_output: 0: no PEBS so far, 1: PT, 2: DS
		 */
		if (cpuc->pebs_output &&
		    cpuc->pebs_output != is_pebs_pt(leader) + 1)
			return -EINVAL;

		cpuc->pebs_output = is_pebs_pt(leader) + 1;
	}

	if (is_x86_event(leader)) {
		if (collect_event(cpuc, leader, max_count, n))
			return -EINVAL;
		n++;
	}

	if (!dogrp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (collect_event(cpuc, event, max_count, n))
			return -EINVAL;

		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				       struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	idx = hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	static_call_cond(x86_pmu_assign)(event, idx);

	switch (hwc->idx) {
	case INTEL_PMC_IDX_FIXED_BTS:
	case INTEL_PMC_IDX_FIXED_VLBR:
		hwc->config_base = 0;
		hwc->event_base = 0;
		break;

	case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
		/* All the metric events are mapped onto the fixed counter 3. */
		idx = INTEL_PMC_IDX_FIXED_SLOTS;
		fallthrough;
	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1:
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		hwc->event_base = x86_pmu_fixed_ctr_addr(idx - INTEL_PMC_IDX_FIXED);
		hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) |
					INTEL_PMC_FIXED_RDPMC_BASE;
		break;

	default:
		hwc->config_base = x86_pmu_config_addr(hwc->idx);
		hwc->event_base = x86_pmu_event_addr(hwc->idx);
		hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
		break;
	}
}

/**
 * x86_perf_rdpmc_index - Return PMC counter used for event
 * @event: the perf_event to which the PMC counter was assigned
 *
 * The counter assigned to this performance event may change if interrupts
 * are enabled. This counter should thus never be used while interrupts are
 * enabled. Before this function is used to obtain the assigned counter the
 * event should be checked for validity using, for example,
 * perf_event_read_local(), within the same interrupt disabled section in
 * which this counter is planned to be used.
 *
 * Return: The index of the performance monitoring counter assigned to
 * @event.
 */
int x86_perf_rdpmc_index(struct perf_event *event)
{
	lockdep_assert_irqs_disabled();

	return event->hw.event_base_rdpmc;
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
	       hwc->last_cpu == smp_processor_id() &&
	       hwc->last_tag == cpuc->tags[i];
}
static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;

		/*
		 * The late setup (after counters are scheduled)
		 * is required for some cases, e.g. PEBS counter
		 * snapshotting, because an accurate counter index
		 * is needed.
		 */
		static_call_cond(x86_pmu_late_setup)();

		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/*
			 * Ensure we don't accidentally enable a stopped
			 * counter simply because we rescheduled.
			 */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		/*
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			/*
			 * if cpuc->enabled = 0, then no wrmsr as
			 * per x86_pmu_enable_event()
			 */
			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	static_call(x86_pmu_enable_all)(added);
}

DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
int x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (unlikely(!hwc->event_base))
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	static_call_cond(x86_pmu_limit_period)(event, &left);

	this_cpu_write(pmc_prev_left[idx], left);

	/*
	 * The hw event starts counting from this event offset,
	 * mark it so we can compute future deltas:
	 */
	local64_set(&hwc->prev_count, (u64)-left);

	wrmsrq(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
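
	/*
	 * Illustrative example (editorial note, not from the original
	 * source): with left = 10000 and a 48-bit counter, the MSR is
	 * programmed to (2^48 - 10000), so the counter overflows and
	 * raises a PMI after exactly 10000 increments.
	 */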

	/*
	 * Sign extend the Merge event counter's upper 16 bits since
	 * we currently declare a 48-bit counter width
	 */
	if (is_counter_pair(hwc))
		wrmsrq(x86_pmu_event_addr(idx + 1), 0xffff);

	perf_event_update_userpage(event);

	return ret;
}

void x86_pmu_enable_event(struct perf_event *event)
{
	if (__this_cpu_read(cpu_hw_events.enabled))
		__x86_pmu_enable_event(&event->hw,
				       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	ret = n = collect_events(cpuc, event, false);
	if (ret < 0)
		goto out;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole.
	 *
	 * If commit fails, we'll call ->del() on all events
	 * for which ->add() was called.
	 */
	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		goto done_collect;

	ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
	if (ret)
		goto out;
	/*
	 * copy new assignment; now that we know it is schedulable
	 * it will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
	/*
	 * Commit the collect_events() state. See x86_pmu_del() and
	 * x86_pmu_*_txn().
	 */
	cpuc->n_events = n;
	cpuc->n_added += n - n0;
	cpuc->n_txn += n - n0;

	/*
	 * This is before x86_pmu_enable() will call x86_pmu_start(),
	 * so we enable LBRs before an event needs them etc..
	 */
	static_call_cond(x86_pmu_add)(event);

	ret = 0;
out:
	return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		static_call(x86_pmu_set_period)(event);
	}

	event->hw.state = 0;

	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	static_call(x86_pmu_enable)(event);
	perf_event_update_userpage(event);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	unsigned long *cntr_mask, *fixed_cntr_mask;
	struct event_constraint *pebs_constraints;
	struct cpu_hw_events *cpuc;
	u64 pebs, debugctl;
	int cpu, idx;

	guard(irqsave)();

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);
	cntr_mask = hybrid(cpuc->pmu, cntr_mask);
	fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
	pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);

	if (!*(u64 *)cntr_mask)
		return;

	if (x86_pmu.version >= 2) {
		rdmsrq(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status: %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
		if (pebs_constraints) {
			rdmsrq(MSR_IA32_PEBS_ENABLE, pebs);
			pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
		}
		if (x86_pmu.lbr_nr) {
			rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
			pr_info("CPU#%d: debugctl: %016llx\n", cpu, debugctl);
		}
	}
	pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for_each_set_bit(idx, cntr_mask, X86_PMC_IDX_MAX) {
		rdmsrq(x86_pmu_config_addr(idx), pmc_ctrl);
		rdmsrq(x86_pmu_event_addr(idx), pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	for_each_set_bit(idx, fixed_cntr_mask, X86_PMC_IDX_MAX) {
		if (fixed_counter_disabled(idx, cpuc->pmu))
			continue;
		rdmsrq(x86_pmu_fixed_ctr_addr(idx), pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
}

void x86_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (test_bit(hwc->idx, cpuc->active_mask)) {
		static_call(x86_pmu_disable)(event);
		__clear_bit(hwc->idx, cpuc->active_mask);
		cpuc->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		static_call(x86_pmu_update)(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap);
	int i;

	/*
	 * If we're called during a txn, we only need to undo x86_pmu.add.
	 * The events never got scheduled and ->cancel_txn will truncate
	 * the event_list.
	 *
	 * XXX assumes any ->del() called during a TXN will only be on
	 * an event added during that same TXN.
	 */
	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		goto do_del;

	__set_bit(event->hw.idx, cpuc->dirty);

	/*
	 * Not a TXN, therefore cleanup properly.
	 */
	x86_pmu_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i])
			break;
	}

	if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
		return;

	/* If we have a newly added event; make sure to decrease n_added. */
	if (i >= cpuc->n_events - cpuc->n_added)
		--cpuc->n_added;

	static_call_cond(x86_pmu_put_event_constraints)(cpuc, event);

	/* Delete the array entry. */
	while (++i < cpuc->n_events) {
		cpuc->event_list[i-1] = cpuc->event_list[i];
		cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
		cpuc->assign[i-1] = cpuc->assign[i];
	}
	cpuc->event_constraint[i-1] = NULL;
	--cpuc->n_events;
	if (intel_cap.perf_metrics)
		del_nr_metric_event(cpuc, event);

	perf_event_update_userpage(event);

do_del:

	/*
	 * This is after x86_pmu_stop(); so we disable LBRs after any
	 * event can need them etc..
	 */
	static_call_cond(x86_pmu_del)(event);
}

int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	int idx, handled = 0;
	u64 last_period;
	u64 val;

	cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler. As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This generic handler doesn't seem to have any issues where the
	 * unmasking occurs so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		last_period = event->hw.last_period;

		val = static_call(x86_pmu_update)(event);
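		/*
		 * Clarifying note: the counter was programmed to (u64)-left
		 * and counts upward, so while the top implemented bit
		 * (cntval_bits - 1) is still set it has not yet wrapped
		 * past zero, i.e. it has not overflowed.
		 */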
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled++;

		if (!static_call(x86_pmu_set_period)(event))
			continue;

		perf_sample_data_init(&data, 0, last_period);

		perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);

		perf_event_overflow(event, &data, regs);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void perf_events_lapic_init(void)
{
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 start_clock;
	u64 finish_clock;
	int ret;

	/*
	 * All PMUs/events that share this PMI handler should make sure to
	 * increment active_events for their events.
	 */
	if (!atomic_read(&active_events))
		return NMI_DONE;

	start_clock = sched_clock();
	ret = static_call(x86_pmu_handle_irq)(regs);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);

	return ret;
}
NOKPROBE_SYMBOL(perf_event_nmi_handler);

struct event_constraint emptyconstraint;
struct event_constraint unconstrained;

static int x86_pmu_prepare_cpu(unsigned int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int i;

	for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
		cpuc->kfree_on_online[i] = NULL;
	if (x86_pmu.cpu_prepare)
		return x86_pmu.cpu_prepare(cpu);
	return 0;
}

static int x86_pmu_dead_cpu(unsigned int cpu)
{
	if (x86_pmu.cpu_dead)
		x86_pmu.cpu_dead(cpu);
	return 0;
}

static int x86_pmu_online_cpu(unsigned int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int i;

	for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
		kfree(cpuc->kfree_on_online[i]);
		cpuc->kfree_on_online[i] = NULL;
	}
	return 0;
}

static int x86_pmu_starting_cpu(unsigned int cpu)
{
	if (x86_pmu.cpu_starting)
		x86_pmu.cpu_starting(cpu);
	return 0;
}

static int x86_pmu_dying_cpu(unsigned int cpu)
{
	if (x86_pmu.cpu_dying)
		x86_pmu.cpu_dying(cpu);
	return 0;
}

static void __init pmu_check_apic(void)
{
	if (boot_cpu_has(X86_FEATURE_APIC))
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");

	/*
	 * If we have a PMU initialized but no APIC
	 * interrupts, we cannot sample hardware
	 * events (user-space has to fall back and
	 * sample via a hrtimer based software event):
	 */
	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}

static struct attribute_group x86_pmu_format_group __ro_after_init = {
	.name = "format",
	.attrs = NULL,
};

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);
	u64 config = 0;

	if (pmu_attr->id < x86_pmu.max_events)
		config = x86_pmu.event_map(pmu_attr->id);

	/* string trumps id */
	if (pmu_attr->event_str)
		return sprintf(page, "%s\n", pmu_attr->event_str);

	return x86_pmu.events_sysfs_show(page, config);
}
EXPORT_SYMBOL_GPL(events_sysfs_show);

ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			     char *page)
{
	struct perf_pmu_events_ht_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_ht_attr, attr);

	/*
	 * Report conditional events depending on Hyper-Threading.
	 *
	 * This is overly conservative as usually the HT special
	 * handling is not needed if the other CPU thread is idle.
	 *
	 * Note this does not (and cannot) handle the case when thread
	 * siblings are invisible, for example with virtualization
	 * if they are owned by some other guest. The user tool
	 * has to re-read when a thread sibling gets onlined later.
	 */
	return sprintf(page, "%s",
			topology_max_smt_threads() > 1 ?
			pmu_attr->event_str_ht :
			pmu_attr->event_str_noht);
}

ssize_t events_hybrid_sysfs_show(struct device *dev,
				 struct device_attribute *attr,
				 char *page)
{
	struct perf_pmu_events_hybrid_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_hybrid_attr, attr);
	struct x86_hybrid_pmu *pmu;
	const char *str, *next_str;
	int i;

	if (hweight64(pmu_attr->pmu_type) == 1)
		return sprintf(page, "%s", pmu_attr->event_str);

	/*
	 * Hybrid PMUs may support the same event name, but with different
	 * event encoding, e.g., the mem-loads event on an Atom PMU has
	 * different event encoding from a Core PMU.
	 *
	 * The event_str includes all event encodings. Each event encoding
	 * is divided by ";". The order of the event encodings must follow
	 * the order of the hybrid PMU index.
	 */
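	/*
	 * Illustrative example (hypothetical encodings): with
	 * event_str = "event=0xd0,umask=0x5;event=0xcd,umask=0x1", the
	 * part before the ';' belongs to hybrid PMU index 0 and the part
	 * after it to index 1; the loop below walks to this PMU's slice.
	 */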
	pmu = container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);

	str = pmu_attr->event_str;
	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
		if (!(x86_pmu.hybrid_pmu[i].pmu_type & pmu_attr->pmu_type))
			continue;
		if (x86_pmu.hybrid_pmu[i].pmu_type & pmu->pmu_type) {
			next_str = strchr(str, ';');
			if (next_str)
				return snprintf(page, next_str - str + 1, "%s", str);
			else
				return sprintf(page, "%s", str);
		}
		str = strchr(str, ';');
		str++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(events_hybrid_sysfs_show);
1930
1931 EVENT_ATTR(cpu-cycles, CPU_CYCLES );
1932 EVENT_ATTR(instructions, INSTRUCTIONS );
1933 EVENT_ATTR(cache-references, CACHE_REFERENCES );
1934 EVENT_ATTR(cache-misses, CACHE_MISSES );
1935 EVENT_ATTR(branch-instructions, BRANCH_INSTRUCTIONS );
1936 EVENT_ATTR(branch-misses, BRANCH_MISSES );
1937 EVENT_ATTR(bus-cycles, BUS_CYCLES );
1938 EVENT_ATTR(stalled-cycles-frontend, STALLED_CYCLES_FRONTEND );
1939 EVENT_ATTR(stalled-cycles-backend, STALLED_CYCLES_BACKEND );
1940 EVENT_ATTR(ref-cycles, REF_CPU_CYCLES );
1941
1942 static struct attribute *empty_attrs;
1943
1944 static struct attribute *events_attr[] = {
1945 EVENT_PTR(CPU_CYCLES),
1946 EVENT_PTR(INSTRUCTIONS),
1947 EVENT_PTR(CACHE_REFERENCES),
1948 EVENT_PTR(CACHE_MISSES),
1949 EVENT_PTR(BRANCH_INSTRUCTIONS),
1950 EVENT_PTR(BRANCH_MISSES),
1951 EVENT_PTR(BUS_CYCLES),
1952 EVENT_PTR(STALLED_CYCLES_FRONTEND),
1953 EVENT_PTR(STALLED_CYCLES_BACKEND),
1954 EVENT_PTR(REF_CPU_CYCLES),
1955 NULL,
1956 };
1957
1958 /*
1959 * Remove all undefined events (x86_pmu.event_map(id) == 0)
1960 * out of events_attr attributes.
1961 */
1962 static umode_t
1963 is_visible(struct kobject *kobj, struct attribute *attr, int idx)
1964 {
1965 struct perf_pmu_events_attr *pmu_attr;
1966
1967 if (idx >= x86_pmu.max_events)
1968 return 0;
1969
1970 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
1971 /* str trumps id */
1972 return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;
1973 }
1974
1975 static struct attribute_group x86_pmu_events_group __ro_after_init = {
1976 .name = "events",
1977 .attrs = events_attr,
1978 .is_visible = is_visible,
1979 };
1980
1981 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
1982 {
1983 u64 umask = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
1984 u64 cmask = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
1985 bool edge = (config & ARCH_PERFMON_EVENTSEL_EDGE);
1986 bool pc = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
1987 bool any = (config & ARCH_PERFMON_EVENTSEL_ANY);
1988 bool inv = (config & ARCH_PERFMON_EVENTSEL_INV);
1989 ssize_t ret;
1990
1991 /*
1992 	 * We have a whole page to spend and only a little data
1993 	 * to write, so we can safely use sprintf.
1994 */
1995 ret = sprintf(page, "event=0x%02llx", event);
1996
1997 if (umask)
1998 ret += sprintf(page + ret, ",umask=0x%02llx", umask);
1999
2000 if (edge)
2001 ret += sprintf(page + ret, ",edge");
2002
2003 if (pc)
2004 ret += sprintf(page + ret, ",pc");
2005
2006 if (any)
2007 ret += sprintf(page + ret, ",any");
2008
2009 if (inv)
2010 ret += sprintf(page + ret, ",inv");
2011
2012 if (cmask)
2013 ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);
2014
2015 ret += sprintf(page + ret, "\n");
2016
2017 return ret;
2018 }
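
/*
 * For instance (illustrative values), with event == 0xc0 and a config
 * carrying umask 0x01, the invert bit and a cmask of 2, this renders:
 *
 *	event=0xc0,umask=0x01,inv,cmask=0x02
 */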
2019
2020 static struct attribute_group x86_pmu_attr_group;
2021 static struct attribute_group x86_pmu_caps_group;
2022
2023 static void x86_pmu_static_call_update(void)
2024 {
2025 static_call_update(x86_pmu_handle_irq, x86_pmu.handle_irq);
2026 static_call_update(x86_pmu_disable_all, x86_pmu.disable_all);
2027 static_call_update(x86_pmu_enable_all, x86_pmu.enable_all);
2028 static_call_update(x86_pmu_enable, x86_pmu.enable);
2029 static_call_update(x86_pmu_disable, x86_pmu.disable);
2030
2031 static_call_update(x86_pmu_assign, x86_pmu.assign);
2032
2033 static_call_update(x86_pmu_add, x86_pmu.add);
2034 static_call_update(x86_pmu_del, x86_pmu.del);
2035 static_call_update(x86_pmu_read, x86_pmu.read);
2036
2037 static_call_update(x86_pmu_set_period, x86_pmu.set_period);
2038 static_call_update(x86_pmu_update, x86_pmu.update);
2039 static_call_update(x86_pmu_limit_period, x86_pmu.limit_period);
2040
2041 static_call_update(x86_pmu_schedule_events, x86_pmu.schedule_events);
2042 static_call_update(x86_pmu_get_event_constraints, x86_pmu.get_event_constraints);
2043 static_call_update(x86_pmu_put_event_constraints, x86_pmu.put_event_constraints);
2044
2045 static_call_update(x86_pmu_start_scheduling, x86_pmu.start_scheduling);
2046 static_call_update(x86_pmu_commit_scheduling, x86_pmu.commit_scheduling);
2047 static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling);
2048
2049 static_call_update(x86_pmu_sched_task, x86_pmu.sched_task);
2050
2051 static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
2052 static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
2053
2054 static_call_update(x86_pmu_guest_get_msrs, x86_pmu.guest_get_msrs);
2055 static_call_update(x86_pmu_filter, x86_pmu.filter);
2056
2057 static_call_update(x86_pmu_late_setup, x86_pmu.late_setup);
2058
2059 static_call_update(x86_pmu_pebs_enable, x86_pmu.pebs_enable);
2060 static_call_update(x86_pmu_pebs_disable, x86_pmu.pebs_disable);
2061 static_call_update(x86_pmu_pebs_enable_all, x86_pmu.pebs_enable_all);
2062 static_call_update(x86_pmu_pebs_disable_all, x86_pmu.pebs_disable_all);
2063 }
2064
2065 static void _x86_pmu_read(struct perf_event *event)
2066 {
2067 static_call(x86_pmu_update)(event);
2068 }
2069
2070 void x86_pmu_show_pmu_cap(struct pmu *pmu)
2071 {
2072 pr_info("... version: %d\n", x86_pmu.version);
2073 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
2074 pr_info("... generic counters: %d\n", x86_pmu_num_counters(pmu));
2075 pr_info("... generic bitmap: %016llx\n", hybrid(pmu, cntr_mask64));
2076 pr_info("... fixed-purpose counters: %d\n", x86_pmu_num_counters_fixed(pmu));
2077 pr_info("... fixed-purpose bitmap: %016llx\n", hybrid(pmu, fixed_cntr_mask64));
2078 pr_info("... value mask: %016llx\n", x86_pmu.cntval_mask);
2079 pr_info("... max period: %016llx\n", x86_pmu.max_period);
2080 pr_info("... global_ctrl mask: %016llx\n", hybrid(pmu, intel_ctrl));
2081 }
2082
2083 static int __init init_hw_perf_events(void)
2084 {
2085 struct x86_pmu_quirk *quirk;
2086 int err;
2087
2088 pr_info("Performance Events: ");
2089
2090 switch (boot_cpu_data.x86_vendor) {
2091 case X86_VENDOR_INTEL:
2092 err = intel_pmu_init();
2093 break;
2094 case X86_VENDOR_AMD:
2095 err = amd_pmu_init();
2096 break;
2097 case X86_VENDOR_HYGON:
2098 err = amd_pmu_init();
2099 x86_pmu.name = "HYGON";
2100 break;
2101 case X86_VENDOR_ZHAOXIN:
2102 case X86_VENDOR_CENTAUR:
2103 err = zhaoxin_pmu_init();
2104 break;
2105 default:
2106 err = -ENOTSUPP;
2107 }
2108 if (err != 0) {
2109 pr_cont("no PMU driver, software events only.\n");
2110 err = 0;
2111 goto out_bad_pmu;
2112 }
2113
2114 pmu_check_apic();
2115
2116 /* sanity check that the hardware exists or is emulated */
2117 if (!check_hw_exists(&pmu, x86_pmu.cntr_mask, x86_pmu.fixed_cntr_mask))
2118 goto out_bad_pmu;
2119
2120 pr_cont("%s PMU driver.\n", x86_pmu.name);
2121
2122 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
2123
2124 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
2125 quirk->func();
2126
2127 if (!x86_pmu.intel_ctrl)
2128 x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;
2129
2130 if (!x86_pmu.config_mask)
2131 x86_pmu.config_mask = X86_RAW_EVENT_MASK;
2132
2133 perf_events_lapic_init();
2134 register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
2135
2136 unconstrained = (struct event_constraint)
2137 __EVENT_CONSTRAINT(0, x86_pmu.cntr_mask64,
2138 0, x86_pmu_num_counters(NULL), 0, 0);
2139
2140 x86_pmu_format_group.attrs = x86_pmu.format_attrs;
2141
2142 if (!x86_pmu.events_sysfs_show)
2143 x86_pmu_events_group.attrs = &empty_attrs;
2144
2145 pmu.attr_update = x86_pmu.attr_update;
2146
2147 if (!is_hybrid())
2148 x86_pmu_show_pmu_cap(NULL);
2149
2150 if (!x86_pmu.read)
2151 x86_pmu.read = _x86_pmu_read;
2152
2153 if (!x86_pmu.guest_get_msrs)
2154 x86_pmu.guest_get_msrs = (void *)&__static_call_return0;
2155
2156 if (!x86_pmu.set_period)
2157 x86_pmu.set_period = x86_perf_event_set_period;
2158
2159 if (!x86_pmu.update)
2160 x86_pmu.update = x86_perf_event_update;
2161
2162 x86_pmu_static_call_update();
2163
2164 /*
2165 * Install callbacks. Core will call them for each online
2166 * cpu.
2167 */
2168 err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "perf/x86:prepare",
2169 x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
2170 if (err)
2171 return err;
2172
2173 err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
2174 "perf/x86:starting", x86_pmu_starting_cpu,
2175 x86_pmu_dying_cpu);
2176 if (err)
2177 goto out;
2178
2179 err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "perf/x86:online",
2180 x86_pmu_online_cpu, NULL);
2181 if (err)
2182 goto out1;
2183
2184 if (!is_hybrid()) {
2185 err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
2186 if (err)
2187 goto out2;
2188 } else {
2189 struct x86_hybrid_pmu *hybrid_pmu;
2190 int i, j;
2191
2192 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
2193 hybrid_pmu = &x86_pmu.hybrid_pmu[i];
2194
2195 hybrid_pmu->pmu = pmu;
2196 hybrid_pmu->pmu.type = -1;
2197 hybrid_pmu->pmu.attr_update = x86_pmu.attr_update;
2198 hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_EXTENDED_HW_TYPE;
2199
2200 err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name,
2201 (hybrid_pmu->pmu_type == hybrid_big) ? PERF_TYPE_RAW : -1);
2202 if (err)
2203 break;
2204 }
2205
2206 if (i < x86_pmu.num_hybrid_pmus) {
2207 for (j = 0; j < i; j++)
2208 perf_pmu_unregister(&x86_pmu.hybrid_pmu[j].pmu);
2209 pr_warn("Failed to register hybrid PMUs\n");
2210 kfree(x86_pmu.hybrid_pmu);
2211 x86_pmu.hybrid_pmu = NULL;
2212 x86_pmu.num_hybrid_pmus = 0;
2213 goto out2;
2214 }
2215 }
2216
2217 return 0;
2218
2219 out2:
2220 cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
2221 out1:
2222 cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
2223 out:
2224 cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
2225 out_bad_pmu:
2226 memset(&x86_pmu, 0, sizeof(x86_pmu));
2227 return err;
2228 }
2229 early_initcall(init_hw_perf_events);
2230
2231 static void x86_pmu_read(struct perf_event *event)
2232 {
2233 static_call(x86_pmu_read)(event);
2234 }
2235
2236 /*
2237 * Start group events scheduling transaction
2238 * Set the flag to make pmu::enable() not perform the
2239 * schedulability test, it will be performed at commit time
2240 *
2241 * We only support PERF_PMU_TXN_ADD transactions. Save the
2242 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
2243 * transactions.
2244 */
2245 static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
2246 {
2247 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2248
2249 WARN_ON_ONCE(cpuc->txn_flags); /* txn already in flight */
2250
2251 cpuc->txn_flags = txn_flags;
2252 if (txn_flags & ~PERF_PMU_TXN_ADD)
2253 return;
2254
2255 perf_pmu_disable(pmu);
2256 __this_cpu_write(cpu_hw_events.n_txn, 0);
2257 __this_cpu_write(cpu_hw_events.n_txn_pair, 0);
2258 __this_cpu_write(cpu_hw_events.n_txn_metric, 0);
2259 }
2260
2261 /*
2262 * Stop group events scheduling transaction
2263 * Clear the flag and pmu::enable() will perform the
2264 * schedulability test.
2265 */
2266 static void x86_pmu_cancel_txn(struct pmu *pmu)
2267 {
2268 unsigned int txn_flags;
2269 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2270
2271 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
2272
2273 txn_flags = cpuc->txn_flags;
2274 cpuc->txn_flags = 0;
2275 if (txn_flags & ~PERF_PMU_TXN_ADD)
2276 return;
2277
2278 /*
2279 * Truncate collected array by the number of events added in this
2280 * transaction. See x86_pmu_add() and x86_pmu_*_txn().
2281 */
2282 __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
2283 __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
2284 __this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
2285 __this_cpu_sub(cpu_hw_events.n_metric, __this_cpu_read(cpu_hw_events.n_txn_metric));
2286 perf_pmu_enable(pmu);
2287 }
2288
2289 /*
2290 * Commit group events scheduling transaction
2291 * Perform the group schedulability test as a whole
2292 * Return 0 if success
2293 *
2294 * Does not cancel the transaction on failure; expects the caller to do this.
2295 */
2296 static int x86_pmu_commit_txn(struct pmu *pmu)
2297 {
2298 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2299 int assign[X86_PMC_IDX_MAX];
2300 int n, ret;
2301
2302 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
2303
2304 if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
2305 cpuc->txn_flags = 0;
2306 return 0;
2307 }
2308
2309 n = cpuc->n_events;
2310
2311 if (!x86_pmu_initialized())
2312 return -EAGAIN;
2313
2314 ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
2315 if (ret)
2316 return ret;
2317
2318 /*
2319 	 * Copy the new assignment now that we know it is
2320 	 * schedulable; it will be used by hw_perf_enable().
2321 */
2322 memcpy(cpuc->assign, assign, n*sizeof(int));
2323
2324 cpuc->txn_flags = 0;
2325 perf_pmu_enable(pmu);
2326 return 0;
2327 }
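
/*
 * Minimal sketch (editorial, simplified from kernel/events/core.c's
 * group_sched_in()) of how the core drives the transaction API above;
 * event_sched_in() stands in for the real helper and error handling
 * is elided:
 */
#if 0	/* editorial example, never compiled */
static int group_sched_in_sketch(struct pmu *pmu, struct perf_event *group)
{
	struct perf_event *event;

	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);

	/* Add leader and siblings; the schedulability test is deferred. */
	if (event_sched_in(group))
		goto error;

	for_each_sibling_event(event, group) {
		if (event_sched_in(event))
			goto error;
	}

	/* Run the deferred schedulability test for the group as a whole. */
	if (!pmu->commit_txn(pmu))
		return 0;
error:
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}
#endif
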
2328 /*
2329 * a fake_cpuc is used to validate event groups. Due to
2330 * the extra reg logic, we need to also allocate a fake
2331 * per_core and per_cpu structure. Otherwise, group events
2332 * using extra reg may conflict without the kernel being
2333 * able to catch this when the last event gets added to
2334 * the group.
2335 */
2336 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
2337 {
2338 intel_cpuc_finish(cpuc);
2339 kfree(cpuc);
2340 }
2341
2342 static struct cpu_hw_events *allocate_fake_cpuc(struct pmu *event_pmu)
2343 {
2344 struct cpu_hw_events *cpuc;
2345 int cpu;
2346
2347 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
2348 if (!cpuc)
2349 return ERR_PTR(-ENOMEM);
2350 cpuc->is_fake = 1;
2351
2352 if (is_hybrid()) {
2353 struct x86_hybrid_pmu *h_pmu;
2354
2355 h_pmu = hybrid_pmu(event_pmu);
2356 if (cpumask_empty(&h_pmu->supported_cpus))
2357 goto error;
2358 cpu = cpumask_first(&h_pmu->supported_cpus);
2359 } else
2360 cpu = raw_smp_processor_id();
2361 cpuc->pmu = event_pmu;
2362
2363 if (intel_cpuc_prepare(cpuc, cpu))
2364 goto error;
2365
2366 return cpuc;
2367 error:
2368 free_fake_cpuc(cpuc);
2369 return ERR_PTR(-ENOMEM);
2370 }
2371
2372 /*
2373 * validate that we can schedule this event
2374 */
2375 static int validate_event(struct perf_event *event)
2376 {
2377 struct cpu_hw_events *fake_cpuc;
2378 struct event_constraint *c;
2379 int ret = 0;
2380
2381 fake_cpuc = allocate_fake_cpuc(event->pmu);
2382 if (IS_ERR(fake_cpuc))
2383 return PTR_ERR(fake_cpuc);
2384
2385 c = x86_pmu.get_event_constraints(fake_cpuc, 0, event);
2386
2387 if (!c || !c->weight)
2388 ret = -EINVAL;
2389
2390 if (x86_pmu.put_event_constraints)
2391 x86_pmu.put_event_constraints(fake_cpuc, event);
2392
2393 free_fake_cpuc(fake_cpuc);
2394
2395 return ret;
2396 }
2397
2398 /*
2399 * validate a single event group
2400 *
2401  * validation includes:
2402  * - check events are compatible with each other
2403 * - events do not compete for the same counter
2404 * - number of events <= number of counters
2405 *
2406 * validation ensures the group can be loaded onto the
2407 * PMU if it was the only group available.
2408 */
2409 static int validate_group(struct perf_event *event)
2410 {
2411 struct perf_event *leader = event->group_leader;
2412 struct cpu_hw_events *fake_cpuc;
2413 int ret = -EINVAL, n;
2414
2415 /*
2416 * Reject events from different hybrid PMUs.
2417 */
2418 if (is_hybrid()) {
2419 struct perf_event *sibling;
2420 struct pmu *pmu = NULL;
2421
2422 if (is_x86_event(leader))
2423 pmu = leader->pmu;
2424
2425 for_each_sibling_event(sibling, leader) {
2426 if (!is_x86_event(sibling))
2427 continue;
2428 if (!pmu)
2429 pmu = sibling->pmu;
2430 else if (pmu != sibling->pmu)
2431 return ret;
2432 }
2433 }
2434
2435 fake_cpuc = allocate_fake_cpuc(event->pmu);
2436 if (IS_ERR(fake_cpuc))
2437 return PTR_ERR(fake_cpuc);
2438 /*
2439 	 * The event is not yet connected with its
2440 	 * siblings, therefore we must first collect
2441 	 * existing siblings, then add the new event
2442 	 * before we can simulate the scheduling.
2443 */
2444 n = collect_events(fake_cpuc, leader, true);
2445 if (n < 0)
2446 goto out;
2447
2448 fake_cpuc->n_events = n;
2449 n = collect_events(fake_cpuc, event, false);
2450 if (n < 0)
2451 goto out;
2452
2453 fake_cpuc->n_events = 0;
2454 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
2455
2456 out:
2457 free_fake_cpuc(fake_cpuc);
2458 return ret;
2459 }
2460
2461 static int x86_pmu_event_init(struct perf_event *event)
2462 {
2463 struct x86_hybrid_pmu *pmu = NULL;
2464 int err;
2465
2466 if ((event->attr.type != event->pmu->type) &&
2467 (event->attr.type != PERF_TYPE_HARDWARE) &&
2468 (event->attr.type != PERF_TYPE_HW_CACHE))
2469 return -ENOENT;
2470
2471 if (is_hybrid() && (event->cpu != -1)) {
2472 pmu = hybrid_pmu(event->pmu);
2473 if (!cpumask_test_cpu(event->cpu, &pmu->supported_cpus))
2474 return -ENOENT;
2475 }
2476
2477 err = __x86_pmu_event_init(event);
2478 if (!err) {
2479 if (event->group_leader != event)
2480 err = validate_group(event);
2481 else
2482 err = validate_event(event);
2483 }
2484 if (err) {
2485 if (event->destroy)
2486 event->destroy(event);
2487 event->destroy = NULL;
2488 }
2489
2490 if (READ_ONCE(x86_pmu.attr_rdpmc) &&
2491 !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
2492 event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
2493
2494 return err;
2495 }
2496
2497 void perf_clear_dirty_counters(void)
2498 {
2499 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2500 int i;
2501
2502 /* Don't need to clear the assigned counter. */
2503 for (i = 0; i < cpuc->n_events; i++)
2504 __clear_bit(cpuc->assign[i], cpuc->dirty);
2505
2506 if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX))
2507 return;
2508
2509 for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
2510 if (i >= INTEL_PMC_IDX_FIXED) {
2511 /* Metrics and fake events don't have corresponding HW counters. */
2512 if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask)))
2513 continue;
2514
2515 wrmsrq(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0);
2516 } else {
2517 wrmsrq(x86_pmu_event_addr(i), 0);
2518 }
2519 }
2520
2521 bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
2522 }
2523
2524 static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
2525 {
2526 if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
2527 return;
2528
2529 /*
2530 * This function relies on not being called concurrently in two
2531 * tasks in the same mm. Otherwise one task could observe
2532 * perf_rdpmc_allowed > 1 and return all the way back to
2533 * userspace with CR4.PCE clear while another task is still
2534 * doing on_each_cpu_mask() to propagate CR4.PCE.
2535 *
2536 * For now, this can't happen because all callers hold mmap_lock
2537 * for write. If this changes, we'll need a different solution.
2538 */
2539 mmap_assert_write_locked(mm);
2540
2541 if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
2542 on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
2543 }
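
/*
 * Illustrative user-space sketch (editorial, not part of this file): once
 * the event is mmap()ed and CR4.PCE has been propagated as above, the
 * counter can be read locklessly with RDPMC using the seqcount in the
 * mmap'ed perf_event_mmap_page (see its documentation in the perf uapi
 * header):
 */
#if 0	/* editorial example, never compiled */
#include <linux/perf_event.h>
#include <stdint.h>

static uint64_t read_counter(struct perf_event_mmap_page *pc)
{
	uint32_t seq, idx;
	uint64_t count;

	do {
		seq = pc->lock;
		__sync_synchronize();
		idx = pc->index;	/* see x86_pmu_event_idx() below */
		count = pc->offset;
		if (pc->cap_user_rdpmc && idx)
			count += __builtin_ia32_rdpmc(idx - 1);
		__sync_synchronize();
	} while (pc->lock != seq);

	return count;
}
#endif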
2544
2545 static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
2546 {
2547 if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
2548 return;
2549
2550 if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
2551 on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
2552 }
2553
2554 static int x86_pmu_event_idx(struct perf_event *event)
2555 {
2556 struct hw_perf_event *hwc = &event->hw;
2557
2558 if (!(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
2559 return 0;
2560
2561 if (is_metric_idx(hwc->idx))
2562 return INTEL_PMC_FIXED_RDPMC_METRICS + 1;
2563 else
2564 return hwc->event_base_rdpmc + 1;
2565 }
2566
2567 static ssize_t get_attr_rdpmc(struct device *cdev,
2568 struct device_attribute *attr,
2569 char *buf)
2570 {
2571 return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
2572 }
2573
2574 static ssize_t set_attr_rdpmc(struct device *cdev,
2575 struct device_attribute *attr,
2576 const char *buf, size_t count)
2577 {
2578 static DEFINE_MUTEX(rdpmc_mutex);
2579 unsigned long val;
2580 ssize_t ret;
2581
2582 ret = kstrtoul(buf, 0, &val);
2583 if (ret)
2584 return ret;
2585
2586 if (val > 2)
2587 return -EINVAL;
2588
2589 if (x86_pmu.attr_rdpmc_broken)
2590 return -ENOTSUPP;
2591
2592 guard(mutex)(&rdpmc_mutex);
2593
2594 if (val != x86_pmu.attr_rdpmc) {
2595 /*
2596 * Changing into or out of never available or always available,
2597 * aka perf-event-bypassing mode. This path is extremely slow,
2598 * but only root can trigger it, so it's okay.
2599 */
2600 if (val == 0)
2601 static_branch_inc(&rdpmc_never_available_key);
2602 else if (x86_pmu.attr_rdpmc == 0)
2603 static_branch_dec(&rdpmc_never_available_key);
2604
2605 if (val == 2)
2606 static_branch_inc(&rdpmc_always_available_key);
2607 else if (x86_pmu.attr_rdpmc == 2)
2608 static_branch_dec(&rdpmc_always_available_key);
2609
2610 on_each_cpu(cr4_update_pce, NULL, 1);
2611 x86_pmu.attr_rdpmc = val;
2612 }
2613
2614 return count;
2615 }
2616
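/*
 * The rdpmc sysfs attribute (typically /sys/devices/cpu/rdpmc) accepts:
 *   0 - RDPMC never available to user-space (CR4.PCE always clear),
 *   1 - RDPMC available while a user-readable event is mmap()ed in the
 *       current mm (the default),
 *   2 - RDPMC always available (CR4.PCE always set).
 */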
2617 static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
2618
2619 static struct attribute *x86_pmu_attrs[] = {
2620 &dev_attr_rdpmc.attr,
2621 NULL,
2622 };
2623
2624 static struct attribute_group x86_pmu_attr_group __ro_after_init = {
2625 .attrs = x86_pmu_attrs,
2626 };
2627
2628 static ssize_t max_precise_show(struct device *cdev,
2629 struct device_attribute *attr,
2630 char *buf)
2631 {
2632 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise());
2633 }
2634
2635 static DEVICE_ATTR_RO(max_precise);
2636
2637 static struct attribute *x86_pmu_caps_attrs[] = {
2638 &dev_attr_max_precise.attr,
2639 NULL
2640 };
2641
2642 static struct attribute_group x86_pmu_caps_group __ro_after_init = {
2643 .name = "caps",
2644 .attrs = x86_pmu_caps_attrs,
2645 };
2646
2647 static const struct attribute_group *x86_pmu_attr_groups[] = {
2648 &x86_pmu_attr_group,
2649 &x86_pmu_format_group,
2650 &x86_pmu_events_group,
2651 &x86_pmu_caps_group,
2652 NULL,
2653 };
2654
2655 static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
2656 struct task_struct *task, bool sched_in)
2657 {
2658 static_call_cond(x86_pmu_sched_task)(pmu_ctx, task, sched_in);
2659 }
2660
2661 void perf_check_microcode(void)
2662 {
2663 if (x86_pmu.check_microcode)
2664 x86_pmu.check_microcode();
2665 }
2666
2667 static int x86_pmu_check_period(struct perf_event *event, u64 value)
2668 {
2669 if (x86_pmu.check_period && x86_pmu.check_period(event, value))
2670 return -EINVAL;
2671
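	/*
	 * If the PMU's limit_period() would have to raise the period above
	 * the requested value, the requested period cannot be honoured.
	 */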
2672 if (value && x86_pmu.limit_period) {
2673 s64 left = value;
2674 x86_pmu.limit_period(event, &left);
2675 if (left > value)
2676 return -EINVAL;
2677 }
2678
2679 return 0;
2680 }
2681
2682 static int x86_pmu_aux_output_match(struct perf_event *event)
2683 {
2684 if (!(pmu.capabilities & PERF_PMU_CAP_AUX_OUTPUT))
2685 return 0;
2686
2687 if (x86_pmu.aux_output_match)
2688 return x86_pmu.aux_output_match(event);
2689
2690 return 0;
2691 }
2692
2693 static bool x86_pmu_filter(struct pmu *pmu, int cpu)
2694 {
2695 bool ret = false;
2696
2697 static_call_cond(x86_pmu_filter)(pmu, cpu, &ret);
2698
2699 return ret;
2700 }
2701
2702 static struct pmu pmu = {
2703 .pmu_enable = x86_pmu_enable,
2704 .pmu_disable = x86_pmu_disable,
2705
2706 .attr_groups = x86_pmu_attr_groups,
2707
2708 .event_init = x86_pmu_event_init,
2709
2710 .event_mapped = x86_pmu_event_mapped,
2711 .event_unmapped = x86_pmu_event_unmapped,
2712
2713 .add = x86_pmu_add,
2714 .del = x86_pmu_del,
2715 .start = x86_pmu_start,
2716 .stop = x86_pmu_stop,
2717 .read = x86_pmu_read,
2718
2719 .start_txn = x86_pmu_start_txn,
2720 .cancel_txn = x86_pmu_cancel_txn,
2721 .commit_txn = x86_pmu_commit_txn,
2722
2723 .event_idx = x86_pmu_event_idx,
2724 .sched_task = x86_pmu_sched_task,
2725 .check_period = x86_pmu_check_period,
2726
2727 .aux_output_match = x86_pmu_aux_output_match,
2728
2729 .filter = x86_pmu_filter,
2730 };
2731
2732 void arch_perf_update_userpage(struct perf_event *event,
2733 struct perf_event_mmap_page *userpg, u64 now)
2734 {
2735 struct cyc2ns_data data;
2736 u64 offset;
2737
2738 userpg->cap_user_time = 0;
2739 userpg->cap_user_time_zero = 0;
2740 userpg->cap_user_rdpmc =
2741 !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT);
2742 userpg->pmc_width = x86_pmu.cntval_bits;
2743
2744 if (!using_native_sched_clock() || !sched_clock_stable())
2745 return;
2746
2747 cyc2ns_read_begin(&data);
2748
2749 offset = data.cyc2ns_offset + __sched_clock_offset;
2750
2751 /*
2752 * Internal timekeeping for enabled/running/stopped times
2753 * is always in the local_clock domain.
2754 */
2755 userpg->cap_user_time = 1;
2756 userpg->time_mult = data.cyc2ns_mul;
2757 userpg->time_shift = data.cyc2ns_shift;
2758 userpg->time_offset = offset - now;
2759
2760 /*
2761 * cap_user_time_zero doesn't make sense when we're using a different
2762 * time base for the records.
2763 */
2764 if (!event->attr.use_clockid) {
2765 userpg->cap_user_time_zero = 1;
2766 userpg->time_zero = offset;
2767 }
2768
2769 cyc2ns_read_end();
2770 }
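
/*
 * With these fields, user-space can convert a TSC reading to
 * sched_clock() time without entering the kernel, per the algorithm
 * documented with perf_event_mmap_page in the perf uapi header:
 *
 *	quot = cyc >> time_shift;
 *	rem  = cyc & (((u64)1 << time_shift) - 1);
 *	time = time_offset + quot * time_mult +
 *	       ((rem * time_mult) >> time_shift);
 */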
2771
2772 /*
2773 * Determine whether the regs were taken from an irq/exception handler rather
2774 * than from perf_arch_fetch_caller_regs().
2775 */
2776 static bool perf_hw_regs(struct pt_regs *regs)
2777 {
2778 return regs->flags & X86_EFLAGS_FIXED;
2779 }
2780
2781 void
2782 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
2783 {
2784 struct unwind_state state;
2785 unsigned long addr;
2786
2787 if (perf_guest_state()) {
2788 /* TODO: We don't support guest os callchain now */
2789 return;
2790 }
2791
2792 if (perf_callchain_store(entry, regs->ip))
2793 return;
2794
2795 if (perf_hw_regs(regs))
2796 unwind_start(&state, current, regs, NULL);
2797 else
2798 unwind_start(&state, current, NULL, (void *)regs->sp);
2799
2800 for (; !unwind_done(&state); unwind_next_frame(&state)) {
2801 addr = unwind_get_return_address(&state);
2802 if (!addr || perf_callchain_store(entry, addr))
2803 return;
2804 }
2805 }
2806
2807 static inline int
2808 valid_user_frame(const void __user *fp, unsigned long size)
2809 {
2810 return __access_ok(fp, size);
2811 }
2812
2813 static unsigned long get_segment_base(unsigned int segment)
2814 {
2815 struct desc_struct *desc;
2816 unsigned int idx = segment >> 3;
2817
2818 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2819 #ifdef CONFIG_MODIFY_LDT_SYSCALL
2820 struct ldt_struct *ldt;
2821
2822 /*
2823 * If we're not in a valid context with a real (not just lazy)
2824 * user mm, then don't even try.
2825 */
2826 if (!nmi_uaccess_okay())
2827 return 0;
2828
2829 /* IRQs are off, so this synchronizes with smp_store_release */
2830 		ldt = smp_load_acquire(&current->mm->context.ldt);
2831 if (!ldt || idx >= ldt->nr_entries)
2832 return 0;
2833
2834 desc = &ldt->entries[idx];
2835 #else
2836 return 0;
2837 #endif
2838 } else {
2839 if (idx >= GDT_ENTRIES)
2840 return 0;
2841
2842 desc = raw_cpu_ptr(gdt_page.gdt) + idx;
2843 }
2844
2845 return get_desc_base(desc);
2846 }
2847
2848 #ifdef CONFIG_UPROBES
2849 /*
2850 * Heuristic-based check if uprobe is installed at the function entry.
2851 *
2852  * Under the assumption that user code is compiled with frame pointers,
2853  * `push %rbp/%ebp` is a good indicator that we indeed are at such an entry.
2854 *
2855 * Similarly, `endbr64` (assuming 64-bit mode) is also a common pattern.
2856 * If we get this wrong, captured stack trace might have one extra bogus
2857 * entry, but the rest of stack trace will still be meaningful.
2858 */
2859 static bool is_uprobe_at_func_entry(struct pt_regs *regs)
2860 {
2861 struct arch_uprobe *auprobe;
2862
2863 if (!current->utask)
2864 return false;
2865
2866 auprobe = current->utask->auprobe;
2867 if (!auprobe)
2868 return false;
2869
2870 /* push %rbp/%ebp */
2871 if (auprobe->insn[0] == 0x55)
2872 return true;
2873
2874 /* endbr64 (64-bit only) */
2875 if (user_64bit_mode(regs) && is_endbr((u32 *)auprobe->insn))
2876 return true;
2877
2878 return false;
2879 }
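
/*
 * Typical prologues this heuristic matches (illustrative disassembly):
 *
 *	55			push   %rbp
 *	48 89 e5		mov    %rsp,%rbp
 *
 * or, when compiled with IBT:
 *
 *	f3 0f 1e fa		endbr64
 *	55			push   %rbp
 */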
2880
2881 #else
2882 static bool is_uprobe_at_func_entry(struct pt_regs *regs)
2883 {
2884 return false;
2885 }
2886 #endif /* CONFIG_UPROBES */
2887
2888 #ifdef CONFIG_IA32_EMULATION
2889
2890 #include <linux/compat.h>
2891
2892 static inline int
2893 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
2894 {
2895 /* 32-bit process in 64-bit kernel. */
2896 unsigned long ss_base, cs_base;
2897 struct stack_frame_ia32 frame;
2898 const struct stack_frame_ia32 __user *fp;
2899 u32 ret_addr;
2900
2901 if (user_64bit_mode(regs))
2902 return 0;
2903
2904 cs_base = get_segment_base(regs->cs);
2905 ss_base = get_segment_base(regs->ss);
2906
2907 fp = compat_ptr(ss_base + regs->bp);
2908 pagefault_disable();
2909
2910 /* see perf_callchain_user() below for why we do this */
2911 if (is_uprobe_at_func_entry(regs) &&
2912 !get_user(ret_addr, (const u32 __user *)regs->sp))
2913 perf_callchain_store(entry, ret_addr);
2914
2915 while (entry->nr < entry->max_stack) {
2916 if (!valid_user_frame(fp, sizeof(frame)))
2917 break;
2918
2919 if (__get_user(frame.next_frame, &fp->next_frame))
2920 break;
2921 if (__get_user(frame.return_address, &fp->return_address))
2922 break;
2923
2924 perf_callchain_store(entry, cs_base + frame.return_address);
2925 fp = compat_ptr(ss_base + frame.next_frame);
2926 }
2927 pagefault_enable();
2928 return 1;
2929 }
2930 #else
2931 static inline int
2932 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
2933 {
2934 return 0;
2935 }
2936 #endif
2937
2938 void
2939 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
2940 {
2941 struct stack_frame frame;
2942 const struct stack_frame __user *fp;
2943 unsigned long ret_addr;
2944
2945 if (perf_guest_state()) {
2946 /* TODO: We don't support guest os callchain now */
2947 return;
2948 }
2949
2950 /*
2951 	 * We don't know what to do with VM86 stacks... ignore them for now.
2952 */
2953 if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
2954 return;
2955
2956 fp = (void __user *)regs->bp;
2957
2958 perf_callchain_store(entry, regs->ip);
2959
2960 if (!nmi_uaccess_okay())
2961 return;
2962
2963 if (perf_callchain_user32(regs, entry))
2964 return;
2965
2966 pagefault_disable();
2967
2968 /*
2969 * If we are called from uprobe handler, and we are indeed at the very
2970 * entry to user function (which is normally a `push %rbp` instruction,
2971 * under assumption of application being compiled with frame pointers),
2972 	 * we should read the return address from *regs->sp before proceeding
2973 	 * to follow frame pointers, otherwise we'll skip the immediate caller
2974 	 * as %rbp is not yet set up.
2975 */
2976 if (is_uprobe_at_func_entry(regs) &&
2977 !get_user(ret_addr, (const unsigned long __user *)regs->sp))
2978 perf_callchain_store(entry, ret_addr);
2979
2980 while (entry->nr < entry->max_stack) {
2981 if (!valid_user_frame(fp, sizeof(frame)))
2982 break;
2983
2984 if (__get_user(frame.next_frame, &fp->next_frame))
2985 break;
2986 if (__get_user(frame.return_address, &fp->return_address))
2987 break;
2988
2989 perf_callchain_store(entry, frame.return_address);
2990 fp = (void __user *)frame.next_frame;
2991 }
2992 pagefault_enable();
2993 }
2994
2995 /*
2996 * Deal with code segment offsets for the various execution modes:
2997 *
2998 * VM86 - the good olde 16 bit days, where the linear address is
2999 * 20 bits and we use regs->ip + 0x10 * regs->cs.
3000 *
3001 * IA32 - Where we need to look at GDT/LDT segment descriptor tables
3002 * to figure out what the 32bit base address is.
3003 *
3004 * X32 - has TIF_X32 set, but is running in x86_64
3005 *
3006 * X86_64 - CS,DS,SS,ES are all zero based.
3007 */
3008 static unsigned long code_segment_base(struct pt_regs *regs)
3009 {
3010 /*
3011 * For IA32 we look at the GDT/LDT segment base to convert the
3012 * effective IP to a linear address.
3013 */
3014
3015 #ifdef CONFIG_X86_32
3016 /*
3017 * If we are in VM86 mode, add the segment offset to convert to a
3018 * linear address.
3019 */
3020 if (regs->flags & X86_VM_MASK)
3021 return 0x10 * regs->cs;
3022
3023 if (user_mode(regs) && regs->cs != __USER_CS)
3024 return get_segment_base(regs->cs);
3025 #else
3026 if (user_mode(regs) && !user_64bit_mode(regs) &&
3027 regs->cs != __USER32_CS)
3028 return get_segment_base(regs->cs);
3029 #endif
3030 return 0;
3031 }
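
/*
 * E.g. in VM86 mode with regs->cs == 0xb800, the linear IP reported by
 * perf_arch_instruction_pointer() below is regs->ip + 0xb8000, matching
 * real-mode segment:offset arithmetic.
 */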
3032
3033 unsigned long perf_arch_instruction_pointer(struct pt_regs *regs)
3034 {
3035 return regs->ip + code_segment_base(regs);
3036 }
3037
3038 static unsigned long common_misc_flags(struct pt_regs *regs)
3039 {
3040 if (regs->flags & PERF_EFLAGS_EXACT)
3041 return PERF_RECORD_MISC_EXACT_IP;
3042
3043 return 0;
3044 }
3045
3046 static unsigned long guest_misc_flags(struct pt_regs *regs)
3047 {
3048 unsigned long guest_state = perf_guest_state();
3049
3050 if (!(guest_state & PERF_GUEST_ACTIVE))
3051 return 0;
3052
3053 if (guest_state & PERF_GUEST_USER)
3054 return PERF_RECORD_MISC_GUEST_USER;
3055 else
3056 return PERF_RECORD_MISC_GUEST_KERNEL;
3057
3058 }
3059
3060 static unsigned long host_misc_flags(struct pt_regs *regs)
3061 {
3062 if (user_mode(regs))
3063 return PERF_RECORD_MISC_USER;
3064 else
3065 return PERF_RECORD_MISC_KERNEL;
3066 }
3067
3068 unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs)
3069 {
3070 unsigned long flags = common_misc_flags(regs);
3071
3072 flags |= guest_misc_flags(regs);
3073
3074 return flags;
3075 }
3076
3077 unsigned long perf_arch_misc_flags(struct pt_regs *regs)
3078 {
3079 unsigned long flags = common_misc_flags(regs);
3080
3081 flags |= host_misc_flags(regs);
3082
3083 return flags;
3084 }
3085
3086 void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
3087 {
3088 /* This API doesn't currently support enumerating hybrid PMUs. */
3089 if (WARN_ON_ONCE(cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) ||
3090 !x86_pmu_initialized()) {
3091 memset(cap, 0, sizeof(*cap));
3092 return;
3093 }
3094
3095 /*
3096 * Note, hybrid CPU models get tracked as having hybrid PMUs even when
3097 * all E-cores are disabled via BIOS. When E-cores are disabled, the
3098 * base PMU holds the correct number of counters for P-cores.
3099 */
3100 cap->version = x86_pmu.version;
3101 cap->num_counters_gp = x86_pmu_num_counters(NULL);
3102 cap->num_counters_fixed = x86_pmu_num_counters_fixed(NULL);
3103 cap->bit_width_gp = x86_pmu.cntval_bits;
3104 cap->bit_width_fixed = x86_pmu.cntval_bits;
3105 cap->events_mask = (unsigned int)x86_pmu.events_maskl;
3106 cap->events_mask_len = x86_pmu.events_mask_len;
3107 cap->pebs_ept = x86_pmu.pebs_ept;
3108 }
3109 EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
3110
3111 u64 perf_get_hw_event_config(int hw_event)
3112 {
3113 int max = x86_pmu.max_events;
3114
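	/* array_index_nospec() clamps the index under speculation (Spectre v1). */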
3115 if (hw_event < max)
3116 return x86_pmu.event_map(array_index_nospec(hw_event, max));
3117
3118 return 0;
3119 }
3120 EXPORT_SYMBOL_GPL(perf_get_hw_event_config);
3121