1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Performance events core code:
4 *
5 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
6 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
7 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
8 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
9 */
10
11 #include <linux/fs.h>
12 #include <linux/mm.h>
13 #include <linux/cpu.h>
14 #include <linux/smp.h>
15 #include <linux/idr.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/slab.h>
19 #include <linux/hash.h>
20 #include <linux/tick.h>
21 #include <linux/sysfs.h>
22 #include <linux/dcache.h>
23 #include <linux/percpu.h>
24 #include <linux/ptrace.h>
25 #include <linux/reboot.h>
26 #include <linux/vmstat.h>
27 #include <linux/device.h>
28 #include <linux/export.h>
29 #include <linux/vmalloc.h>
30 #include <linux/hardirq.h>
31 #include <linux/hugetlb.h>
32 #include <linux/rculist.h>
33 #include <linux/uaccess.h>
34 #include <linux/syscalls.h>
35 #include <linux/anon_inodes.h>
36 #include <linux/kernel_stat.h>
37 #include <linux/cgroup.h>
38 #include <linux/perf_event.h>
39 #include <linux/trace_events.h>
40 #include <linux/hw_breakpoint.h>
41 #include <linux/mm_types.h>
42 #include <linux/module.h>
43 #include <linux/mman.h>
44 #include <linux/compat.h>
45 #include <linux/bpf.h>
46 #include <linux/filter.h>
47 #include <linux/namei.h>
48 #include <linux/parser.h>
49 #include <linux/sched/clock.h>
50 #include <linux/sched/mm.h>
51 #include <linux/proc_ns.h>
52 #include <linux/mount.h>
53 #include <linux/min_heap.h>
54 #include <linux/highmem.h>
55 #include <linux/pgtable.h>
56 #include <linux/buildid.h>
57 #include <linux/task_work.h>
58 #include <linux/percpu-rwsem.h>
59
60 #include "internal.h"
61
62 #include <asm/irq_regs.h>
63
64 typedef int (*remote_function_f)(void *);
65
66 struct remote_function_call {
67 struct task_struct *p;
68 remote_function_f func;
69 void *info;
70 int ret;
71 };
72
73 static void remote_function(void *data)
74 {
75 struct remote_function_call *tfc = data;
76 struct task_struct *p = tfc->p;
77
78 if (p) {
79 /* -EAGAIN */
80 if (task_cpu(p) != smp_processor_id())
81 return;
82
83 /*
84 * Now that we're on the right CPU with IRQs disabled, we can test
85 * if we hit the right task without races.
86 */
87
88 tfc->ret = -ESRCH; /* No such (running) process */
89 if (p != current)
90 return;
91 }
92
93 tfc->ret = tfc->func(tfc->info);
94 }
95
96 /**
97 * task_function_call - call a function on the cpu on which a task runs
98 * @p: the task to evaluate
99 * @func: the function to be called
100 * @info: the function call argument
101 *
102 * Calls the function @func when the task is currently running. This might
103 * be on the current CPU, which just calls the function directly. This will
104 * retry due to any failures in smp_call_function_single(), such as if the
105 * task_cpu() goes offline concurrently.
106 *
107 * returns: @func return value, -ESRCH when the process isn't running, or -ENXIO when the CPU is offline
108 */
109 static int
110 task_function_call(struct task_struct *p, remote_function_f func, void *info)
111 {
112 struct remote_function_call data = {
113 .p = p,
114 .func = func,
115 .info = info,
116 .ret = -EAGAIN,
117 };
118 int ret;
119
120 for (;;) {
121 ret = smp_call_function_single(task_cpu(p), remote_function,
122 &data, 1);
123 if (!ret)
124 ret = data.ret;
125
126 if (ret != -EAGAIN)
127 break;
128
129 cond_resched();
130 }
131
132 return ret;
133 }
134
135 /**
136 * cpu_function_call - call a function on the cpu
137 * @cpu: target cpu to queue this function
138 * @func: the function to be called
139 * @info: the function call argument
140 *
141 * Calls the function @func on the remote cpu.
142 *
143 * returns: @func return value or -ENXIO when the cpu is offline
144 */
145 static int cpu_function_call(int cpu, remote_function_f func, void *info)
146 {
147 struct remote_function_call data = {
148 .p = NULL,
149 .func = func,
150 .info = info,
151 .ret = -ENXIO, /* No such CPU */
152 };
153
154 smp_call_function_single(cpu, remote_function, &data, 1);
155
156 return data.ret;
157 }
158
159 enum event_type_t {
160 EVENT_FLEXIBLE = 0x01,
161 EVENT_PINNED = 0x02,
162 EVENT_TIME = 0x04,
163 EVENT_FROZEN = 0x08,
164 /* see ctx_resched() for details */
165 EVENT_CPU = 0x10,
166 EVENT_CGROUP = 0x20,
167
168 /* compound helpers */
169 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
170 EVENT_TIME_FROZEN = EVENT_TIME | EVENT_FROZEN,
171 };
172
173 static inline void __perf_ctx_lock(struct perf_event_context *ctx)
174 {
175 raw_spin_lock(&ctx->lock);
176 WARN_ON_ONCE(ctx->is_active & EVENT_FROZEN);
177 }
178
179 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
180 struct perf_event_context *ctx)
181 {
182 __perf_ctx_lock(&cpuctx->ctx);
183 if (ctx)
184 __perf_ctx_lock(ctx);
185 }
186
187 static inline void __perf_ctx_unlock(struct perf_event_context *ctx)
188 {
189 /*
190 * If ctx_sched_in() didn't again set any ALL flags, clean up
191 * after ctx_sched_out() by clearing is_active.
192 */
193 if (ctx->is_active & EVENT_FROZEN) {
194 if (!(ctx->is_active & EVENT_ALL))
195 ctx->is_active = 0;
196 else
197 ctx->is_active &= ~EVENT_FROZEN;
198 }
199 raw_spin_unlock(&ctx->lock);
200 }
201
202 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
203 struct perf_event_context *ctx)
204 {
205 if (ctx)
206 __perf_ctx_unlock(ctx);
207 __perf_ctx_unlock(&cpuctx->ctx);
208 }
209
210 typedef struct {
211 struct perf_cpu_context *cpuctx;
212 struct perf_event_context *ctx;
213 } class_perf_ctx_lock_t;
214
215 static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
216 { perf_ctx_unlock(_T->cpuctx, _T->ctx); }
217
218 static inline class_perf_ctx_lock_t
219 class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
220 struct perf_event_context *ctx)
221 { perf_ctx_lock(cpuctx, ctx); return (class_perf_ctx_lock_t){ cpuctx, ctx }; }
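/*
 * Illustration (not upstream code): the constructor/destructor pair above is
 * what backs the scoped-lock form, assuming the guard() machinery from
 * <linux/cleanup.h>:
 *
 *	{
 *		guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
 *		...	// both ctx locks held, dropped at scope exit
 *	}
 *
 * perf_cgroup_switch() below uses exactly this pattern.
 */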
222
223 #define TASK_TOMBSTONE ((void *)-1L)
224
225 static bool is_kernel_event(struct perf_event *event)
226 {
227 return READ_ONCE(event->owner) == TASK_TOMBSTONE;
228 }
229
230 static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
231
232 struct perf_event_context *perf_cpu_task_ctx(void)
233 {
234 lockdep_assert_irqs_disabled();
235 return this_cpu_ptr(&perf_cpu_context)->task_ctx;
236 }
237
238 /*
239 * On task ctx scheduling...
240 *
241 * When !ctx->nr_events a task context will not be scheduled. This means
242 * we can disable the scheduler hooks (for performance) without leaving
243 * pending task ctx state.
244 *
245 * This however results in two special cases:
246 *
247 * - removing the last event from a task ctx; this is relatively
248 * straightforward and is done in __perf_remove_from_context.
249 *
250 * - adding the first event to a task ctx; this is tricky because we cannot
251 * rely on ctx->is_active and therefore cannot use event_function_call().
252 * See perf_install_in_context().
253 *
254 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
255 */
256
257 typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
258 struct perf_event_context *, void *);
259
260 struct event_function_struct {
261 struct perf_event *event;
262 event_f func;
263 void *data;
264 };
265
266 static int event_function(void *info)
267 {
268 struct event_function_struct *efs = info;
269 struct perf_event *event = efs->event;
270 struct perf_event_context *ctx = event->ctx;
271 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
272 struct perf_event_context *task_ctx = cpuctx->task_ctx;
273 int ret = 0;
274
275 lockdep_assert_irqs_disabled();
276
277 perf_ctx_lock(cpuctx, task_ctx);
278 /*
279 * Since we do the IPI call without holding ctx->lock things can have
280 * changed, double check we hit the task we set out to hit.
281 */
282 if (ctx->task) {
283 if (ctx->task != current) {
284 ret = -ESRCH;
285 goto unlock;
286 }
287
288 /*
289 * We only use event_function_call() on established contexts,
290 * and event_function() is only ever called when active (or
291 * rather, we'll have bailed in task_function_call() or the
292 * above ctx->task != current test), therefore we must have
293 * ctx->is_active here.
294 */
295 WARN_ON_ONCE(!ctx->is_active);
296 /*
297 * And since we have ctx->is_active, cpuctx->task_ctx must
298 * match.
299 */
300 WARN_ON_ONCE(task_ctx != ctx);
301 } else {
302 WARN_ON_ONCE(&cpuctx->ctx != ctx);
303 }
304
305 efs->func(event, cpuctx, ctx, efs->data);
306 unlock:
307 perf_ctx_unlock(cpuctx, task_ctx);
308
309 return ret;
310 }
311
312 static void event_function_call(struct perf_event *event, event_f func, void *data)
313 {
314 struct perf_event_context *ctx = event->ctx;
315 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
316 struct perf_cpu_context *cpuctx;
317 struct event_function_struct efs = {
318 .event = event,
319 .func = func,
320 .data = data,
321 };
322
323 if (!event->parent) {
324 /*
325 * If this is a !child event, we must hold ctx::mutex to
326 * stabilize the event->ctx relation. See
327 * perf_event_ctx_lock().
328 */
329 lockdep_assert_held(&ctx->mutex);
330 }
331
332 if (!task) {
333 cpu_function_call(event->cpu, event_function, &efs);
334 return;
335 }
336
337 if (task == TASK_TOMBSTONE)
338 return;
339
340 again:
341 if (!task_function_call(task, event_function, &efs))
342 return;
343
344 local_irq_disable();
345 cpuctx = this_cpu_ptr(&perf_cpu_context);
346 perf_ctx_lock(cpuctx, ctx);
347 /*
348 * Reload the task pointer, it might have been changed by
349 * a concurrent perf_event_context_sched_out().
350 */
351 task = ctx->task;
352 if (task == TASK_TOMBSTONE)
353 goto unlock;
354 if (ctx->is_active) {
355 perf_ctx_unlock(cpuctx, ctx);
356 local_irq_enable();
357 goto again;
358 }
359 func(event, NULL, ctx, data);
360 unlock:
361 perf_ctx_unlock(cpuctx, ctx);
362 local_irq_enable();
363 }
364
365 /*
366 * Similar to event_function_call() + event_function(), but hard assumes IRQs
367 * are already disabled and we're on the right CPU.
368 */
369 static void event_function_local(struct perf_event *event, event_f func, void *data)
370 {
371 struct perf_event_context *ctx = event->ctx;
372 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
373 struct task_struct *task = READ_ONCE(ctx->task);
374 struct perf_event_context *task_ctx = NULL;
375
376 lockdep_assert_irqs_disabled();
377
378 if (task) {
379 if (task == TASK_TOMBSTONE)
380 return;
381
382 task_ctx = ctx;
383 }
384
385 perf_ctx_lock(cpuctx, task_ctx);
386
387 task = ctx->task;
388 if (task == TASK_TOMBSTONE)
389 goto unlock;
390
391 if (task) {
392 /*
393 * We must be either inactive or active and the right task,
394 * otherwise we're screwed, since we cannot IPI to somewhere
395 * else.
396 */
397 if (ctx->is_active) {
398 if (WARN_ON_ONCE(task != current))
399 goto unlock;
400
401 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
402 goto unlock;
403 }
404 } else {
405 WARN_ON_ONCE(&cpuctx->ctx != ctx);
406 }
407
408 func(event, cpuctx, ctx, data);
409 unlock:
410 perf_ctx_unlock(cpuctx, task_ctx);
411 }
412
413 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
414 PERF_FLAG_FD_OUTPUT |\
415 PERF_FLAG_PID_CGROUP |\
416 PERF_FLAG_FD_CLOEXEC)
417
418 /*
419 * branch priv levels that need permission checks
420 */
421 #define PERF_SAMPLE_BRANCH_PERM_PLM \
422 (PERF_SAMPLE_BRANCH_KERNEL |\
423 PERF_SAMPLE_BRANCH_HV)
424
425 /*
426 * perf_sched_events : >0 events exist
427 */
428
429 static void perf_sched_delayed(struct work_struct *work);
430 DEFINE_STATIC_KEY_FALSE(perf_sched_events);
431 static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
432 static DEFINE_MUTEX(perf_sched_mutex);
433 static atomic_t perf_sched_count;
434
435 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
436
437 static atomic_t nr_mmap_events __read_mostly;
438 static atomic_t nr_comm_events __read_mostly;
439 static atomic_t nr_namespaces_events __read_mostly;
440 static atomic_t nr_task_events __read_mostly;
441 static atomic_t nr_freq_events __read_mostly;
442 static atomic_t nr_switch_events __read_mostly;
443 static atomic_t nr_ksymbol_events __read_mostly;
444 static atomic_t nr_bpf_events __read_mostly;
445 static atomic_t nr_cgroup_events __read_mostly;
446 static atomic_t nr_text_poke_events __read_mostly;
447 static atomic_t nr_build_id_events __read_mostly;
448
449 static LIST_HEAD(pmus);
450 static DEFINE_MUTEX(pmus_lock);
451 static struct srcu_struct pmus_srcu;
452 static cpumask_var_t perf_online_mask;
453 static cpumask_var_t perf_online_core_mask;
454 static cpumask_var_t perf_online_die_mask;
455 static cpumask_var_t perf_online_cluster_mask;
456 static cpumask_var_t perf_online_pkg_mask;
457 static cpumask_var_t perf_online_sys_mask;
458 static struct kmem_cache *perf_event_cache;
459
460 /*
461 * perf event paranoia level:
462 * -1 - not paranoid at all
463 * 0 - disallow raw tracepoint access for unpriv
464 * 1 - disallow cpu events for unpriv
465 * 2 - disallow kernel profiling for unpriv
466 */
467 int sysctl_perf_event_paranoid __read_mostly = 2;
468
469 /* Minimum for 512 kiB + 1 user control page. 'free' kiB per user. */
470 static int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024);
471
472 /*
473 * max perf event sample rate
474 */
475 #define DEFAULT_MAX_SAMPLE_RATE 100000
476 #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
477 #define DEFAULT_CPU_TIME_MAX_PERCENT 25
478
479 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
480 static int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
481
482 static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
483 static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
484
485 static int perf_sample_allowed_ns __read_mostly =
486 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
487
488 static void update_perf_cpu_limits(void)
489 {
490 u64 tmp = perf_sample_period_ns;
491
492 tmp *= sysctl_perf_cpu_time_max_percent;
493 tmp = div_u64(tmp, 100);
494 if (!tmp)
495 tmp = 1;
496
497 WRITE_ONCE(perf_sample_allowed_ns, tmp);
498 }
499
500 static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc);
501
502 static int perf_event_max_sample_rate_handler(const struct ctl_table *table, int write,
503 void *buffer, size_t *lenp, loff_t *ppos)
504 {
505 int ret;
506 int perf_cpu = sysctl_perf_cpu_time_max_percent;
507 /*
508 * If throttling is disabled don't allow the write:
509 */
510 if (write && (perf_cpu == 100 || perf_cpu == 0))
511 return -EINVAL;
512
513 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
514 if (ret || !write)
515 return ret;
516
517 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
518 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
519 update_perf_cpu_limits();
520
521 return 0;
522 }
523
524 static int perf_cpu_time_max_percent_handler(const struct ctl_table *table, int write,
525 void *buffer, size_t *lenp, loff_t *ppos)
526 {
527 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
528
529 if (ret || !write)
530 return ret;
531
532 if (sysctl_perf_cpu_time_max_percent == 100 ||
533 sysctl_perf_cpu_time_max_percent == 0) {
534 printk(KERN_WARNING
535 "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
536 WRITE_ONCE(perf_sample_allowed_ns, 0);
537 } else {
538 update_perf_cpu_limits();
539 }
540
541 return 0;
542 }
543
544 static const struct ctl_table events_core_sysctl_table[] = {
545 /*
546 * User-space relies on this file as a feature check for
547 * perf_events being enabled. It's an ABI, do not remove!
548 */
549 {
550 .procname = "perf_event_paranoid",
551 .data = &sysctl_perf_event_paranoid,
552 .maxlen = sizeof(sysctl_perf_event_paranoid),
553 .mode = 0644,
554 .proc_handler = proc_dointvec,
555 },
556 {
557 .procname = "perf_event_mlock_kb",
558 .data = &sysctl_perf_event_mlock,
559 .maxlen = sizeof(sysctl_perf_event_mlock),
560 .mode = 0644,
561 .proc_handler = proc_dointvec,
562 },
563 {
564 .procname = "perf_event_max_sample_rate",
565 .data = &sysctl_perf_event_sample_rate,
566 .maxlen = sizeof(sysctl_perf_event_sample_rate),
567 .mode = 0644,
568 .proc_handler = perf_event_max_sample_rate_handler,
569 .extra1 = SYSCTL_ONE,
570 },
571 {
572 .procname = "perf_cpu_time_max_percent",
573 .data = &sysctl_perf_cpu_time_max_percent,
574 .maxlen = sizeof(sysctl_perf_cpu_time_max_percent),
575 .mode = 0644,
576 .proc_handler = perf_cpu_time_max_percent_handler,
577 .extra1 = SYSCTL_ZERO,
578 .extra2 = SYSCTL_ONE_HUNDRED,
579 },
580 };
581
582 static int __init init_events_core_sysctls(void)
583 {
584 register_sysctl_init("kernel", events_core_sysctl_table);
585 return 0;
586 }
587 core_initcall(init_events_core_sysctls);
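
/*
 * For reference, the table above surfaces these knobs under /proc/sys/kernel/
 * (values shown are the defaults defined in this file):
 *
 *	perf_event_paranoid		= 2
 *	perf_event_mlock_kb		= 512 + (PAGE_SIZE / 1024)
 *	perf_event_max_sample_rate	= 100000
 *	perf_cpu_time_max_percent	= 25
 */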
588
589
590 /*
591 * perf samples are done in some very critical code paths (NMIs).
592 * If they take too much CPU time, the system can lock up and not
593 * get any real work done. This will drop the sample rate when
594 * we detect that events are taking too long.
595 */
596 #define NR_ACCUMULATED_SAMPLES 128
597 static DEFINE_PER_CPU(u64, running_sample_length);
598
599 static u64 __report_avg;
600 static u64 __report_allowed;
601
602 static void perf_duration_warn(struct irq_work *w)
603 {
604 printk_ratelimited(KERN_INFO
605 "perf: interrupt took too long (%lld > %lld), lowering "
606 "kernel.perf_event_max_sample_rate to %d\n",
607 __report_avg, __report_allowed,
608 sysctl_perf_event_sample_rate);
609 }
610
611 static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
612
613 void perf_sample_event_took(u64 sample_len_ns)
614 {
615 u64 max_len = READ_ONCE(perf_sample_allowed_ns);
616 u64 running_len;
617 u64 avg_len;
618 u32 max;
619
620 if (max_len == 0)
621 return;
622
623 /* Decay the counter by 1 average sample. */
624 running_len = __this_cpu_read(running_sample_length);
625 running_len -= running_len/NR_ACCUMULATED_SAMPLES;
626 running_len += sample_len_ns;
627 __this_cpu_write(running_sample_length, running_len);
628
629 /*
630 * Note: this will be biased artificially low until we have
631 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
632 * from having to maintain a count.
633 */
634 avg_len = running_len/NR_ACCUMULATED_SAMPLES;
635 if (avg_len <= max_len)
636 return;
637
638 __report_avg = avg_len;
639 __report_allowed = max_len;
640
641 /*
642 * Add 25% slack to the observed average when computing the new threshold.
643 */
644 avg_len += avg_len / 4;
645 max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
646 if (avg_len < max)
647 max /= (u32)avg_len;
648 else
649 max = 1;
650
651 WRITE_ONCE(perf_sample_allowed_ns, avg_len);
652 WRITE_ONCE(max_samples_per_tick, max);
653
654 sysctl_perf_event_sample_rate = max * HZ;
655 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
656
657 if (!irq_work_queue(&perf_duration_work)) {
658 early_printk("perf: interrupt took too long (%lld > %lld), lowering "
659 "kernel.perf_event_max_sample_rate to %d\n",
660 __report_avg, __report_allowed,
661 sysctl_perf_event_sample_rate);
662 }
663 }
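
/*
 * Worked example (illustrative numbers, not from a real trace): with HZ=1000
 * (TICK_NSEC = 1,000,000 ns) and perf_cpu_time_max_percent = 25, the per-tick
 * budget is (1,000,000 / 100) * 25 = 250,000 ns. If the decayed average sample
 * cost is 10,000 ns, the new threshold becomes 10,000 + 10,000/4 = 12,500 ns,
 * max_samples_per_tick = 250,000 / 12,500 = 20, and
 * kernel.perf_event_max_sample_rate drops to 20 * HZ = 20,000.
 */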
664
665 static atomic64_t perf_event_id;
666
667 static void update_context_time(struct perf_event_context *ctx);
668 static u64 perf_event_time(struct perf_event *event);
669
670 void __weak perf_event_print_debug(void) { }
671
672 static inline u64 perf_clock(void)
673 {
674 return local_clock();
675 }
676
677 static inline u64 perf_event_clock(struct perf_event *event)
678 {
679 return event->clock();
680 }
681
682 /*
683 * State based event timekeeping...
684 *
685 * The basic idea is to use event->state to determine which (if any) time
686 * fields to increment with the current delta. This means we only need to
687 * update timestamps when we change state or when they are explicitly requested
688 * (read).
689 *
690 * Event groups make things a little more complicated, but not terribly so. The
691 * rules for a group are that if the group leader is OFF the entire group is
692 * OFF, irrespective of what the group member states are. This results in
693 * __perf_effective_state().
694 *
695 * A further ramification is that when a group leader flips between OFF and
696 * !OFF, we need to update all group member times.
697 *
698 *
699 * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
700 * need to make sure the relevant context time is updated before we try and
701 * update our timestamps.
702 */
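
/*
 * Worked example (hypothetical timestamps): an event is created OFF at t=0,
 * enabled (INACTIVE) at t=10 and scheduled in (ACTIVE) at t=15. A read at
 * t=30 then reports total_time_enabled = 30 - 10 = 20 and
 * total_time_running = 30 - 15 = 15: each state change and each read folds
 * the delta since ->tstamp into whichever totals the *previous* state allowed.
 */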
703
704 static __always_inline enum perf_event_state
705 __perf_effective_state(struct perf_event *event)
706 {
707 struct perf_event *leader = event->group_leader;
708
709 if (leader->state <= PERF_EVENT_STATE_OFF)
710 return leader->state;
711
712 return event->state;
713 }
714
715 static __always_inline void
716 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
717 {
718 enum perf_event_state state = __perf_effective_state(event);
719 u64 delta = now - event->tstamp;
720
721 *enabled = event->total_time_enabled;
722 if (state >= PERF_EVENT_STATE_INACTIVE)
723 *enabled += delta;
724
725 *running = event->total_time_running;
726 if (state >= PERF_EVENT_STATE_ACTIVE)
727 *running += delta;
728 }
729
730 static void perf_event_update_time(struct perf_event *event)
731 {
732 u64 now = perf_event_time(event);
733
734 __perf_update_times(event, now, &event->total_time_enabled,
735 &event->total_time_running);
736 event->tstamp = now;
737 }
738
739 static void perf_event_update_sibling_time(struct perf_event *leader)
740 {
741 struct perf_event *sibling;
742
743 for_each_sibling_event(sibling, leader)
744 perf_event_update_time(sibling);
745 }
746
747 static void
748 perf_event_set_state(struct perf_event *event, enum perf_event_state state)
749 {
750 if (event->state == state)
751 return;
752
753 perf_event_update_time(event);
754 /*
755 * If a group leader gets enabled/disabled all its siblings
756 * are affected too.
757 */
758 if ((event->state < 0) ^ (state < 0))
759 perf_event_update_sibling_time(event);
760
761 WRITE_ONCE(event->state, state);
762 }
763
764 /*
765 * UP store-release, load-acquire
766 */
767
768 #define __store_release(ptr, val) \
769 do { \
770 barrier(); \
771 WRITE_ONCE(*(ptr), (val)); \
772 } while (0)
773
774 #define __load_acquire(ptr) \
775 ({ \
776 __unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr)); \
777 barrier(); \
778 ___p; \
779 })
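
/*
 * Sketch of the intended pairing (writer and reader are on the same CPU,
 * which is why the plain barrier() in the helpers above is sufficient):
 *
 *	writer (ctx->lock held):
 *		WRITE_ONCE(info->timeoffset, ...);
 *		__store_release(&info->active, 1);
 *
 *	reader (e.g. NMI, no locks):
 *		if (__load_acquire(&info->active))
 *			now += READ_ONCE(info->timeoffset);
 *
 * See perf_cgroup_set_timestamp() and perf_cgroup_event_time_now() below.
 */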
780
781 #define for_each_epc(_epc, _ctx, _pmu, _cgroup) \
782 list_for_each_entry(_epc, &((_ctx)->pmu_ctx_list), pmu_ctx_entry) \
783 if (_cgroup && !_epc->nr_cgroups) \
784 continue; \
785 else if (_pmu && _epc->pmu != _pmu) \
786 continue; \
787 else
788
789 static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
790 {
791 struct perf_event_pmu_context *pmu_ctx;
792
793 for_each_epc(pmu_ctx, ctx, NULL, cgroup)
794 perf_pmu_disable(pmu_ctx->pmu);
795 }
796
797 static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
798 {
799 struct perf_event_pmu_context *pmu_ctx;
800
801 for_each_epc(pmu_ctx, ctx, NULL, cgroup)
802 perf_pmu_enable(pmu_ctx->pmu);
803 }
804
805 static void ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type);
806 static void ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type);
807
808 #ifdef CONFIG_CGROUP_PERF
809
810 static inline bool
811 perf_cgroup_match(struct perf_event *event)
812 {
813 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
814
815 /* @event doesn't care about cgroup */
816 if (!event->cgrp)
817 return true;
818
819 /* wants specific cgroup scope but @cpuctx isn't associated with any */
820 if (!cpuctx->cgrp)
821 return false;
822
823 /*
824 * Cgroup scoping is recursive. An event enabled for a cgroup is
825 * also enabled for all its descendant cgroups. If @cpuctx's
826 * cgroup is a descendant of @event's (the test covers identity
827 * case), it's a match.
828 */
829 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
830 event->cgrp->css.cgroup);
831 }
832
833 static inline void perf_detach_cgroup(struct perf_event *event)
834 {
835 css_put(&event->cgrp->css);
836 event->cgrp = NULL;
837 }
838
839 static inline int is_cgroup_event(struct perf_event *event)
840 {
841 return event->cgrp != NULL;
842 }
843
844 static inline u64 perf_cgroup_event_time(struct perf_event *event)
845 {
846 struct perf_cgroup_info *t;
847
848 t = per_cpu_ptr(event->cgrp->info, event->cpu);
849 return t->time;
850 }
851
852 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
853 {
854 struct perf_cgroup_info *t;
855
856 t = per_cpu_ptr(event->cgrp->info, event->cpu);
857 if (!__load_acquire(&t->active))
858 return t->time;
859 now += READ_ONCE(t->timeoffset);
860 return now;
861 }
862
863 static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
864 {
865 if (adv)
866 info->time += now - info->timestamp;
867 info->timestamp = now;
868 /*
869 * see update_context_time()
870 */
871 WRITE_ONCE(info->timeoffset, info->time - info->timestamp);
872 }
873
874 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
875 {
876 struct perf_cgroup *cgrp = cpuctx->cgrp;
877 struct cgroup_subsys_state *css;
878 struct perf_cgroup_info *info;
879
880 if (cgrp) {
881 u64 now = perf_clock();
882
883 for (css = &cgrp->css; css; css = css->parent) {
884 cgrp = container_of(css, struct perf_cgroup, css);
885 info = this_cpu_ptr(cgrp->info);
886
887 __update_cgrp_time(info, now, true);
888 if (final)
889 __store_release(&info->active, 0);
890 }
891 }
892 }
893
894 static inline void update_cgrp_time_from_event(struct perf_event *event)
895 {
896 struct perf_cgroup_info *info;
897
898 /*
899 * ensure we access cgroup data only when needed and
900 * when we know the cgroup is pinned (css_get)
901 */
902 if (!is_cgroup_event(event))
903 return;
904
905 info = this_cpu_ptr(event->cgrp->info);
906 /*
907 * Do not update time when cgroup is not active
908 */
909 if (info->active)
910 __update_cgrp_time(info, perf_clock(), true);
911 }
912
913 static inline void
914 perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
915 {
916 struct perf_event_context *ctx = &cpuctx->ctx;
917 struct perf_cgroup *cgrp = cpuctx->cgrp;
918 struct perf_cgroup_info *info;
919 struct cgroup_subsys_state *css;
920
921 /*
922 * ctx->lock held by caller
923 * ensure we do not access cgroup data
924 * unless we have the cgroup pinned (css_get)
925 */
926 if (!cgrp)
927 return;
928
929 WARN_ON_ONCE(!ctx->nr_cgroups);
930
931 for (css = &cgrp->css; css; css = css->parent) {
932 cgrp = container_of(css, struct perf_cgroup, css);
933 info = this_cpu_ptr(cgrp->info);
934 __update_cgrp_time(info, ctx->timestamp, false);
935 __store_release(&info->active, 1);
936 }
937 }
938
939 /*
940 * reschedule events based on the cgroup constraint of the task.
941 */
942 static void perf_cgroup_switch(struct task_struct *task)
943 {
944 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
945 struct perf_cgroup *cgrp;
946
947 /*
948 * cpuctx->cgrp is set when the first cgroup event is enabled,
949 * and is cleared when the last cgroup event is disabled.
950 */
951 if (READ_ONCE(cpuctx->cgrp) == NULL)
952 return;
953
954 cgrp = perf_cgroup_from_task(task, NULL);
955 if (READ_ONCE(cpuctx->cgrp) == cgrp)
956 return;
957
958 guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
959 /*
960 * Re-check, could've raced vs perf_remove_from_context().
961 */
962 if (READ_ONCE(cpuctx->cgrp) == NULL)
963 return;
964
965 WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
966
967 perf_ctx_disable(&cpuctx->ctx, true);
968
969 ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
970 /*
971 * must not be done before ctxswout due
972 * to update_cgrp_time_from_cpuctx() in
973 * ctx_sched_out()
974 */
975 cpuctx->cgrp = cgrp;
976 /*
977 * set cgrp before ctxsw in to allow
978 * perf_cgroup_set_timestamp() in ctx_sched_in()
979 * to not have to pass task around
980 */
981 ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
982
983 perf_ctx_enable(&cpuctx->ctx, true);
984 }
985
986 static int perf_cgroup_ensure_storage(struct perf_event *event,
987 struct cgroup_subsys_state *css)
988 {
989 struct perf_cpu_context *cpuctx;
990 struct perf_event **storage;
991 int cpu, heap_size, ret = 0;
992
993 /*
994 * Allow storage to have sufficient space for an iterator for each
995 * possibly nested cgroup plus an iterator for events with no cgroup.
996 */
997 for (heap_size = 1; css; css = css->parent)
998 heap_size++;
999
1000 for_each_possible_cpu(cpu) {
1001 cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
1002 if (heap_size <= cpuctx->heap_size)
1003 continue;
1004
1005 storage = kmalloc_node(heap_size * sizeof(struct perf_event *),
1006 GFP_KERNEL, cpu_to_node(cpu));
1007 if (!storage) {
1008 ret = -ENOMEM;
1009 break;
1010 }
1011
1012 raw_spin_lock_irq(&cpuctx->ctx.lock);
1013 if (cpuctx->heap_size < heap_size) {
1014 swap(cpuctx->heap, storage);
1015 if (storage == cpuctx->heap_default)
1016 storage = NULL;
1017 cpuctx->heap_size = heap_size;
1018 }
1019 raw_spin_unlock_irq(&cpuctx->ctx.lock);
1020
1021 kfree(storage);
1022 }
1023
1024 return ret;
1025 }
1026
1027 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
1028 struct perf_event_attr *attr,
1029 struct perf_event *group_leader)
1030 {
1031 struct perf_cgroup *cgrp;
1032 struct cgroup_subsys_state *css;
1033 CLASS(fd, f)(fd);
1034 int ret = 0;
1035
1036 if (fd_empty(f))
1037 return -EBADF;
1038
1039 css = css_tryget_online_from_dir(fd_file(f)->f_path.dentry,
1040 &perf_event_cgrp_subsys);
1041 if (IS_ERR(css))
1042 return PTR_ERR(css);
1043
1044 ret = perf_cgroup_ensure_storage(event, css);
1045 if (ret)
1046 return ret;
1047
1048 cgrp = container_of(css, struct perf_cgroup, css);
1049 event->cgrp = cgrp;
1050
1051 /*
1052 * all events in a group must monitor
1053 * the same cgroup because a task belongs
1054 * to only one perf cgroup at a time
1055 */
1056 if (group_leader && group_leader->cgrp != cgrp) {
1057 perf_detach_cgroup(event);
1058 ret = -EINVAL;
1059 }
1060 return ret;
1061 }
1062
1063 static inline void
1064 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
1065 {
1066 struct perf_cpu_context *cpuctx;
1067
1068 if (!is_cgroup_event(event))
1069 return;
1070
1071 event->pmu_ctx->nr_cgroups++;
1072
1073 /*
1074 * Because cgroup events are always per-cpu events,
1075 * @ctx == &cpuctx->ctx.
1076 */
1077 cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
1078
1079 if (ctx->nr_cgroups++)
1080 return;
1081
1082 cpuctx->cgrp = perf_cgroup_from_task(current, ctx);
1083 }
1084
1085 static inline void
1086 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
1087 {
1088 struct perf_cpu_context *cpuctx;
1089
1090 if (!is_cgroup_event(event))
1091 return;
1092
1093 event->pmu_ctx->nr_cgroups--;
1094
1095 /*
1096 * Because cgroup events are always per-cpu events,
1097 * @ctx == &cpuctx->ctx.
1098 */
1099 cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
1100
1101 if (--ctx->nr_cgroups)
1102 return;
1103
1104 cpuctx->cgrp = NULL;
1105 }
1106
1107 #else /* !CONFIG_CGROUP_PERF */
1108
1109 static inline bool
1110 perf_cgroup_match(struct perf_event *event)
1111 {
1112 return true;
1113 }
1114
1115 static inline void perf_detach_cgroup(struct perf_event *event)
1116 {}
1117
1118 static inline int is_cgroup_event(struct perf_event *event)
1119 {
1120 return 0;
1121 }
1122
1123 static inline void update_cgrp_time_from_event(struct perf_event *event)
1124 {
1125 }
1126
1127 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
1128 bool final)
1129 {
1130 }
1131
1132 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
1133 struct perf_event_attr *attr,
1134 struct perf_event *group_leader)
1135 {
1136 return -EINVAL;
1137 }
1138
1139 static inline void
1140 perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
1141 {
1142 }
1143
1144 static inline u64 perf_cgroup_event_time(struct perf_event *event)
1145 {
1146 return 0;
1147 }
1148
1149 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
1150 {
1151 return 0;
1152 }
1153
1154 static inline void
1155 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
1156 {
1157 }
1158
1159 static inline void
1160 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
1161 {
1162 }
1163
1164 static void perf_cgroup_switch(struct task_struct *task)
1165 {
1166 }
1167 #endif
1168
1169 /*
1170 * set default to be dependent on timer tick just
1171 * like original code
1172 */
1173 #define PERF_CPU_HRTIMER (1000 / HZ)
1174 /*
1175 * function must be called with interrupts disabled
1176 */
1177 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
1178 {
1179 struct perf_cpu_pmu_context *cpc;
1180 bool rotations;
1181
1182 lockdep_assert_irqs_disabled();
1183
1184 cpc = container_of(hr, struct perf_cpu_pmu_context, hrtimer);
1185 rotations = perf_rotate_context(cpc);
1186
1187 raw_spin_lock(&cpc->hrtimer_lock);
1188 if (rotations)
1189 hrtimer_forward_now(hr, cpc->hrtimer_interval);
1190 else
1191 cpc->hrtimer_active = 0;
1192 raw_spin_unlock(&cpc->hrtimer_lock);
1193
1194 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
1195 }
1196
1197 static void __perf_mux_hrtimer_init(struct perf_cpu_pmu_context *cpc, int cpu)
1198 {
1199 struct hrtimer *timer = &cpc->hrtimer;
1200 struct pmu *pmu = cpc->epc.pmu;
1201 u64 interval;
1202
1203 /*
1204 * check default is sane, if not set then force to
1205 * default interval (1/tick)
1206 */
1207 interval = pmu->hrtimer_interval_ms;
1208 if (interval < 1)
1209 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
1210
1211 cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
1212
1213 raw_spin_lock_init(&cpc->hrtimer_lock);
1214 hrtimer_setup(timer, perf_mux_hrtimer_handler, CLOCK_MONOTONIC,
1215 HRTIMER_MODE_ABS_PINNED_HARD);
1216 }
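
/*
 * Example (illustrative): with HZ=250, PERF_CPU_HRTIMER = 1000/250 = 4, so a
 * PMU that does not set ->hrtimer_interval_ms gets a 4ms multiplexing period,
 * i.e. roughly one scheduler tick, matching the comment above the #define.
 */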
1217
1218 static int perf_mux_hrtimer_restart(struct perf_cpu_pmu_context *cpc)
1219 {
1220 struct hrtimer *timer = &cpc->hrtimer;
1221 unsigned long flags;
1222
1223 raw_spin_lock_irqsave(&cpc->hrtimer_lock, flags);
1224 if (!cpc->hrtimer_active) {
1225 cpc->hrtimer_active = 1;
1226 hrtimer_forward_now(timer, cpc->hrtimer_interval);
1227 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
1228 }
1229 raw_spin_unlock_irqrestore(&cpc->hrtimer_lock, flags);
1230
1231 return 0;
1232 }
1233
1234 static int perf_mux_hrtimer_restart_ipi(void *arg)
1235 {
1236 return perf_mux_hrtimer_restart(arg);
1237 }
1238
1239 static __always_inline struct perf_cpu_pmu_context *this_cpc(struct pmu *pmu)
1240 {
1241 return *this_cpu_ptr(pmu->cpu_pmu_context);
1242 }
1243
1244 void perf_pmu_disable(struct pmu *pmu)
1245 {
1246 int *count = &this_cpc(pmu)->pmu_disable_count;
1247 if (!(*count)++)
1248 pmu->pmu_disable(pmu);
1249 }
1250
1251 void perf_pmu_enable(struct pmu *pmu)
1252 {
1253 int *count = &this_cpc(pmu)->pmu_disable_count;
1254 if (!--(*count))
1255 pmu->pmu_enable(pmu);
1256 }
1257
1258 static void perf_assert_pmu_disabled(struct pmu *pmu)
1259 {
1260 int *count = &this_cpc(pmu)->pmu_disable_count;
1261 WARN_ON_ONCE(*count == 0);
1262 }
1263
1264 static inline void perf_pmu_read(struct perf_event *event)
1265 {
1266 if (event->state == PERF_EVENT_STATE_ACTIVE)
1267 event->pmu->read(event);
1268 }
1269
1270 static void get_ctx(struct perf_event_context *ctx)
1271 {
1272 refcount_inc(&ctx->refcount);
1273 }
1274
1275 static void free_ctx(struct rcu_head *head)
1276 {
1277 struct perf_event_context *ctx;
1278
1279 ctx = container_of(head, struct perf_event_context, rcu_head);
1280 kfree(ctx);
1281 }
1282
1283 static void put_ctx(struct perf_event_context *ctx)
1284 {
1285 if (refcount_dec_and_test(&ctx->refcount)) {
1286 if (ctx->parent_ctx)
1287 put_ctx(ctx->parent_ctx);
1288 if (ctx->task && ctx->task != TASK_TOMBSTONE)
1289 put_task_struct(ctx->task);
1290 call_rcu(&ctx->rcu_head, free_ctx);
1291 } else {
1292 smp_mb__after_atomic(); /* pairs with wait_var_event() */
1293 if (ctx->task == TASK_TOMBSTONE)
1294 wake_up_var(&ctx->refcount);
1295 }
1296 }
1297
1298 /*
1299 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
1300 * perf_pmu_migrate_context() we need some magic.
1301 *
1302 * Those places that change perf_event::ctx will hold both
1303 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
1304 *
1305 * Lock ordering is by mutex address. There are two other sites where
1306 * perf_event_context::mutex nests and those are:
1307 *
1308 * - perf_event_exit_task_context() [ child , 0 ]
1309 * perf_event_exit_event()
1310 * put_event() [ parent, 1 ]
1311 *
1312 * - perf_event_init_context() [ parent, 0 ]
1313 * inherit_task_group()
1314 * inherit_group()
1315 * inherit_event()
1316 * perf_event_alloc()
1317 * perf_init_event()
1318 * perf_try_init_event() [ child , 1 ]
1319 *
1320 * While it appears there is an obvious deadlock here -- the parent and child
1321 * nesting levels are inverted between the two -- this is in fact safe because
1322 * life-time rules separate them: an exiting task cannot fork, and a
1323 * spawning task cannot (yet) exit.
1324 *
1325 * But remember that these are parent<->child context relations, and
1326 * migration does not affect children, therefore these two orderings should not
1327 * interact.
1328 *
1329 * The change in perf_event::ctx does not affect children (as claimed above)
1330 * because the sys_perf_event_open() case will install a new event and break
1331 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1332 * concerned with cpuctx and that doesn't have children.
1333 *
1334 * The places that change perf_event::ctx will issue:
1335 *
1336 * perf_remove_from_context();
1337 * synchronize_rcu();
1338 * perf_install_in_context();
1339 *
1340 * to effect the change. The remove_from_context() + synchronize_rcu() should
1341 * quiesce the event, after which we can install it in the new location. This
1342 * means that only external vectors (perf_fops, prctl) can perturb the event
1343 * while in transit. Therefore all such accessors should also acquire
1344 * perf_event_context::mutex to serialize against this.
1345 *
1346 * However; because event->ctx can change while we're waiting to acquire
1347 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
1348 * function.
1349 *
1350 * Lock order:
1351 * exec_update_lock
1352 * task_struct::perf_event_mutex
1353 * perf_event_context::mutex
1354 * perf_event::child_mutex;
1355 * perf_event_context::lock
1356 * mmap_lock
1357 * perf_event::mmap_mutex
1358 * perf_buffer::aux_mutex
1359 * perf_addr_filters_head::lock
1360 *
1361 * cpu_hotplug_lock
1362 * pmus_lock
1363 * cpuctx->mutex / perf_event_context::mutex
1364 */
1365 static struct perf_event_context *
1366 perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
1367 {
1368 struct perf_event_context *ctx;
1369
1370 again:
1371 rcu_read_lock();
1372 ctx = READ_ONCE(event->ctx);
1373 if (!refcount_inc_not_zero(&ctx->refcount)) {
1374 rcu_read_unlock();
1375 goto again;
1376 }
1377 rcu_read_unlock();
1378
1379 mutex_lock_nested(&ctx->mutex, nesting);
1380 if (event->ctx != ctx) {
1381 mutex_unlock(&ctx->mutex);
1382 put_ctx(ctx);
1383 goto again;
1384 }
1385
1386 return ctx;
1387 }
1388
1389 static inline struct perf_event_context *
1390 perf_event_ctx_lock(struct perf_event *event)
1391 {
1392 return perf_event_ctx_lock_nested(event, 0);
1393 }
1394
1395 static void perf_event_ctx_unlock(struct perf_event *event,
1396 struct perf_event_context *ctx)
1397 {
1398 mutex_unlock(&ctx->mutex);
1399 put_ctx(ctx);
1400 }
1401
1402 /*
1403 * This must be done under the ctx->lock, so as to serialize against
1404 * context_equiv(), therefore we cannot call put_ctx() since that might end up
1405 * calling scheduler related locks and ctx->lock nests inside those.
1406 */
1407 static __must_check struct perf_event_context *
1408 unclone_ctx(struct perf_event_context *ctx)
1409 {
1410 struct perf_event_context *parent_ctx = ctx->parent_ctx;
1411
1412 lockdep_assert_held(&ctx->lock);
1413
1414 if (parent_ctx)
1415 ctx->parent_ctx = NULL;
1416 ctx->generation++;
1417
1418 return parent_ctx;
1419 }
1420
1421 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
1422 enum pid_type type)
1423 {
1424 u32 nr;
1425 /*
1426 * only top level events have the pid namespace they were created in
1427 */
1428 if (event->parent)
1429 event = event->parent;
1430
1431 nr = __task_pid_nr_ns(p, type, event->ns);
1432 /* avoid -1 if it is idle thread or runs in another ns */
1433 if (!nr && !pid_alive(p))
1434 nr = -1;
1435 return nr;
1436 }
1437
1438 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1439 {
1440 return perf_event_pid_type(event, p, PIDTYPE_TGID);
1441 }
1442
1443 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1444 {
1445 return perf_event_pid_type(event, p, PIDTYPE_PID);
1446 }
1447
1448 /*
1449 * If we inherit events we want to return the parent event id
1450 * to userspace.
1451 */
1452 static u64 primary_event_id(struct perf_event *event)
1453 {
1454 u64 id = event->id;
1455
1456 if (event->parent)
1457 id = event->parent->id;
1458
1459 return id;
1460 }
1461
1462 /*
1463 * Get the perf_event_context for a task and lock it.
1464 *
1465 * This has to cope with the fact that until it is locked,
1466 * the context could get moved to another task.
1467 */
1468 static struct perf_event_context *
1469 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
1470 {
1471 struct perf_event_context *ctx;
1472
1473 retry:
1474 /*
1475 * One of the few rules of preemptible RCU is that one cannot do
1476 * rcu_read_unlock() while holding a scheduler (or nested) lock when
1477 * part of the read side critical section was irqs-enabled -- see
1478 * rcu_read_unlock_special().
1479 *
1480 * Since ctx->lock nests under rq->lock we must ensure the entire read
1481 * side critical section has interrupts disabled.
1482 */
1483 local_irq_save(*flags);
1484 rcu_read_lock();
1485 ctx = rcu_dereference(task->perf_event_ctxp);
1486 if (ctx) {
1487 /*
1488 * If this context is a clone of another, it might
1489 * get swapped for another underneath us by
1490 * perf_event_task_sched_out, though the
1491 * rcu_read_lock() protects us from any context
1492 * getting freed. Lock the context and check if it
1493 * got swapped before we could get the lock, and retry
1494 * if so. If we locked the right context, then it
1495 * can't get swapped on us any more.
1496 */
1497 raw_spin_lock(&ctx->lock);
1498 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
1499 raw_spin_unlock(&ctx->lock);
1500 rcu_read_unlock();
1501 local_irq_restore(*flags);
1502 goto retry;
1503 }
1504
1505 if (ctx->task == TASK_TOMBSTONE ||
1506 !refcount_inc_not_zero(&ctx->refcount)) {
1507 raw_spin_unlock(&ctx->lock);
1508 ctx = NULL;
1509 } else {
1510 WARN_ON_ONCE(ctx->task != task);
1511 }
1512 }
1513 rcu_read_unlock();
1514 if (!ctx)
1515 local_irq_restore(*flags);
1516 return ctx;
1517 }
1518
1519 /*
1520 * Get the context for a task and increment its pin_count so it
1521 * can't get swapped to another task. This also increments its
1522 * reference count so that the context can't get freed.
1523 */
1524 static struct perf_event_context *
1525 perf_pin_task_context(struct task_struct *task)
1526 {
1527 struct perf_event_context *ctx;
1528 unsigned long flags;
1529
1530 ctx = perf_lock_task_context(task, &flags);
1531 if (ctx) {
1532 ++ctx->pin_count;
1533 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1534 }
1535 return ctx;
1536 }
1537
1538 static void perf_unpin_context(struct perf_event_context *ctx)
1539 {
1540 unsigned long flags;
1541
1542 raw_spin_lock_irqsave(&ctx->lock, flags);
1543 --ctx->pin_count;
1544 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1545 }
1546
1547 /*
1548 * Update the record of the current time in a context.
1549 */
1550 static void __update_context_time(struct perf_event_context *ctx, bool adv)
1551 {
1552 u64 now = perf_clock();
1553
1554 lockdep_assert_held(&ctx->lock);
1555
1556 if (adv)
1557 ctx->time += now - ctx->timestamp;
1558 ctx->timestamp = now;
1559
1560 /*
1561 * The above: time' = time + (now - timestamp), can be re-arranged
1562 * into: time' = now + (time - timestamp), which gives a single value
1563 * offset to compute future time without taking locks.
1564 *
1565 * See perf_event_time_now(), which can be used from NMI context where
1566 * it's (obviously) not possible to acquire ctx->lock in order to read
1567 * both the above values in a consistent manner.
1568 */
1569 WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);
1570 }
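
/*
 * Worked example (made-up values): if ctx->time = 100 and ctx->timestamp = 80
 * at the last update, then ctx->timeoffset = 20. An NMI observing now = 95
 * computes 95 + 20 = 115, exactly time + (now - timestamp), without taking
 * ctx->lock; see perf_event_time_now() below.
 */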
1571
1572 static void update_context_time(struct perf_event_context *ctx)
1573 {
1574 __update_context_time(ctx, true);
1575 }
1576
1577 static u64 perf_event_time(struct perf_event *event)
1578 {
1579 struct perf_event_context *ctx = event->ctx;
1580
1581 if (unlikely(!ctx))
1582 return 0;
1583
1584 if (is_cgroup_event(event))
1585 return perf_cgroup_event_time(event);
1586
1587 return ctx->time;
1588 }
1589
1590 static u64 perf_event_time_now(struct perf_event *event, u64 now)
1591 {
1592 struct perf_event_context *ctx = event->ctx;
1593
1594 if (unlikely(!ctx))
1595 return 0;
1596
1597 if (is_cgroup_event(event))
1598 return perf_cgroup_event_time_now(event, now);
1599
1600 if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
1601 return ctx->time;
1602
1603 now += READ_ONCE(ctx->timeoffset);
1604 return now;
1605 }
1606
1607 static enum event_type_t get_event_type(struct perf_event *event)
1608 {
1609 struct perf_event_context *ctx = event->ctx;
1610 enum event_type_t event_type;
1611
1612 lockdep_assert_held(&ctx->lock);
1613
1614 /*
1615 * It's 'group type', really, because if our group leader is
1616 * pinned, so are we.
1617 */
1618 if (event->group_leader != event)
1619 event = event->group_leader;
1620
1621 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
1622 if (!ctx->task)
1623 event_type |= EVENT_CPU;
1624
1625 return event_type;
1626 }
1627
1628 /*
1629 * Helper function to initialize event group nodes.
1630 */
1631 static void init_event_group(struct perf_event *event)
1632 {
1633 RB_CLEAR_NODE(&event->group_node);
1634 event->group_index = 0;
1635 }
1636
1637 /*
1638 * Extract pinned or flexible groups from the context
1639 * based on event attrs bits.
1640 */
1641 static struct perf_event_groups *
1642 get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
1643 {
1644 if (event->attr.pinned)
1645 return &ctx->pinned_groups;
1646 else
1647 return &ctx->flexible_groups;
1648 }
1649
1650 /*
1651 * Helper function to initialize perf_event_group trees.
1652 */
1653 static void perf_event_groups_init(struct perf_event_groups *groups)
1654 {
1655 groups->tree = RB_ROOT;
1656 groups->index = 0;
1657 }
1658
1659 static inline struct cgroup *event_cgroup(const struct perf_event *event)
1660 {
1661 struct cgroup *cgroup = NULL;
1662
1663 #ifdef CONFIG_CGROUP_PERF
1664 if (event->cgrp)
1665 cgroup = event->cgrp->css.cgroup;
1666 #endif
1667
1668 return cgroup;
1669 }
1670
1671 /*
1672 * Compare function for event groups;
1673 *
1674 * Implements complex key that first sorts by CPU and then by virtual index
1675 * which provides ordering when rotating groups for the same CPU.
1676 */
1677 static __always_inline int
1678 perf_event_groups_cmp(const int left_cpu, const struct pmu *left_pmu,
1679 const struct cgroup *left_cgroup, const u64 left_group_index,
1680 const struct perf_event *right)
1681 {
1682 if (left_cpu < right->cpu)
1683 return -1;
1684 if (left_cpu > right->cpu)
1685 return 1;
1686
1687 if (left_pmu) {
1688 if (left_pmu < right->pmu_ctx->pmu)
1689 return -1;
1690 if (left_pmu > right->pmu_ctx->pmu)
1691 return 1;
1692 }
1693
1694 #ifdef CONFIG_CGROUP_PERF
1695 {
1696 const struct cgroup *right_cgroup = event_cgroup(right);
1697
1698 if (left_cgroup != right_cgroup) {
1699 if (!left_cgroup) {
1700 /*
1701 * Left has no cgroup but right does, no
1702 * cgroups come first.
1703 */
1704 return -1;
1705 }
1706 if (!right_cgroup) {
1707 /*
1708 * Right has no cgroup but left does, no
1709 * cgroups come first.
1710 */
1711 return 1;
1712 }
1713 /* Two dissimilar cgroups, order by id. */
1714 if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup))
1715 return -1;
1716
1717 return 1;
1718 }
1719 }
1720 #endif
1721
1722 if (left_group_index < right->group_index)
1723 return -1;
1724 if (left_group_index > right->group_index)
1725 return 1;
1726
1727 return 0;
1728 }
1729
1730 #define __node_2_pe(node) \
1731 rb_entry((node), struct perf_event, group_node)
1732
1733 static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
1734 {
1735 struct perf_event *e = __node_2_pe(a);
1736 return perf_event_groups_cmp(e->cpu, e->pmu_ctx->pmu, event_cgroup(e),
1737 e->group_index, __node_2_pe(b)) < 0;
1738 }
1739
1740 struct __group_key {
1741 int cpu;
1742 struct pmu *pmu;
1743 struct cgroup *cgroup;
1744 };
1745
1746 static inline int __group_cmp(const void *key, const struct rb_node *node)
1747 {
1748 const struct __group_key *a = key;
1749 const struct perf_event *b = __node_2_pe(node);
1750
1751 /* partial/subtree match: @cpu, @pmu, @cgroup; ignore: @group_index */
1752 return perf_event_groups_cmp(a->cpu, a->pmu, a->cgroup, b->group_index, b);
1753 }
1754
1755 static inline int
1756 __group_cmp_ignore_cgroup(const void *key, const struct rb_node *node)
1757 {
1758 const struct __group_key *a = key;
1759 const struct perf_event *b = __node_2_pe(node);
1760
1761 /* partial/subtree match: @cpu, @pmu, ignore: @cgroup, @group_index */
1762 return perf_event_groups_cmp(a->cpu, a->pmu, event_cgroup(b),
1763 b->group_index, b);
1764 }
1765
1766 /*
1767 * Insert @event into @groups' tree; using
1768 * {@event->cpu, @event->pmu_ctx->pmu, event_cgroup(@event), ++@groups->index}
1769 * as key. This places it last inside the {cpu,pmu,cgroup} subtree.
1770 */
1771 static void
1772 perf_event_groups_insert(struct perf_event_groups *groups,
1773 struct perf_event *event)
1774 {
1775 event->group_index = ++groups->index;
1776
1777 rb_add(&event->group_node, &groups->tree, __group_less);
1778 }
1779
1780 /*
1781 * Helper function to insert event into the pinned or flexible groups.
1782 */
1783 static void
1784 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
1785 {
1786 struct perf_event_groups *groups;
1787
1788 groups = get_event_groups(event, ctx);
1789 perf_event_groups_insert(groups, event);
1790 }
1791
1792 /*
1793 * Delete a group from a tree.
1794 */
1795 static void
1796 perf_event_groups_delete(struct perf_event_groups *groups,
1797 struct perf_event *event)
1798 {
1799 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
1800 RB_EMPTY_ROOT(&groups->tree));
1801
1802 rb_erase(&event->group_node, &groups->tree);
1803 init_event_group(event);
1804 }
1805
1806 /*
1807 * Helper function to delete event from its groups.
1808 */
1809 static void
1810 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
1811 {
1812 struct perf_event_groups *groups;
1813
1814 groups = get_event_groups(event, ctx);
1815 perf_event_groups_delete(groups, event);
1816 }
1817
1818 /*
1819 * Get the leftmost event in the {cpu,pmu,cgroup} subtree.
1820 */
1821 static struct perf_event *
1822 perf_event_groups_first(struct perf_event_groups *groups, int cpu,
1823 struct pmu *pmu, struct cgroup *cgrp)
1824 {
1825 struct __group_key key = {
1826 .cpu = cpu,
1827 .pmu = pmu,
1828 .cgroup = cgrp,
1829 };
1830 struct rb_node *node;
1831
1832 node = rb_find_first(&key, &groups->tree, __group_cmp);
1833 if (node)
1834 return __node_2_pe(node);
1835
1836 return NULL;
1837 }
1838
1839 static struct perf_event *
1840 perf_event_groups_next(struct perf_event *event, struct pmu *pmu)
1841 {
1842 struct __group_key key = {
1843 .cpu = event->cpu,
1844 .pmu = pmu,
1845 .cgroup = event_cgroup(event),
1846 };
1847 struct rb_node *next;
1848
1849 next = rb_next_match(&key, &event->group_node, __group_cmp);
1850 if (next)
1851 return __node_2_pe(next);
1852
1853 return NULL;
1854 }
1855
1856 #define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \
1857 for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
1858 event; event = perf_event_groups_next(event, pmu))
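/*
 * Usage sketch (hypothetical caller; assumes ctx and pmu are in scope):
 *
 *	struct perf_event *event;
 *
 *	perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups,
 *				      smp_processor_id(), pmu) {
 *		...
 *	}
 *
 * Note the NULL cgroup passed to perf_event_groups_first(); paired
 * with __group_cmp() this walks only the events without a cgroup.
 */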
1859
1860 /*
1861 * Iterate through the whole groups tree.
1862 */
1863 #define perf_event_groups_for_each(event, groups) \
1864 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1865 typeof(*event), group_node); event; \
1866 event = rb_entry_safe(rb_next(&event->group_node), \
1867 typeof(*event), group_node))
1868
1869 /*
1870 * Does the event attribute request inherit together with PERF_SAMPLE_READ?
1871 */
1872 static inline bool has_inherit_and_sample_read(struct perf_event_attr *attr)
1873 {
1874 return attr->inherit && (attr->sample_type & PERF_SAMPLE_READ);
1875 }
1876
1877 /*
1878 * Add an event to the lists for its context.
1879 * Must be called with ctx->mutex and ctx->lock held.
1880 */
1881 static void
1882 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1883 {
1884 lockdep_assert_held(&ctx->lock);
1885
1886 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1887 event->attach_state |= PERF_ATTACH_CONTEXT;
1888
1889 event->tstamp = perf_event_time(event);
1890
1891 /*
1892 * If we're a standalone event or group leader, we go onto the context
1893 * list; group events are kept attached to the group so that
1894 * perf_group_detach() can, at all times, locate all siblings.
1895 */
1896 if (event->group_leader == event) {
1897 event->group_caps = event->event_caps;
1898 add_event_to_groups(event, ctx);
1899 }
1900
1901 list_add_rcu(&event->event_entry, &ctx->event_list);
1902 ctx->nr_events++;
1903 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)
1904 ctx->nr_user++;
1905 if (event->attr.inherit_stat)
1906 ctx->nr_stat++;
1907 if (has_inherit_and_sample_read(&event->attr))
1908 local_inc(&ctx->nr_no_switch_fast);
1909
1910 if (event->state > PERF_EVENT_STATE_OFF)
1911 perf_cgroup_event_enable(event, ctx);
1912
1913 ctx->generation++;
1914 event->pmu_ctx->nr_events++;
1915 }
1916
1917 /*
1918 * Initialize event state based on the perf_event_attr::disabled.
1919 */
1920 static inline void perf_event__state_init(struct perf_event *event)
1921 {
1922 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1923 PERF_EVENT_STATE_INACTIVE;
1924 }
1925
1926 static int __perf_event_read_size(u64 read_format, int nr_siblings)
1927 {
1928 int entry = sizeof(u64); /* value */
1929 int size = 0;
1930 int nr = 1;
1931
1932 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1933 size += sizeof(u64);
1934
1935 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1936 size += sizeof(u64);
1937
1938 if (read_format & PERF_FORMAT_ID)
1939 entry += sizeof(u64);
1940
1941 if (read_format & PERF_FORMAT_LOST)
1942 entry += sizeof(u64);
1943
1944 if (read_format & PERF_FORMAT_GROUP) {
1945 nr += nr_siblings;
1946 size += sizeof(u64);
1947 }
1948
1949 /*
1950 * Since perf_event_validate_size() limits this to 16k and inhibits
1951 * adding more siblings, this will never overflow.
1952 */
1953 return size + nr * entry;
1954 }
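/*
 * Worked example (illustrative): with read_format = PERF_FORMAT_GROUP |
 * PERF_FORMAT_ID and two siblings, nr = 3 and each entry is
 * {value, id} = 16 bytes, while size holds the leading nr field,
 * giving 8 + 3 * 16 = 56 bytes.
 */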
1955
1956 static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
1957 {
1958 struct perf_sample_data *data;
1959 u16 size = 0;
1960
1961 if (sample_type & PERF_SAMPLE_IP)
1962 size += sizeof(data->ip);
1963
1964 if (sample_type & PERF_SAMPLE_ADDR)
1965 size += sizeof(data->addr);
1966
1967 if (sample_type & PERF_SAMPLE_PERIOD)
1968 size += sizeof(data->period);
1969
1970 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
1971 size += sizeof(data->weight.full);
1972
1973 if (sample_type & PERF_SAMPLE_READ)
1974 size += event->read_size;
1975
1976 if (sample_type & PERF_SAMPLE_DATA_SRC)
1977 size += sizeof(data->data_src.val);
1978
1979 if (sample_type & PERF_SAMPLE_TRANSACTION)
1980 size += sizeof(data->txn);
1981
1982 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1983 size += sizeof(data->phys_addr);
1984
1985 if (sample_type & PERF_SAMPLE_CGROUP)
1986 size += sizeof(data->cgroup);
1987
1988 if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
1989 size += sizeof(data->data_page_size);
1990
1991 if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
1992 size += sizeof(data->code_page_size);
1993
1994 event->header_size = size;
1995 }
1996
1997 /*
1998 * Called at perf_event creation and when events are attached/detached from a
1999 * group.
2000 */
2001 static void perf_event__header_size(struct perf_event *event)
2002 {
2003 event->read_size =
2004 __perf_event_read_size(event->attr.read_format,
2005 event->group_leader->nr_siblings);
2006 __perf_event_header_size(event, event->attr.sample_type);
2007 }
2008
2009 static void perf_event__id_header_size(struct perf_event *event)
2010 {
2011 struct perf_sample_data *data;
2012 u64 sample_type = event->attr.sample_type;
2013 u16 size = 0;
2014
2015 if (sample_type & PERF_SAMPLE_TID)
2016 size += sizeof(data->tid_entry);
2017
2018 if (sample_type & PERF_SAMPLE_TIME)
2019 size += sizeof(data->time);
2020
2021 if (sample_type & PERF_SAMPLE_IDENTIFIER)
2022 size += sizeof(data->id);
2023
2024 if (sample_type & PERF_SAMPLE_ID)
2025 size += sizeof(data->id);
2026
2027 if (sample_type & PERF_SAMPLE_STREAM_ID)
2028 size += sizeof(data->stream_id);
2029
2030 if (sample_type & PERF_SAMPLE_CPU)
2031 size += sizeof(data->cpu_entry);
2032
2033 event->id_header_size = size;
2034 }
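/*
 * Example (illustrative): sample_type = PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME | PERF_SAMPLE_CPU yields 8 (tid_entry) + 8 (time) +
 * 8 (cpu_entry) = 24 bytes of ID headers per record.
 */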
2035
2036 /*
2037 * Check that adding an event to the group does not result in anybody
2038 * overflowing the 64k event limit imposed by the output buffer.
2039 *
2040 * Specifically, check that the read_size for the event does not exceed 16k,
2041 * read_size being the one term that grows with group size. Since read_size
2042 * depends on per-event read_format, also (re)check the existing events.
2043 *
2044 * This leaves 48k for the constant size fields and things like callchains,
2045 * branch stacks and register sets.
2046 */
2047 static bool perf_event_validate_size(struct perf_event *event)
2048 {
2049 struct perf_event *sibling, *group_leader = event->group_leader;
2050
2051 if (__perf_event_read_size(event->attr.read_format,
2052 group_leader->nr_siblings + 1) > 16*1024)
2053 return false;
2054
2055 if (__perf_event_read_size(group_leader->attr.read_format,
2056 group_leader->nr_siblings + 1) > 16*1024)
2057 return false;
2058
2059 /*
2060 * When creating a new group leader, group_leader->ctx is initialized
2061 * after the size has been validated, but we cannot safely use
2062 * for_each_sibling_event() until group_leader->ctx is set. A new group
2063 * leader cannot have any siblings yet, so we can safely skip checking
2064 * the non-existent siblings.
2065 */
2066 if (event == group_leader)
2067 return true;
2068
2069 for_each_sibling_event(sibling, group_leader) {
2070 if (__perf_event_read_size(sibling->attr.read_format,
2071 group_leader->nr_siblings + 1) > 16*1024)
2072 return false;
2073 }
2074
2075 return true;
2076 }
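/*
 * Back-of-the-envelope (illustrative): with read_format =
 * PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_LOST, each entry is
 * 24 bytes, so the 16k read_size budget caps a group at roughly
 * (16384 - 8) / 24 ~= 682 events.
 */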
2077
2078 static void perf_group_attach(struct perf_event *event)
2079 {
2080 struct perf_event *group_leader = event->group_leader, *pos;
2081
2082 lockdep_assert_held(&event->ctx->lock);
2083
2084 /*
2085 * We can have double attach due to group movement (move_group) in
2086 * perf_event_open().
2087 */
2088 if (event->attach_state & PERF_ATTACH_GROUP)
2089 return;
2090
2091 event->attach_state |= PERF_ATTACH_GROUP;
2092
2093 if (group_leader == event)
2094 return;
2095
2096 WARN_ON_ONCE(group_leader->ctx != event->ctx);
2097
2098 group_leader->group_caps &= event->event_caps;
2099
2100 list_add_tail(&event->sibling_list, &group_leader->sibling_list);
2101 group_leader->nr_siblings++;
2102 group_leader->group_generation++;
2103
2104 perf_event__header_size(group_leader);
2105
2106 for_each_sibling_event(pos, group_leader)
2107 perf_event__header_size(pos);
2108 }
2109
2110 /*
2111 * Remove an event from the lists for its context.
2112 * Must be called with ctx->mutex and ctx->lock held.
2113 */
2114 static void
2115 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
2116 {
2117 WARN_ON_ONCE(event->ctx != ctx);
2118 lockdep_assert_held(&ctx->lock);
2119
2120 /*
2121 * We can have double detach due to exit/hot-unplug + close.
2122 */
2123 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
2124 return;
2125
2126 event->attach_state &= ~PERF_ATTACH_CONTEXT;
2127
2128 ctx->nr_events--;
2129 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)
2130 ctx->nr_user--;
2131 if (event->attr.inherit_stat)
2132 ctx->nr_stat--;
2133 if (has_inherit_and_sample_read(&event->attr))
2134 local_dec(&ctx->nr_no_switch_fast);
2135
2136 list_del_rcu(&event->event_entry);
2137
2138 if (event->group_leader == event)
2139 del_event_from_groups(event, ctx);
2140
2141 ctx->generation++;
2142 event->pmu_ctx->nr_events--;
2143 }
2144
2145 static int
2146 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
2147 {
2148 if (!has_aux(aux_event))
2149 return 0;
2150
2151 if (!event->pmu->aux_output_match)
2152 return 0;
2153
2154 return event->pmu->aux_output_match(aux_event);
2155 }
2156
2157 static void put_event(struct perf_event *event);
2158 static void __event_disable(struct perf_event *event,
2159 struct perf_event_context *ctx,
2160 enum perf_event_state state);
2161
2162 static void perf_put_aux_event(struct perf_event *event)
2163 {
2164 struct perf_event_context *ctx = event->ctx;
2165 struct perf_event *iter;
2166
2167 /*
2168 * If the event uses an aux_event, tear down the link.
2169 */
2170 if (event->aux_event) {
2171 iter = event->aux_event;
2172 event->aux_event = NULL;
2173 put_event(iter);
2174 return;
2175 }
2176
2177 /*
2178 * If the event is an aux_event, tear down all links to
2179 * it from other events.
2180 */
2181 for_each_sibling_event(iter, event) {
2182 if (iter->aux_event != event)
2183 continue;
2184
2185 iter->aux_event = NULL;
2186 put_event(event);
2187
2188 /*
2189 * If it's ACTIVE, schedule it out and put it into ERROR
2190 * state so that we don't try to schedule it again. Note
2191 * that perf_event_enable() will clear the ERROR status.
2192 */
2193 __event_disable(iter, ctx, PERF_EVENT_STATE_ERROR);
2194 }
2195 }
2196
2197 static bool perf_need_aux_event(struct perf_event *event)
2198 {
2199 return event->attr.aux_output || has_aux_action(event);
2200 }
2201
2202 static int perf_get_aux_event(struct perf_event *event,
2203 struct perf_event *group_leader)
2204 {
2205 /*
2206 * Our group leader must be an aux event if we want to be
2207 * an aux_output. This way, the aux event will precede its
2208 * aux_output events in the group, and therefore will always
2209 * schedule first.
2210 */
2211 if (!group_leader)
2212 return 0;
2213
2214 /*
2215 * aux_output and aux_sample_size are mutually exclusive.
2216 */
2217 if (event->attr.aux_output && event->attr.aux_sample_size)
2218 return 0;
2219
2220 if (event->attr.aux_output &&
2221 !perf_aux_output_match(event, group_leader))
2222 return 0;
2223
2224 if ((event->attr.aux_pause || event->attr.aux_resume) &&
2225 !(group_leader->pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE))
2226 return 0;
2227
2228 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux)
2229 return 0;
2230
2231 if (!atomic_long_inc_not_zero(&group_leader->refcount))
2232 return 0;
2233
2234 /*
2235 * Link aux_outputs to their aux event; this is undone in
2236 * perf_group_detach() by perf_put_aux_event(). When the
2237 * group is torn down, the aux_output events lose their
2238 * link to the aux_event and can't schedule any more.
2239 */
2240 event->aux_event = group_leader;
2241
2242 return 1;
2243 }
2244
2245 static inline struct list_head *get_event_list(struct perf_event *event)
2246 {
2247 return event->attr.pinned ? &event->pmu_ctx->pinned_active :
2248 &event->pmu_ctx->flexible_active;
2249 }
2250
2251 static void perf_group_detach(struct perf_event *event)
2252 {
2253 struct perf_event *leader = event->group_leader;
2254 struct perf_event *sibling, *tmp;
2255 struct perf_event_context *ctx = event->ctx;
2256
2257 lockdep_assert_held(&ctx->lock);
2258
2259 /*
2260 * We can have double detach due to exit/hot-unplug + close.
2261 */
2262 if (!(event->attach_state & PERF_ATTACH_GROUP))
2263 return;
2264
2265 event->attach_state &= ~PERF_ATTACH_GROUP;
2266
2267 perf_put_aux_event(event);
2268
2269 /*
2270 * If this is a sibling, remove it from its group.
2271 */
2272 if (leader != event) {
2273 list_del_init(&event->sibling_list);
2274 event->group_leader->nr_siblings--;
2275 event->group_leader->group_generation++;
2276 goto out;
2277 }
2278
2279 /*
2280 * If this was a group event with sibling events then
2281 * upgrade the siblings to singleton events by adding them
2282 * to whatever list we are on.
2283 */
2284 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
2285
2286 /*
2287 * Events that have PERF_EV_CAP_SIBLING require being part of
2288 * a group and cannot exist on their own, schedule them out
2289 * and move them into the ERROR state. Also see
2290 * _perf_event_enable(), it will not be able to recover this
2291 * ERROR state.
2292 */
2293 if (sibling->event_caps & PERF_EV_CAP_SIBLING)
2294 __event_disable(sibling, ctx, PERF_EVENT_STATE_ERROR);
2295
2296 sibling->group_leader = sibling;
2297 list_del_init(&sibling->sibling_list);
2298
2299 /* Inherit group flags from the previous leader */
2300 sibling->group_caps = event->group_caps;
2301
2302 if (sibling->attach_state & PERF_ATTACH_CONTEXT) {
2303 add_event_to_groups(sibling, event->ctx);
2304
2305 if (sibling->state == PERF_EVENT_STATE_ACTIVE)
2306 list_add_tail(&sibling->active_list, get_event_list(sibling));
2307 }
2308
2309 WARN_ON_ONCE(sibling->ctx != event->ctx);
2310 }
2311
2312 out:
2313 for_each_sibling_event(tmp, leader)
2314 perf_event__header_size(tmp);
2315
2316 perf_event__header_size(leader);
2317 }
2318
2319 static void sync_child_event(struct perf_event *child_event);
2320
2321 static void perf_child_detach(struct perf_event *event)
2322 {
2323 struct perf_event *parent_event = event->parent;
2324
2325 if (!(event->attach_state & PERF_ATTACH_CHILD))
2326 return;
2327
2328 event->attach_state &= ~PERF_ATTACH_CHILD;
2329
2330 if (WARN_ON_ONCE(!parent_event))
2331 return;
2332
2333 /*
2334 * Can't check this from an IPI, the holder is likely another CPU.
2335 *
2336 lockdep_assert_held(&parent_event->child_mutex);
2337 */
2338
2339 sync_child_event(event);
2340 list_del_init(&event->child_list);
2341 }
2342
2343 static bool is_orphaned_event(struct perf_event *event)
2344 {
2345 return event->state == PERF_EVENT_STATE_DEAD;
2346 }
2347
2348 static inline int
2349 event_filter_match(struct perf_event *event)
2350 {
2351 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
2352 perf_cgroup_match(event);
2353 }
2354
2355 static inline bool is_event_in_freq_mode(struct perf_event *event)
2356 {
2357 return event->attr.freq && event->attr.sample_freq;
2358 }
2359
2360 static void
2361 event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
2362 {
2363 struct perf_event_pmu_context *epc = event->pmu_ctx;
2364 struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
2365 enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
2366
2367 // XXX cpc serialization, probably per-cpu IRQ disabled
2368
2369 WARN_ON_ONCE(event->ctx != ctx);
2370 lockdep_assert_held(&ctx->lock);
2371
2372 if (event->state != PERF_EVENT_STATE_ACTIVE)
2373 return;
2374
2375 /*
2376 * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but
2377 * we can schedule events _OUT_ individually through things like
2378 * __perf_remove_from_context().
2379 */
2380 list_del_init(&event->active_list);
2381
2382 perf_pmu_disable(event->pmu);
2383
2384 event->pmu->del(event, 0);
2385 event->oncpu = -1;
2386
2387 if (event->pending_disable) {
2388 event->pending_disable = 0;
2389 perf_cgroup_event_disable(event, ctx);
2390 state = PERF_EVENT_STATE_OFF;
2391 }
2392
2393 perf_event_set_state(event, state);
2394
2395 if (!is_software_event(event))
2396 cpc->active_oncpu--;
2397 if (is_event_in_freq_mode(event)) {
2398 ctx->nr_freq--;
2399 epc->nr_freq--;
2400 }
2401 if (event->attr.exclusive || !cpc->active_oncpu)
2402 cpc->exclusive = 0;
2403
2404 perf_pmu_enable(event->pmu);
2405 }
2406
2407 static void
2408 group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx)
2409 {
2410 struct perf_event *event;
2411
2412 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
2413 return;
2414
2415 perf_assert_pmu_disabled(group_event->pmu_ctx->pmu);
2416
2417 event_sched_out(group_event, ctx);
2418
2419 /*
2420 * Schedule out siblings (if any):
2421 */
2422 for_each_sibling_event(event, group_event)
2423 event_sched_out(event, ctx);
2424 }
2425
2426 static inline void
2427 __ctx_time_update(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx, bool final)
2428 {
2429 if (ctx->is_active & EVENT_TIME) {
2430 if (ctx->is_active & EVENT_FROZEN)
2431 return;
2432 update_context_time(ctx);
2433 update_cgrp_time_from_cpuctx(cpuctx, final);
2434 }
2435 }
2436
2437 static inline void
2438 ctx_time_update(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx)
2439 {
2440 __ctx_time_update(cpuctx, ctx, false);
2441 }
2442
2443 /*
2444 * To be used inside perf_ctx_lock() / perf_ctx_unlock(). Lasts until perf_ctx_unlock().
2445 */
2446 static inline void
2447 ctx_time_freeze(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx)
2448 {
2449 ctx_time_update(cpuctx, ctx);
2450 if (ctx->is_active & EVENT_TIME)
2451 ctx->is_active |= EVENT_FROZEN;
2452 }
2453
2454 static inline void
2455 ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event)
2456 {
2457 if (ctx->is_active & EVENT_TIME) {
2458 if (ctx->is_active & EVENT_FROZEN)
2459 return;
2460 update_context_time(ctx);
2461 update_cgrp_time_from_event(event);
2462 }
2463 }
2464
2465 #define DETACH_GROUP 0x01UL
2466 #define DETACH_CHILD 0x02UL
2467 #define DETACH_EXIT 0x04UL
2468 #define DETACH_REVOKE 0x08UL
2469 #define DETACH_DEAD 0x10UL
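/*
 * These flags combine. For example (illustrative; see
 * __perf_remove_from_context() below for the exact semantics), a
 * release path may use:
 *
 *	perf_remove_from_context(event, DETACH_GROUP | DETACH_DEAD);
 *
 * to detach the event from its group and mark it DEAD in one call.
 */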
2470
2471 /*
2472 * Cross CPU call to remove a performance event
2473 *
2474 * We disable the event on the hardware level first. After that we
2475 * remove it from the context list.
2476 */
2477 static void
2478 __perf_remove_from_context(struct perf_event *event,
2479 struct perf_cpu_context *cpuctx,
2480 struct perf_event_context *ctx,
2481 void *info)
2482 {
2483 struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx;
2484 enum perf_event_state state = PERF_EVENT_STATE_OFF;
2485 unsigned long flags = (unsigned long)info;
2486
2487 ctx_time_update(cpuctx, ctx);
2488
2489 /*
2490 * Ensure event_sched_out() switches to OFF, at the very least
2491 * this avoids raising perf_pending_task() at this time.
2492 */
2493 if (flags & DETACH_EXIT)
2494 state = PERF_EVENT_STATE_EXIT;
2495 if (flags & DETACH_REVOKE)
2496 state = PERF_EVENT_STATE_REVOKED;
2497 if (flags & DETACH_DEAD)
2498 state = PERF_EVENT_STATE_DEAD;
2499
2500 event_sched_out(event, ctx);
2501
2502 if (event->state > PERF_EVENT_STATE_OFF)
2503 perf_cgroup_event_disable(event, ctx);
2504
2505 perf_event_set_state(event, min(event->state, state));
2506
2507 if (flags & DETACH_GROUP)
2508 perf_group_detach(event);
2509 if (flags & DETACH_CHILD)
2510 perf_child_detach(event);
2511 list_del_event(event, ctx);
2512
2513 if (!pmu_ctx->nr_events) {
2514 pmu_ctx->rotate_necessary = 0;
2515
2516 if (ctx->task && ctx->is_active) {
2517 struct perf_cpu_pmu_context *cpc = this_cpc(pmu_ctx->pmu);
2518
2519 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
2520 cpc->task_epc = NULL;
2521 }
2522 }
2523
2524 if (!ctx->nr_events && ctx->is_active) {
2525 if (ctx == &cpuctx->ctx)
2526 update_cgrp_time_from_cpuctx(cpuctx, true);
2527
2528 ctx->is_active = 0;
2529 if (ctx->task) {
2530 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2531 cpuctx->task_ctx = NULL;
2532 }
2533 }
2534 }
2535
2536 /*
2537 * Remove the event from a task's (or a CPU's) list of events.
2538 *
2539 * If event->ctx is a cloned context, callers must make sure that
2540 * every task struct that event->ctx->task could possibly point to
2541 * remains valid. This is OK when called from perf_release since
2542 * that only calls us on the top-level context, which can't be a clone.
2543 * When called from perf_event_exit_task, it's OK because the
2544 * context has been detached from its task.
2545 */
2546 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
2547 {
2548 struct perf_event_context *ctx = event->ctx;
2549
2550 lockdep_assert_held(&ctx->mutex);
2551
2552 /*
2553 * Because of perf_event_exit_task(), perf_remove_from_context() ought
2554 * to work in the face of TASK_TOMBSTONE, unlike every other
2555 * event_function_call() user.
2556 */
2557 raw_spin_lock_irq(&ctx->lock);
2558 if (!ctx->is_active) {
2559 __perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context),
2560 ctx, (void *)flags);
2561 raw_spin_unlock_irq(&ctx->lock);
2562 return;
2563 }
2564 raw_spin_unlock_irq(&ctx->lock);
2565
2566 event_function_call(event, __perf_remove_from_context, (void *)flags);
2567 }
2568
2569 static void __event_disable(struct perf_event *event,
2570 struct perf_event_context *ctx,
2571 enum perf_event_state state)
2572 {
2573 event_sched_out(event, ctx);
2574 perf_cgroup_event_disable(event, ctx);
2575 perf_event_set_state(event, state);
2576 }
2577
2578 /*
2579 * Cross CPU call to disable a performance event
2580 */
2581 static void __perf_event_disable(struct perf_event *event,
2582 struct perf_cpu_context *cpuctx,
2583 struct perf_event_context *ctx,
2584 void *info)
2585 {
2586 if (event->state < PERF_EVENT_STATE_INACTIVE)
2587 return;
2588
2589 perf_pmu_disable(event->pmu_ctx->pmu);
2590 ctx_time_update_event(ctx, event);
2591
2592 /*
2593 * When disabling a group leader, the whole group becomes ineligible
2594 * to run, so schedule out the full group.
2595 */
2596 if (event == event->group_leader)
2597 group_sched_out(event, ctx);
2598
2599 /*
2600 * But only mark the leader OFF; the siblings will remain
2601 * INACTIVE.
2602 */
2603 __event_disable(event, ctx, PERF_EVENT_STATE_OFF);
2604
2605 perf_pmu_enable(event->pmu_ctx->pmu);
2606 }
2607
2608 /*
2609 * Disable an event.
2610 *
2611 * If event->ctx is a cloned context, callers must make sure that
2612 * every task struct that event->ctx->task could possibly point to
2613 * remains valid. This condition is satisfied when called through
2614 * perf_event_for_each_child or perf_event_for_each because they
2615 * hold the top-level event's child_mutex, so any descendant that
2616 * goes to exit will block in perf_event_exit_event().
2617 *
2618 * When called from perf_pending_disable it's OK because event->ctx
2619 * is the current context on this CPU and preemption is disabled,
2620 * hence we can't get into perf_event_task_sched_out for this context.
2621 */
2622 static void _perf_event_disable(struct perf_event *event)
2623 {
2624 struct perf_event_context *ctx = event->ctx;
2625
2626 raw_spin_lock_irq(&ctx->lock);
2627 if (event->state <= PERF_EVENT_STATE_OFF) {
2628 raw_spin_unlock_irq(&ctx->lock);
2629 return;
2630 }
2631 raw_spin_unlock_irq(&ctx->lock);
2632
2633 event_function_call(event, __perf_event_disable, NULL);
2634 }
2635
2636 void perf_event_disable_local(struct perf_event *event)
2637 {
2638 event_function_local(event, __perf_event_disable, NULL);
2639 }
2640
2641 /*
2642 * Strictly speaking kernel users cannot create groups and therefore this
2643 * interface does not need the perf_event_ctx_lock() magic.
2644 */
2645 void perf_event_disable(struct perf_event *event)
2646 {
2647 struct perf_event_context *ctx;
2648
2649 ctx = perf_event_ctx_lock(event);
2650 _perf_event_disable(event);
2651 perf_event_ctx_unlock(event, ctx);
2652 }
2653 EXPORT_SYMBOL_GPL(perf_event_disable);
2654
2655 void perf_event_disable_inatomic(struct perf_event *event)
2656 {
2657 event->pending_disable = 1;
2658 irq_work_queue(&event->pending_disable_irq);
2659 }
2660
2661 #define MAX_INTERRUPTS (~0ULL)
2662
2663 static void perf_log_throttle(struct perf_event *event, int enable);
2664 static void perf_log_itrace_start(struct perf_event *event);
2665
2666 static void perf_event_unthrottle(struct perf_event *event, bool start)
2667 {
2668 if (event->state != PERF_EVENT_STATE_ACTIVE)
2669 return;
2670
2671 event->hw.interrupts = 0;
2672 if (start)
2673 event->pmu->start(event, 0);
2674 if (event == event->group_leader)
2675 perf_log_throttle(event, 1);
2676 }
2677
2678 static void perf_event_throttle(struct perf_event *event)
2679 {
2680 if (event->state != PERF_EVENT_STATE_ACTIVE)
2681 return;
2682
2683 event->hw.interrupts = MAX_INTERRUPTS;
2684 event->pmu->stop(event, 0);
2685 if (event == event->group_leader)
2686 perf_log_throttle(event, 0);
2687 }
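/*
 * hw.interrupts == MAX_INTERRUPTS doubles as the "throttled" marker:
 * event_sched_in() (and the frequency/unthrottle tick) test for it to
 * decide whether an event still needs unthrottling.
 */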
2688
2689 static void perf_event_unthrottle_group(struct perf_event *event, bool skip_start_event)
2690 {
2691 struct perf_event *sibling, *leader = event->group_leader;
2692
2693 perf_event_unthrottle(leader, skip_start_event ? leader != event : true);
2694 for_each_sibling_event(sibling, leader)
2695 perf_event_unthrottle(sibling, skip_start_event ? sibling != event : true);
2696 }
2697
2698 static void perf_event_throttle_group(struct perf_event *event)
2699 {
2700 struct perf_event *sibling, *leader = event->group_leader;
2701
2702 perf_event_throttle(leader);
2703 for_each_sibling_event(sibling, leader)
2704 perf_event_throttle(sibling);
2705 }
2706
2707 static int
2708 event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
2709 {
2710 struct perf_event_pmu_context *epc = event->pmu_ctx;
2711 struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
2712 int ret = 0;
2713
2714 WARN_ON_ONCE(event->ctx != ctx);
2715
2716 lockdep_assert_held(&ctx->lock);
2717
2718 if (event->state <= PERF_EVENT_STATE_OFF)
2719 return 0;
2720
2721 WRITE_ONCE(event->oncpu, smp_processor_id());
2722 /*
2723 * Order event::oncpu write to happen before the ACTIVE state is
2724 * visible. This allows perf_event_{stop,read}() to observe the correct
2725 * ->oncpu if it sees ACTIVE.
2726 */
2727 smp_wmb();
2728 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE);
2729
2730 /*
2731 * Unthrottle events: since we just scheduled in, we might have missed
2732 * several ticks already, and for a heavily scheduling task there is
2733 * little guarantee it'll get a tick in a timely manner.
2734 */
2735 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS))
2736 perf_event_unthrottle(event, false);
2737
2738 perf_pmu_disable(event->pmu);
2739
2740 perf_log_itrace_start(event);
2741
2742 if (event->pmu->add(event, PERF_EF_START)) {
2743 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
2744 event->oncpu = -1;
2745 ret = -EAGAIN;
2746 goto out;
2747 }
2748
2749 if (!is_software_event(event))
2750 cpc->active_oncpu++;
2751 if (is_event_in_freq_mode(event)) {
2752 ctx->nr_freq++;
2753 epc->nr_freq++;
2754 }
2755 if (event->attr.exclusive)
2756 cpc->exclusive = 1;
2757
2758 out:
2759 perf_pmu_enable(event->pmu);
2760
2761 return ret;
2762 }
2763
2764 static int
2765 group_sched_in(struct perf_event *group_event, struct perf_event_context *ctx)
2766 {
2767 struct perf_event *event, *partial_group = NULL;
2768 struct pmu *pmu = group_event->pmu_ctx->pmu;
2769
2770 if (group_event->state == PERF_EVENT_STATE_OFF)
2771 return 0;
2772
2773 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
2774
2775 if (event_sched_in(group_event, ctx))
2776 goto error;
2777
2778 /*
2779 * Schedule in siblings as one group (if any):
2780 */
2781 for_each_sibling_event(event, group_event) {
2782 if (event_sched_in(event, ctx)) {
2783 partial_group = event;
2784 goto group_error;
2785 }
2786 }
2787
2788 if (!pmu->commit_txn(pmu))
2789 return 0;
2790
2791 group_error:
2792 /*
2793 * Groups can be scheduled in as one unit only, so undo any
2794 * partial group before returning:
2795 * The events up to the failed event are scheduled out normally.
2796 */
2797 for_each_sibling_event(event, group_event) {
2798 if (event == partial_group)
2799 break;
2800
2801 event_sched_out(event, ctx);
2802 }
2803 event_sched_out(group_event, ctx);
2804
2805 error:
2806 pmu->cancel_txn(pmu);
2807 return -EAGAIN;
2808 }
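/*
 * The transaction contract used above, as a sketch (PMUs without real
 * transaction support implement the txn callbacks as no-ops and simply
 * fail ->add() instead):
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *	event_sched_in() -> pmu->add(event, PERF_EF_START)	(tentative)
 *	...
 *	pmu->commit_txn(pmu)	- returns 0 on success
 *	pmu->cancel_txn(pmu)	- rolls back tentative state on failure
 */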
2809
2810 /*
2811 * Work out whether we can put this event group on the CPU now.
2812 */
2813 static int group_can_go_on(struct perf_event *event, int can_add_hw)
2814 {
2815 struct perf_event_pmu_context *epc = event->pmu_ctx;
2816 struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
2817
2818 /*
2819 * Groups consisting entirely of software events can always go on.
2820 */
2821 if (event->group_caps & PERF_EV_CAP_SOFTWARE)
2822 return 1;
2823 /*
2824 * If an exclusive group is already on, no other hardware
2825 * events can go on.
2826 */
2827 if (cpc->exclusive)
2828 return 0;
2829 /*
2830 * If this group is exclusive and there are already
2831 * events on the CPU, it can't go on.
2832 */
2833 if (event->attr.exclusive && !list_empty(get_event_list(event)))
2834 return 0;
2835 /*
2836 * Otherwise, try to add it if all previous groups were able
2837 * to go on.
2838 */
2839 return can_add_hw;
2840 }
2841
2842 static void add_event_to_ctx(struct perf_event *event,
2843 struct perf_event_context *ctx)
2844 {
2845 list_add_event(event, ctx);
2846 perf_group_attach(event);
2847 }
2848
2849 static void task_ctx_sched_out(struct perf_event_context *ctx,
2850 struct pmu *pmu,
2851 enum event_type_t event_type)
2852 {
2853 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2854
2855 if (!cpuctx->task_ctx)
2856 return;
2857
2858 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2859 return;
2860
2861 ctx_sched_out(ctx, pmu, event_type);
2862 }
2863
2864 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2865 struct perf_event_context *ctx,
2866 struct pmu *pmu)
2867 {
2868 ctx_sched_in(&cpuctx->ctx, pmu, EVENT_PINNED);
2869 if (ctx)
2870 ctx_sched_in(ctx, pmu, EVENT_PINNED);
2871 ctx_sched_in(&cpuctx->ctx, pmu, EVENT_FLEXIBLE);
2872 if (ctx)
2873 ctx_sched_in(ctx, pmu, EVENT_FLEXIBLE);
2874 }
2875
2876 /*
2877 * We want to maintain the following priority of scheduling:
2878 * - CPU pinned (EVENT_CPU | EVENT_PINNED)
2879 * - task pinned (EVENT_PINNED)
2880 * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
2881 * - task flexible (EVENT_FLEXIBLE).
2882 *
2883 * In order to avoid unscheduling and scheduling back in everything every
2884 * time an event is added, only do it for the groups of equal priority and
2885 * below.
2886 *
2887 * This can be called after a batch operation on task events, in which case
2888 * event_type is a bit mask of the types of events involved. For CPU events,
2889 * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
2890 */
2891 static void ctx_resched(struct perf_cpu_context *cpuctx,
2892 struct perf_event_context *task_ctx,
2893 struct pmu *pmu, enum event_type_t event_type)
2894 {
2895 bool cpu_event = !!(event_type & EVENT_CPU);
2896 struct perf_event_pmu_context *epc;
2897
2898 /*
2899 * If pinned groups are involved, flexible groups also need to be
2900 * scheduled out.
2901 */
2902 if (event_type & EVENT_PINNED)
2903 event_type |= EVENT_FLEXIBLE;
2904
2905 event_type &= EVENT_ALL;
2906
2907 for_each_epc(epc, &cpuctx->ctx, pmu, false)
2908 perf_pmu_disable(epc->pmu);
2909
2910 if (task_ctx) {
2911 for_each_epc(epc, task_ctx, pmu, false)
2912 perf_pmu_disable(epc->pmu);
2913
2914 task_ctx_sched_out(task_ctx, pmu, event_type);
2915 }
2916
2917 /*
2918 * Decide which cpu ctx groups to schedule out based on the types
2919 * of events that caused rescheduling:
2920 * - EVENT_CPU: schedule out corresponding groups;
2921 * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
2922 * - otherwise, do nothing more.
2923 */
2924 if (cpu_event)
2925 ctx_sched_out(&cpuctx->ctx, pmu, event_type);
2926 else if (event_type & EVENT_PINNED)
2927 ctx_sched_out(&cpuctx->ctx, pmu, EVENT_FLEXIBLE);
2928
2929 perf_event_sched_in(cpuctx, task_ctx, pmu);
2930
2931 for_each_epc(epc, &cpuctx->ctx, pmu, false)
2932 perf_pmu_enable(epc->pmu);
2933
2934 if (task_ctx) {
2935 for_each_epc(epc, task_ctx, pmu, false)
2936 perf_pmu_enable(epc->pmu);
2937 }
2938 }
2939
2940 void perf_pmu_resched(struct pmu *pmu)
2941 {
2942 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2943 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2944
2945 perf_ctx_lock(cpuctx, task_ctx);
2946 ctx_resched(cpuctx, task_ctx, pmu, EVENT_ALL|EVENT_CPU);
2947 perf_ctx_unlock(cpuctx, task_ctx);
2948 }
2949
2950 /*
2951 * Cross CPU call to install and enable a performance event
2952 *
2953 * Very similar to remote_function() + event_function() but cannot assume that
2954 * things like ctx->is_active and cpuctx->task_ctx are set.
2955 */
2956 static int __perf_install_in_context(void *info)
2957 {
2958 struct perf_event *event = info;
2959 struct perf_event_context *ctx = event->ctx;
2960 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2961 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2962 bool reprogram = true;
2963 int ret = 0;
2964
2965 raw_spin_lock(&cpuctx->ctx.lock);
2966 if (ctx->task) {
2967 raw_spin_lock(&ctx->lock);
2968 task_ctx = ctx;
2969
2970 reprogram = (ctx->task == current);
2971
2972 /*
2973 * If the task is running, it must be running on this CPU,
2974 * otherwise we cannot reprogram things.
2975 *
2976 * If it's not running, we don't care, ctx->lock will
2977 * serialize against it becoming runnable.
2978 */
2979 if (task_curr(ctx->task) && !reprogram) {
2980 ret = -ESRCH;
2981 goto unlock;
2982 }
2983
2984 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2985 } else if (task_ctx) {
2986 raw_spin_lock(&task_ctx->lock);
2987 }
2988
2989 #ifdef CONFIG_CGROUP_PERF
2990 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) {
2991 /*
2992 * If the current cgroup doesn't match the event's
2993 * cgroup, we should not try to schedule it.
2994 */
2995 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
2996 reprogram = cgroup_is_descendant(cgrp->css.cgroup,
2997 event->cgrp->css.cgroup);
2998 }
2999 #endif
3000
3001 if (reprogram) {
3002 ctx_time_freeze(cpuctx, ctx);
3003 add_event_to_ctx(event, ctx);
3004 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu,
3005 get_event_type(event));
3006 } else {
3007 add_event_to_ctx(event, ctx);
3008 }
3009
3010 unlock:
3011 perf_ctx_unlock(cpuctx, task_ctx);
3012
3013 return ret;
3014 }
3015
3016 static bool exclusive_event_installable(struct perf_event *event,
3017 struct perf_event_context *ctx);
3018
3019 /*
3020 * Attach a performance event to a context.
3021 *
3022 * Very similar to event_function_call, see comment there.
3023 */
3024 static void
3025 perf_install_in_context(struct perf_event_context *ctx,
3026 struct perf_event *event,
3027 int cpu)
3028 {
3029 struct task_struct *task = READ_ONCE(ctx->task);
3030
3031 lockdep_assert_held(&ctx->mutex);
3032
3033 WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
3034
3035 if (event->cpu != -1)
3036 WARN_ON_ONCE(event->cpu != cpu);
3037
3038 /*
3039 * Ensures that if we can observe event->ctx, both the event and ctx
3040 * will be 'complete'. See perf_iterate_sb_cpu().
3041 */
3042 smp_store_release(&event->ctx, ctx);
3043
3044 /*
3045 * perf_event_attr::disabled events will not run and can be initialized
3046 * without IPI. Except when this is the first event for the context, in
3047 * that case we need the magic of the IPI to set ctx->is_active.
3048 *
3049 * The IOC_ENABLE that is sure to follow the creation of a disabled
3050 * event will issue the IPI and reprogram the hardware.
3051 */
3052 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF &&
3053 ctx->nr_events && !is_cgroup_event(event)) {
3054 raw_spin_lock_irq(&ctx->lock);
3055 if (ctx->task == TASK_TOMBSTONE) {
3056 raw_spin_unlock_irq(&ctx->lock);
3057 return;
3058 }
3059 add_event_to_ctx(event, ctx);
3060 raw_spin_unlock_irq(&ctx->lock);
3061 return;
3062 }
3063
3064 if (!task) {
3065 cpu_function_call(cpu, __perf_install_in_context, event);
3066 return;
3067 }
3068
3069 /*
3070 * Should not happen, we validate the ctx is still alive before calling.
3071 */
3072 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
3073 return;
3074
3075 /*
3076 * Installing events is tricky because we cannot rely on ctx->is_active
3077 * to be set in case this is the nr_events 0 -> 1 transition.
3078 *
3079 * Instead we use task_curr(), which tells us if the task is running.
3080 * However, since we use task_curr() outside of rq::lock, we can race
3081 * against the actual state. This means the result can be wrong.
3082 *
3083 * If we get a false positive, we retry, this is harmless.
3084 *
3085 * If we get a false negative, things are complicated. If we are after
3086 * perf_event_context_sched_in() ctx::lock will serialize us, and the
3087 * value must be correct. If we're before, it doesn't matter since
3088 * perf_event_context_sched_in() will program the counter.
3089 *
3090 * However, this hinges on the remote context switch having observed
3091 * our task->perf_event_ctxp[] store, such that it will in fact take
3092 * ctx::lock in perf_event_context_sched_in().
3093 *
3094 * We do this by task_function_call(), if the IPI fails to hit the task
3095 * we know any future context switch of task must see the
3096 * perf_event_ctxp[] store.
3097 */
3098
3099 /*
3100 * This smp_mb() orders the task->perf_event_ctxp[] store with the
3101 * task_cpu() load, such that if the IPI then does not find the task
3102 * running, a future context switch of that task must observe the
3103 * store.
3104 */
3105 smp_mb();
3106 again:
3107 if (!task_function_call(task, __perf_install_in_context, event))
3108 return;
3109
3110 raw_spin_lock_irq(&ctx->lock);
3111 task = ctx->task;
3112 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
3113 /*
3114 * Cannot happen because we already checked above (which also
3115 * cannot happen), and we hold ctx->mutex, which serializes us
3116 * against perf_event_exit_task_context().
3117 */
3118 raw_spin_unlock_irq(&ctx->lock);
3119 return;
3120 }
3121 /*
3122 * If the task is not running, ctx->lock will avoid it becoming so,
3123 * thus we can safely install the event.
3124 */
3125 if (task_curr(task)) {
3126 raw_spin_unlock_irq(&ctx->lock);
3127 goto again;
3128 }
3129 add_event_to_ctx(event, ctx);
3130 raw_spin_unlock_irq(&ctx->lock);
3131 }
3132
3133 /*
3134 * Cross CPU call to enable a performance event
3135 */
3136 static void __perf_event_enable(struct perf_event *event,
3137 struct perf_cpu_context *cpuctx,
3138 struct perf_event_context *ctx,
3139 void *info)
3140 {
3141 struct perf_event *leader = event->group_leader;
3142 struct perf_event_context *task_ctx;
3143
3144 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
3145 event->state <= PERF_EVENT_STATE_ERROR)
3146 return;
3147
3148 ctx_time_freeze(cpuctx, ctx);
3149
3150 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
3151 perf_cgroup_event_enable(event, ctx);
3152
3153 if (!ctx->is_active)
3154 return;
3155
3156 if (!event_filter_match(event))
3157 return;
3158
3159 /*
3160 * If the event is in a group and isn't the group leader,
3161 * then don't put it on unless the group is on.
3162 */
3163 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
3164 return;
3165
3166 task_ctx = cpuctx->task_ctx;
3167 if (ctx->task)
3168 WARN_ON_ONCE(task_ctx != ctx);
3169
3170 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, get_event_type(event));
3171 }
3172
3173 /*
3174 * Enable an event.
3175 *
3176 * If event->ctx is a cloned context, callers must make sure that
3177 * every task struct that event->ctx->task could possibly point to
3178 * remains valid. This condition is satisfied when called through
3179 * perf_event_for_each_child or perf_event_for_each as described
3180 * for perf_event_disable.
3181 */
3182 static void _perf_event_enable(struct perf_event *event)
3183 {
3184 struct perf_event_context *ctx = event->ctx;
3185
3186 raw_spin_lock_irq(&ctx->lock);
3187 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
3188 event->state < PERF_EVENT_STATE_ERROR) {
3189 out:
3190 raw_spin_unlock_irq(&ctx->lock);
3191 return;
3192 }
3193
3194 /*
3195 * If the event is in error state, clear that first.
3196 *
3197 * That way, if we see the event in error state below, we know that it
3198 * has gone back into error state, as distinct from the task having
3199 * been scheduled away before the cross-call arrived.
3200 */
3201 if (event->state == PERF_EVENT_STATE_ERROR) {
3202 /*
3203 * Detached SIBLING events cannot leave ERROR state.
3204 */
3205 if (event->event_caps & PERF_EV_CAP_SIBLING &&
3206 event->group_leader == event)
3207 goto out;
3208
3209 event->state = PERF_EVENT_STATE_OFF;
3210 }
3211 raw_spin_unlock_irq(&ctx->lock);
3212
3213 event_function_call(event, __perf_event_enable, NULL);
3214 }
3215
3216 /*
3217 * See perf_event_disable();
3218 */
3219 void perf_event_enable(struct perf_event *event)
3220 {
3221 struct perf_event_context *ctx;
3222
3223 ctx = perf_event_ctx_lock(event);
3224 _perf_event_enable(event);
3225 perf_event_ctx_unlock(event, ctx);
3226 }
3227 EXPORT_SYMBOL_GPL(perf_event_enable);
3228
3229 struct stop_event_data {
3230 struct perf_event *event;
3231 unsigned int restart;
3232 };
3233
3234 static int __perf_event_stop(void *info)
3235 {
3236 struct stop_event_data *sd = info;
3237 struct perf_event *event = sd->event;
3238
3239 /* if it's already INACTIVE, do nothing */
3240 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
3241 return 0;
3242
3243 /* matches smp_wmb() in event_sched_in() */
3244 smp_rmb();
3245
3246 /*
3247 * There is a window with interrupts enabled before we get here,
3248 * so we need to check again lest we try to stop another CPU's event.
3249 */
3250 if (READ_ONCE(event->oncpu) != smp_processor_id())
3251 return -EAGAIN;
3252
3253 event->pmu->stop(event, PERF_EF_UPDATE);
3254
3255 /*
3256 * May race with the actual stop (through perf_pmu_output_stop()),
3257 * but it is only used for events with AUX ring buffer, and such
3258 * events will refuse to restart because of rb::aux_mmap_count==0,
3259 * see comments in perf_aux_output_begin().
3260 *
3261 * Since this is happening on an event-local CPU, no trace is lost
3262 * while restarting.
3263 */
3264 if (sd->restart)
3265 event->pmu->start(event, 0);
3266
3267 return 0;
3268 }
3269
3270 static int perf_event_stop(struct perf_event *event, int restart)
3271 {
3272 struct stop_event_data sd = {
3273 .event = event,
3274 .restart = restart,
3275 };
3276 int ret = 0;
3277
3278 do {
3279 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
3280 return 0;
3281
3282 /* matches smp_wmb() in event_sched_in() */
3283 smp_rmb();
3284
3285 /*
3286 * We only want to restart ACTIVE events, so if the event goes
3287 * inactive here (event->oncpu==-1), there's nothing more to do;
3288 * fall through with ret==-ENXIO.
3289 */
3290 ret = cpu_function_call(READ_ONCE(event->oncpu),
3291 __perf_event_stop, &sd);
3292 } while (ret == -EAGAIN);
3293
3294 return ret;
3295 }
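/*
 * The retry loop above covers the event migrating to another CPU
 * between the ->oncpu read and the cross-call: __perf_event_stop()
 * returns -EAGAIN when it finds itself on the wrong CPU, and we simply
 * re-read ->oncpu and try again.
 */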
3296
3297 /*
3298 * In order to contain the amount of racy and tricky code in the address
3299 * filter configuration management, it is a two-part process:
3300 *
3301 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
3302 * we update the addresses of corresponding vmas in
3303 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3304 * (p2) when an event is scheduled in (pmu::add), it calls
3305 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
3306 * if the generation has changed since the previous call.
3307 *
3308 * If (p1) happens while the event is active, we restart it to force (p2).
3309 *
3310 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
3311 * pre-existing mappings, called once when new filters arrive via SET_FILTER
3312 * ioctl;
3313 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
3314 * registered mapping, called for every new mmap(), with mm::mmap_lock down
3315 * for reading;
3316 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
3317 * of exec.
3318 */
3319 void perf_event_addr_filters_sync(struct perf_event *event)
3320 {
3321 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
3322
3323 if (!has_addr_filter(event))
3324 return;
3325
3326 raw_spin_lock(&ifh->lock);
3327 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
3328 event->pmu->addr_filters_sync(event);
3329 event->hw.addr_filters_gen = event->addr_filters_gen;
3330 }
3331 raw_spin_unlock(&ifh->lock);
3332 }
3333 EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
3334
3335 static int _perf_event_refresh(struct perf_event *event, int refresh)
3336 {
3337 /*
3338 * not supported on inherited events
3339 */
3340 if (event->attr.inherit || !is_sampling_event(event))
3341 return -EINVAL;
3342
3343 atomic_add(refresh, &event->event_limit);
3344 _perf_event_enable(event);
3345
3346 return 0;
3347 }
3348
3349 /*
3350 * See perf_event_disable()
3351 */
3352 int perf_event_refresh(struct perf_event *event, int refresh)
3353 {
3354 struct perf_event_context *ctx;
3355 int ret;
3356
3357 ctx = perf_event_ctx_lock(event);
3358 ret = _perf_event_refresh(event, refresh);
3359 perf_event_ctx_unlock(event, ctx);
3360
3361 return ret;
3362 }
3363 EXPORT_SYMBOL_GPL(perf_event_refresh);
3364
3365 static int perf_event_modify_breakpoint(struct perf_event *bp,
3366 struct perf_event_attr *attr)
3367 {
3368 int err;
3369
3370 _perf_event_disable(bp);
3371
3372 err = modify_user_hw_breakpoint_check(bp, attr, true);
3373
3374 if (!bp->attr.disabled)
3375 _perf_event_enable(bp);
3376
3377 return err;
3378 }
3379
3380 /*
3381 * Copy event-type-independent attributes that may be modified.
3382 */
3383 static void perf_event_modify_copy_attr(struct perf_event_attr *to,
3384 const struct perf_event_attr *from)
3385 {
3386 to->sig_data = from->sig_data;
3387 }
3388
3389 static int perf_event_modify_attr(struct perf_event *event,
3390 struct perf_event_attr *attr)
3391 {
3392 int (*func)(struct perf_event *, struct perf_event_attr *);
3393 struct perf_event *child;
3394 int err;
3395
3396 if (event->attr.type != attr->type)
3397 return -EINVAL;
3398
3399 switch (event->attr.type) {
3400 case PERF_TYPE_BREAKPOINT:
3401 func = perf_event_modify_breakpoint;
3402 break;
3403 default:
3404 /* Placeholder for future additions. */
3405 return -EOPNOTSUPP;
3406 }
3407
3408 WARN_ON_ONCE(event->ctx->parent_ctx);
3409
3410 mutex_lock(&event->child_mutex);
3411 /*
3412 * Event-type-independent attributes must be copied before event-type
3413 * modification, which will validate that final attributes match the
3414 * source attributes after all relevant attributes have been copied.
3415 */
3416 perf_event_modify_copy_attr(&event->attr, attr);
3417 err = func(event, attr);
3418 if (err)
3419 goto out;
3420 list_for_each_entry(child, &event->child_list, child_list) {
3421 perf_event_modify_copy_attr(&child->attr, attr);
3422 err = func(child, attr);
3423 if (err)
3424 goto out;
3425 }
3426 out:
3427 mutex_unlock(&event->child_mutex);
3428 return err;
3429 }
3430
3431 static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
3432 enum event_type_t event_type)
3433 {
3434 struct perf_event_context *ctx = pmu_ctx->ctx;
3435 struct perf_event *event, *tmp;
3436 struct pmu *pmu = pmu_ctx->pmu;
3437
3438 if (ctx->task && !(ctx->is_active & EVENT_ALL)) {
3439 struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
3440
3441 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
3442 cpc->task_epc = NULL;
3443 }
3444
3445 if (!(event_type & EVENT_ALL))
3446 return;
3447
3448 perf_pmu_disable(pmu);
3449 if (event_type & EVENT_PINNED) {
3450 list_for_each_entry_safe(event, tmp,
3451 &pmu_ctx->pinned_active,
3452 active_list)
3453 group_sched_out(event, ctx);
3454 }
3455
3456 if (event_type & EVENT_FLEXIBLE) {
3457 list_for_each_entry_safe(event, tmp,
3458 &pmu_ctx->flexible_active,
3459 active_list)
3460 group_sched_out(event, ctx);
3461 /*
3462 * Since we cleared EVENT_FLEXIBLE, also clear
3463 * rotate_necessary; it will be reset by
3464 * ctx_flexible_sched_in() when needed.
3465 */
3466 pmu_ctx->rotate_necessary = 0;
3467 }
3468 perf_pmu_enable(pmu);
3469 }
3470
3471 /*
3472 * Be very careful with the @pmu argument since this will change ctx state.
3473 * The @pmu argument works for ctx_resched(), because that is symmetric in
3474 * ctx_sched_out() / ctx_sched_in() usage and the ctx state ends up invariant.
3475 *
3476 * However, if you were to be asymmetrical, you could end up with messed up
3477 * state, eg. ctx->is_active cleared even though most EPCs would still actually
3478 * be active.
3479 */
3480 static void
3481 ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type)
3482 {
3483 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3484 struct perf_event_pmu_context *pmu_ctx;
3485 int is_active = ctx->is_active;
3486 bool cgroup = event_type & EVENT_CGROUP;
3487
3488 event_type &= ~EVENT_CGROUP;
3489
3490 lockdep_assert_held(&ctx->lock);
3491
3492 if (likely(!ctx->nr_events)) {
3493 /*
3494 * See __perf_remove_from_context().
3495 */
3496 WARN_ON_ONCE(ctx->is_active);
3497 if (ctx->task)
3498 WARN_ON_ONCE(cpuctx->task_ctx);
3499 return;
3500 }
3501
3502 /*
3503 * Always update time if it was set; not only when it changes.
3504 * Otherwise we can 'forget' to update time for any but the last
3505 * context we sched out. For example:
3506 *
3507 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
3508 * ctx_sched_out(.event_type = EVENT_PINNED)
3509 *
3510 * would only update time for the pinned events.
3511 */
3512 __ctx_time_update(cpuctx, ctx, ctx == &cpuctx->ctx);
3513
3514 /*
3515 * CPU-release for the below ->is_active store,
3516 * see __load_acquire() in perf_event_time_now()
3517 */
3518 barrier();
3519 ctx->is_active &= ~event_type;
3520
3521 if (!(ctx->is_active & EVENT_ALL)) {
3522 /*
3523 * For FROZEN, preserve TIME|FROZEN such that perf_event_time_now()
3524 * does not observe a hole. perf_ctx_unlock() will clean up.
3525 */
3526 if (ctx->is_active & EVENT_FROZEN)
3527 ctx->is_active &= EVENT_TIME_FROZEN;
3528 else
3529 ctx->is_active = 0;
3530 }
3531
3532 if (ctx->task) {
3533 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3534 if (!(ctx->is_active & EVENT_ALL))
3535 cpuctx->task_ctx = NULL;
3536 }
3537
3538 is_active ^= ctx->is_active; /* changed bits */
3539
3540 for_each_epc(pmu_ctx, ctx, pmu, cgroup)
3541 __pmu_ctx_sched_out(pmu_ctx, is_active);
3542 }
3543
3544 /*
3545 * Test whether two contexts are equivalent, i.e. whether they have both been
3546 * cloned from the same version of the same context.
3547 *
3548 * Equivalence is measured using a generation number in the context that is
3549 * incremented on each modification to it; see unclone_ctx(), list_add_event()
3550 * and list_del_event().
3551 */
3552 static int context_equiv(struct perf_event_context *ctx1,
3553 struct perf_event_context *ctx2)
3554 {
3555 lockdep_assert_held(&ctx1->lock);
3556 lockdep_assert_held(&ctx2->lock);
3557
3558 /* Pinning disables the swap optimization */
3559 if (ctx1->pin_count || ctx2->pin_count)
3560 return 0;
3561
3562 /* If ctx1 is the parent of ctx2 */
3563 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
3564 return 1;
3565
3566 /* If ctx2 is the parent of ctx1 */
3567 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
3568 return 1;
3569
3570 /*
3571 * If ctx1 and ctx2 have the same parent; we flatten the parent
3572 * hierarchy, see perf_event_init_context().
3573 */
3574 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
3575 ctx1->parent_gen == ctx2->parent_gen)
3576 return 1;
3577
3578 /* Unmatched */
3579 return 0;
3580 }
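/*
 * Illustration (not from the source): a task context at generation G that
 * is cloned at fork records parent_gen = G in the child.  As long as
 * neither side adds or removes events or gets uncloned (all of which bump
 * ->generation), the pair compares equal above and the cheap context-swap
 * path in perf_event_context_sched_out() can be used.
 */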
3581
3582 static void __perf_event_sync_stat(struct perf_event *event,
3583 struct perf_event *next_event)
3584 {
3585 u64 value;
3586
3587 if (!event->attr.inherit_stat)
3588 return;
3589
3590 /*
3591 * Update the event value, we cannot use perf_event_read()
3592 * because we're in the middle of a context switch and have IRQs
3593 * disabled, which upsets smp_call_function_single(), however
3594 * we know the event must be on the current CPU, therefore we
3595 * don't need to use it.
3596 */
3597 perf_pmu_read(event);
3598
3599 perf_event_update_time(event);
3600
3601 /*
3602 * In order to keep per-task stats reliable we need to flip the event
3603 * values when we flip the contexts.
3604 */
3605 value = local64_read(&next_event->count);
3606 value = local64_xchg(&event->count, value);
3607 local64_set(&next_event->count, value);
3608
3609 swap(event->total_time_enabled, next_event->total_time_enabled);
3610 swap(event->total_time_running, next_event->total_time_running);
3611
3612 /*
3613 * Since we swizzled the values, update the user visible data too.
3614 */
3615 perf_event_update_userpage(event);
3616 perf_event_update_userpage(next_event);
3617 }
3618
3619 static void perf_event_sync_stat(struct perf_event_context *ctx,
3620 struct perf_event_context *next_ctx)
3621 {
3622 struct perf_event *event, *next_event;
3623
3624 if (!ctx->nr_stat)
3625 return;
3626
3627 update_context_time(ctx);
3628
3629 event = list_first_entry(&ctx->event_list,
3630 struct perf_event, event_entry);
3631
3632 next_event = list_first_entry(&next_ctx->event_list,
3633 struct perf_event, event_entry);
3634
3635 while (&event->event_entry != &ctx->event_list &&
3636 &next_event->event_entry != &next_ctx->event_list) {
3637
3638 __perf_event_sync_stat(event, next_event);
3639
3640 event = list_next_entry(event, event_entry);
3641 next_event = list_next_entry(next_event, event_entry);
3642 }
3643 }
3644
3645 static void perf_ctx_sched_task_cb(struct perf_event_context *ctx,
3646 struct task_struct *task, bool sched_in)
3647 {
3648 struct perf_event_pmu_context *pmu_ctx;
3649 struct perf_cpu_pmu_context *cpc;
3650
3651 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
3652 cpc = this_cpc(pmu_ctx->pmu);
3653
3654 if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task)
3655 pmu_ctx->pmu->sched_task(pmu_ctx, task, sched_in);
3656 }
3657 }
3658
3659 static void
3660 perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
3661 {
3662 struct perf_event_context *ctx = task->perf_event_ctxp;
3663 struct perf_event_context *next_ctx;
3664 struct perf_event_context *parent, *next_parent;
3665 int do_switch = 1;
3666
3667 if (likely(!ctx))
3668 return;
3669
3670 rcu_read_lock();
3671 next_ctx = rcu_dereference(next->perf_event_ctxp);
3672 if (!next_ctx)
3673 goto unlock;
3674
3675 parent = rcu_dereference(ctx->parent_ctx);
3676 next_parent = rcu_dereference(next_ctx->parent_ctx);
3677
3678 	/* If neither context has a parent context, they cannot be clones. */
3679 if (!parent && !next_parent)
3680 goto unlock;
3681
3682 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
3683 /*
3684 * Looks like the two contexts are clones, so we might be
3685 * able to optimize the context switch. We lock both
3686 * contexts and check that they are clones under the
3687 * lock (including re-checking that neither has been
3688 * uncloned in the meantime). It doesn't matter which
3689 * order we take the locks because no other cpu could
3690 * be trying to lock both of these tasks.
3691 */
3692 raw_spin_lock(&ctx->lock);
3693 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
3694 if (context_equiv(ctx, next_ctx)) {
3695
3696 perf_ctx_disable(ctx, false);
3697
3698 /* PMIs are disabled; ctx->nr_no_switch_fast is stable. */
3699 if (local_read(&ctx->nr_no_switch_fast) ||
3700 local_read(&next_ctx->nr_no_switch_fast)) {
3701 /*
3702 				 * Must not swap out ctx when there are pending
3703 * events that rely on the ctx->task relation.
3704 *
3705 * Likewise, when a context contains inherit +
3706 * SAMPLE_READ events they should be switched
3707 * out using the slow path so that they are
3708 * treated as if they were distinct contexts.
3709 */
3710 raw_spin_unlock(&next_ctx->lock);
3711 rcu_read_unlock();
3712 goto inside_switch;
3713 }
3714
3715 WRITE_ONCE(ctx->task, next);
3716 WRITE_ONCE(next_ctx->task, task);
3717
3718 perf_ctx_sched_task_cb(ctx, task, false);
3719
3720 perf_ctx_enable(ctx, false);
3721
3722 /*
3723 * RCU_INIT_POINTER here is safe because we've not
3724 * modified the ctx and the above modification of
3725 * ctx->task is immaterial since this value is
3726 * always verified under ctx->lock which we're now
3727 * holding.
3728 */
3729 RCU_INIT_POINTER(task->perf_event_ctxp, next_ctx);
3730 RCU_INIT_POINTER(next->perf_event_ctxp, ctx);
3731
3732 do_switch = 0;
3733
3734 perf_event_sync_stat(ctx, next_ctx);
3735 }
3736 raw_spin_unlock(&next_ctx->lock);
3737 raw_spin_unlock(&ctx->lock);
3738 }
3739 unlock:
3740 rcu_read_unlock();
3741
3742 if (do_switch) {
3743 raw_spin_lock(&ctx->lock);
3744 perf_ctx_disable(ctx, false);
3745
3746 inside_switch:
3747 perf_ctx_sched_task_cb(ctx, task, false);
3748 task_ctx_sched_out(ctx, NULL, EVENT_ALL);
3749
3750 perf_ctx_enable(ctx, false);
3751 raw_spin_unlock(&ctx->lock);
3752 }
3753 }
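/*
 * Example of the clone-swap optimization above (illustrative): with
 * inherited events (e.g. a counting session on a workload that forks),
 * parent and child carry cloned contexts counting the same things.  On a
 * switch between the two it is enough to swap the ctx pointers and sync
 * the inherit_stat counts, instead of scheduling every event out of the
 * PMU and back in via the do_switch / task_ctx_sched_out() slow path.
 */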
3754
3755 static DEFINE_PER_CPU(struct list_head, sched_cb_list);
3756 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
3757
3758 void perf_sched_cb_dec(struct pmu *pmu)
3759 {
3760 struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
3761
3762 this_cpu_dec(perf_sched_cb_usages);
3763 barrier();
3764
3765 if (!--cpc->sched_cb_usage)
3766 list_del(&cpc->sched_cb_entry);
3767 }
3768
3769
3770 void perf_sched_cb_inc(struct pmu *pmu)
3771 {
3772 struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
3773
3774 if (!cpc->sched_cb_usage++)
3775 list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
3776
3777 barrier();
3778 this_cpu_inc(perf_sched_cb_usages);
3779 }
3780
3781 /*
3782 * This function provides the context switch callback to the lower code
3783 * layer. It is invoked ONLY when the context switch callback is enabled.
3784 *
3785  * This callback is relevant even to per-cpu events; for example multi-event
3786 * PEBS requires this to provide PID/TID information. This requires we flush
3787 * all queued PEBS records before we context switch to a new task.
3788 */
3789 static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc,
3790 struct task_struct *task, bool sched_in)
3791 {
3792 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3793 struct pmu *pmu;
3794
3795 pmu = cpc->epc.pmu;
3796
3797 /* software PMUs will not have sched_task */
3798 if (WARN_ON_ONCE(!pmu->sched_task))
3799 return;
3800
3801 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3802 perf_pmu_disable(pmu);
3803
3804 pmu->sched_task(cpc->task_epc, task, sched_in);
3805
3806 perf_pmu_enable(pmu);
3807 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3808 }
3809
3810 static void perf_pmu_sched_task(struct task_struct *prev,
3811 struct task_struct *next,
3812 bool sched_in)
3813 {
3814 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3815 struct perf_cpu_pmu_context *cpc;
3816
3817 /* cpuctx->task_ctx will be handled in perf_event_context_sched_in/out */
3818 if (prev == next || cpuctx->task_ctx)
3819 return;
3820
3821 list_for_each_entry(cpc, this_cpu_ptr(&sched_cb_list), sched_cb_entry)
3822 __perf_pmu_sched_task(cpc, sched_in ? next : prev, sched_in);
3823 }
3824
3825 static void perf_event_switch(struct task_struct *task,
3826 struct task_struct *next_prev, bool sched_in);
3827
3828 /*
3829 * Called from scheduler to remove the events of the current task,
3830 * with interrupts disabled.
3831 *
3832 * We stop each event and update the event value in event->count.
3833 *
3834 * This does not protect us against NMI, but disable()
3835 * sets the disabled bit in the control field of event _before_
3836 * accessing the event control register. If a NMI hits, then it will
3837 * not restart the event.
3838 */
3839 void __perf_event_task_sched_out(struct task_struct *task,
3840 struct task_struct *next)
3841 {
3842 if (__this_cpu_read(perf_sched_cb_usages))
3843 perf_pmu_sched_task(task, next, false);
3844
3845 if (atomic_read(&nr_switch_events))
3846 perf_event_switch(task, next, false);
3847
3848 perf_event_context_sched_out(task, next);
3849
3850 /*
3851 * if cgroup events exist on this CPU, then we need
3852 * to check if we have to switch out PMU state.
3853 	 * cgroup events are in system-wide mode only
3854 */
3855 perf_cgroup_switch(next);
3856 }
3857
3858 static bool perf_less_group_idx(const void *l, const void *r, void __always_unused *args)
3859 {
3860 const struct perf_event *le = *(const struct perf_event **)l;
3861 const struct perf_event *re = *(const struct perf_event **)r;
3862
3863 return le->group_index < re->group_index;
3864 }
3865
3866 DEFINE_MIN_HEAP(struct perf_event *, perf_event_min_heap);
3867
3868 static const struct min_heap_callbacks perf_min_heap = {
3869 .less = perf_less_group_idx,
3870 .swp = NULL,
3871 };
3872
3873 static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event)
3874 {
3875 struct perf_event **itrs = heap->data;
3876
3877 if (event) {
3878 itrs[heap->nr] = event;
3879 heap->nr++;
3880 }
3881 }
3882
3883 static void __link_epc(struct perf_event_pmu_context *pmu_ctx)
3884 {
3885 struct perf_cpu_pmu_context *cpc;
3886
3887 if (!pmu_ctx->ctx->task)
3888 return;
3889
3890 cpc = this_cpc(pmu_ctx->pmu);
3891 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
3892 cpc->task_epc = pmu_ctx;
3893 }
3894
3895 static noinline int visit_groups_merge(struct perf_event_context *ctx,
3896 struct perf_event_groups *groups, int cpu,
3897 struct pmu *pmu,
3898 int (*func)(struct perf_event *, void *),
3899 void *data)
3900 {
3901 #ifdef CONFIG_CGROUP_PERF
3902 struct cgroup_subsys_state *css = NULL;
3903 #endif
3904 struct perf_cpu_context *cpuctx = NULL;
3905 /* Space for per CPU and/or any CPU event iterators. */
3906 struct perf_event *itrs[2];
3907 struct perf_event_min_heap event_heap;
3908 struct perf_event **evt;
3909 int ret;
3910
3911 if (pmu->filter && pmu->filter(pmu, cpu))
3912 return 0;
3913
3914 if (!ctx->task) {
3915 cpuctx = this_cpu_ptr(&perf_cpu_context);
3916 event_heap = (struct perf_event_min_heap){
3917 .data = cpuctx->heap,
3918 .nr = 0,
3919 .size = cpuctx->heap_size,
3920 };
3921
3922 lockdep_assert_held(&cpuctx->ctx.lock);
3923
3924 #ifdef CONFIG_CGROUP_PERF
3925 if (cpuctx->cgrp)
3926 css = &cpuctx->cgrp->css;
3927 #endif
3928 } else {
3929 event_heap = (struct perf_event_min_heap){
3930 .data = itrs,
3931 .nr = 0,
3932 .size = ARRAY_SIZE(itrs),
3933 };
3934 /* Events not within a CPU context may be on any CPU. */
3935 __heap_add(&event_heap, perf_event_groups_first(groups, -1, pmu, NULL));
3936 }
3937 evt = event_heap.data;
3938
3939 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, NULL));
3940
3941 #ifdef CONFIG_CGROUP_PERF
3942 for (; css; css = css->parent)
3943 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, css->cgroup));
3944 #endif
3945
3946 if (event_heap.nr) {
3947 __link_epc((*evt)->pmu_ctx);
3948 perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu);
3949 }
3950
3951 min_heapify_all_inline(&event_heap, &perf_min_heap, NULL);
3952
3953 while (event_heap.nr) {
3954 ret = func(*evt, data);
3955 if (ret)
3956 return ret;
3957
3958 *evt = perf_event_groups_next(*evt, pmu);
3959 if (*evt)
3960 min_heap_sift_down_inline(&event_heap, 0, &perf_min_heap, NULL);
3961 else
3962 min_heap_pop_inline(&event_heap, &perf_min_heap, NULL);
3963 }
3964
3965 return 0;
3966 }
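/*
 * Sketch of the merge above (illustrative): the heap is seeded with at
 * most one iterator per RB-tree "lane" that can contribute events here.
 * For a CPU context that is the {cpu} lane plus, with CONFIG_CGROUP_PERF,
 * one lane per cgroup ancestor; for a task context it is the {cpu = -1}
 * and {cpu = this} lanes.  Repeatedly popping the smallest group_index
 * then visits all eligible groups in creation order, regardless of which
 * lane they live in.
 */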
3967
3968 /*
3969 * Because the userpage is strictly per-event (there is no concept of context,
3970 * so there cannot be a context indirection), every userpage must be updated
3971 * when context time starts :-(
3972 *
3973 * IOW, we must not miss EVENT_TIME edges.
3974 */
3975 static inline bool event_update_userpage(struct perf_event *event)
3976 {
3977 if (likely(!refcount_read(&event->mmap_count)))
3978 return false;
3979
3980 perf_event_update_time(event);
3981 perf_event_update_userpage(event);
3982
3983 return true;
3984 }
3985
3986 static inline void group_update_userpage(struct perf_event *group_event)
3987 {
3988 struct perf_event *event;
3989
3990 if (!event_update_userpage(group_event))
3991 return;
3992
3993 for_each_sibling_event(event, group_event)
3994 event_update_userpage(event);
3995 }
3996
3997 static int merge_sched_in(struct perf_event *event, void *data)
3998 {
3999 struct perf_event_context *ctx = event->ctx;
4000 int *can_add_hw = data;
4001
4002 if (event->state <= PERF_EVENT_STATE_OFF)
4003 return 0;
4004
4005 if (!event_filter_match(event))
4006 return 0;
4007
4008 if (group_can_go_on(event, *can_add_hw)) {
4009 if (!group_sched_in(event, ctx))
4010 list_add_tail(&event->active_list, get_event_list(event));
4011 }
4012
4013 if (event->state == PERF_EVENT_STATE_INACTIVE) {
4014 *can_add_hw = 0;
4015 if (event->attr.pinned) {
4016 perf_cgroup_event_disable(event, ctx);
4017 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
4018
4019 if (*perf_event_fasync(event))
4020 event->pending_kill = POLL_ERR;
4021
4022 perf_event_wakeup(event);
4023 } else {
4024 struct perf_cpu_pmu_context *cpc = this_cpc(event->pmu_ctx->pmu);
4025
4026 event->pmu_ctx->rotate_necessary = 1;
4027 perf_mux_hrtimer_restart(cpc);
4028 group_update_userpage(event);
4029 }
4030 }
4031
4032 return 0;
4033 }
4034
4035 static void pmu_groups_sched_in(struct perf_event_context *ctx,
4036 struct perf_event_groups *groups,
4037 struct pmu *pmu)
4038 {
4039 int can_add_hw = 1;
4040 visit_groups_merge(ctx, groups, smp_processor_id(), pmu,
4041 merge_sched_in, &can_add_hw);
4042 }
4043
4044 static void __pmu_ctx_sched_in(struct perf_event_pmu_context *pmu_ctx,
4045 enum event_type_t event_type)
4046 {
4047 struct perf_event_context *ctx = pmu_ctx->ctx;
4048
4049 if (event_type & EVENT_PINNED)
4050 pmu_groups_sched_in(ctx, &ctx->pinned_groups, pmu_ctx->pmu);
4051 if (event_type & EVENT_FLEXIBLE)
4052 pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu_ctx->pmu);
4053 }
4054
4055 static void
4056 ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type)
4057 {
4058 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4059 struct perf_event_pmu_context *pmu_ctx;
4060 int is_active = ctx->is_active;
4061 bool cgroup = event_type & EVENT_CGROUP;
4062
4063 event_type &= ~EVENT_CGROUP;
4064
4065 lockdep_assert_held(&ctx->lock);
4066
4067 if (likely(!ctx->nr_events))
4068 return;
4069
4070 if (!(is_active & EVENT_TIME)) {
4071 /* start ctx time */
4072 __update_context_time(ctx, false);
4073 perf_cgroup_set_timestamp(cpuctx);
4074 /*
4075 * CPU-release for the below ->is_active store,
4076 * see __load_acquire() in perf_event_time_now()
4077 */
4078 barrier();
4079 }
4080
4081 ctx->is_active |= (event_type | EVENT_TIME);
4082 if (ctx->task) {
4083 if (!(is_active & EVENT_ALL))
4084 cpuctx->task_ctx = ctx;
4085 else
4086 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
4087 }
4088
4089 is_active ^= ctx->is_active; /* changed bits */
4090
4091 /*
4092 * First go through the list and put on any pinned groups
4093 * in order to give them the best chance of going on.
4094 */
4095 if (is_active & EVENT_PINNED) {
4096 for_each_epc(pmu_ctx, ctx, pmu, cgroup)
4097 __pmu_ctx_sched_in(pmu_ctx, EVENT_PINNED);
4098 }
4099
4100 /* Then walk through the lower prio flexible groups */
4101 if (is_active & EVENT_FLEXIBLE) {
4102 for_each_epc(pmu_ctx, ctx, pmu, cgroup)
4103 __pmu_ctx_sched_in(pmu_ctx, EVENT_FLEXIBLE);
4104 }
4105 }
4106
4107 static void perf_event_context_sched_in(struct task_struct *task)
4108 {
4109 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4110 struct perf_event_context *ctx;
4111
4112 rcu_read_lock();
4113 ctx = rcu_dereference(task->perf_event_ctxp);
4114 if (!ctx)
4115 goto rcu_unlock;
4116
4117 if (cpuctx->task_ctx == ctx) {
4118 perf_ctx_lock(cpuctx, ctx);
4119 perf_ctx_disable(ctx, false);
4120
4121 perf_ctx_sched_task_cb(ctx, task, true);
4122
4123 perf_ctx_enable(ctx, false);
4124 perf_ctx_unlock(cpuctx, ctx);
4125 goto rcu_unlock;
4126 }
4127
4128 perf_ctx_lock(cpuctx, ctx);
4129 /*
4130 * We must check ctx->nr_events while holding ctx->lock, such
4131 * that we serialize against perf_install_in_context().
4132 */
4133 if (!ctx->nr_events)
4134 goto unlock;
4135
4136 perf_ctx_disable(ctx, false);
4137 /*
4138 * We want to keep the following priority order:
4139 * cpu pinned (that don't need to move), task pinned,
4140 * cpu flexible, task flexible.
4141 *
4142 	 * However, if the task's ctx is not carrying any pinned
4143 * events, no need to flip the cpuctx's events around.
4144 */
4145 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
4146 perf_ctx_disable(&cpuctx->ctx, false);
4147 ctx_sched_out(&cpuctx->ctx, NULL, EVENT_FLEXIBLE);
4148 }
4149
4150 perf_event_sched_in(cpuctx, ctx, NULL);
4151
4152 perf_ctx_sched_task_cb(cpuctx->task_ctx, task, true);
4153
4154 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
4155 perf_ctx_enable(&cpuctx->ctx, false);
4156
4157 perf_ctx_enable(ctx, false);
4158
4159 unlock:
4160 perf_ctx_unlock(cpuctx, ctx);
4161 rcu_unlock:
4162 rcu_read_unlock();
4163 }
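/*
 * Example of the ordering above (illustrative): if the incoming task has a
 * pinned event on the same PMU as a CPU-wide flexible event, the CPU
 * flexible groups are scheduled out first so the task pinned event is
 * guaranteed a chance at a counter; the CPU flexible groups then compete
 * again from perf_event_sched_in().  When the task carries no pinned
 * events this shuffle is skipped entirely.
 */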
4164
4165 /*
4166 * Called from scheduler to add the events of the current task
4167 * with interrupts disabled.
4168 *
4169 * We restore the event value and then enable it.
4170 *
4171 * This does not protect us against NMI, but enable()
4172 * sets the enabled bit in the control field of event _before_
4173 * accessing the event control register. If a NMI hits, then it will
4174 * keep the event running.
4175 */
4176 void __perf_event_task_sched_in(struct task_struct *prev,
4177 struct task_struct *task)
4178 {
4179 perf_event_context_sched_in(task);
4180
4181 if (atomic_read(&nr_switch_events))
4182 perf_event_switch(task, prev, true);
4183
4184 if (__this_cpu_read(perf_sched_cb_usages))
4185 perf_pmu_sched_task(prev, task, true);
4186 }
4187
4188 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
4189 {
4190 u64 frequency = event->attr.sample_freq;
4191 u64 sec = NSEC_PER_SEC;
4192 u64 divisor, dividend;
4193
4194 int count_fls, nsec_fls, frequency_fls, sec_fls;
4195
4196 count_fls = fls64(count);
4197 nsec_fls = fls64(nsec);
4198 frequency_fls = fls64(frequency);
4199 sec_fls = 30;
4200
4201 /*
4202 * We got @count in @nsec, with a target of sample_freq HZ
4203 * the target period becomes:
4204 *
4205 * @count * 10^9
4206 * period = -------------------
4207 * @nsec * sample_freq
4208 *
4209 */
4210
4211 /*
4212 * Reduce accuracy by one bit such that @a and @b converge
4213 * to a similar magnitude.
4214 */
4215 #define REDUCE_FLS(a, b) \
4216 do { \
4217 if (a##_fls > b##_fls) { \
4218 a >>= 1; \
4219 a##_fls--; \
4220 } else { \
4221 b >>= 1; \
4222 b##_fls--; \
4223 } \
4224 } while (0)
4225
4226 /*
4227 * Reduce accuracy until either term fits in a u64, then proceed with
4228 * the other, so that finally we can do a u64/u64 division.
4229 */
4230 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
4231 REDUCE_FLS(nsec, frequency);
4232 REDUCE_FLS(sec, count);
4233 }
4234
4235 if (count_fls + sec_fls > 64) {
4236 divisor = nsec * frequency;
4237
4238 while (count_fls + sec_fls > 64) {
4239 REDUCE_FLS(count, sec);
4240 divisor >>= 1;
4241 }
4242
4243 dividend = count * sec;
4244 } else {
4245 dividend = count * sec;
4246
4247 while (nsec_fls + frequency_fls > 64) {
4248 REDUCE_FLS(nsec, frequency);
4249 dividend >>= 1;
4250 }
4251
4252 divisor = nsec * frequency;
4253 }
4254
4255 if (!divisor)
4256 return dividend;
4257
4258 return div64_u64(dividend, divisor);
4259 }
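/*
 * Worked example (illustrative numbers): a counter accumulated
 * count = 1,000,000 events over nsec = 4,000,000 ns (4 ms) with
 * sample_freq = 1000 Hz requested:
 *
 *            1,000,000 * 10^9
 *   period = ----------------- = 250,000
 *            4,000,000 * 1000
 *
 * i.e. the event fires at ~250M/s, so sampling every 250,000 events yields
 * roughly 1000 samples per second.  The REDUCE_FLS() shifts above only
 * kick in when an intermediate product would overflow 64 bits.
 */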
4260
4261 static DEFINE_PER_CPU(int, perf_throttled_count);
4262 static DEFINE_PER_CPU(u64, perf_throttled_seq);
4263
4264 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
4265 {
4266 struct hw_perf_event *hwc = &event->hw;
4267 s64 period, sample_period;
4268 s64 delta;
4269
4270 period = perf_calculate_period(event, nsec, count);
4271
4272 delta = (s64)(period - hwc->sample_period);
4273 if (delta >= 0)
4274 delta += 7;
4275 else
4276 delta -= 7;
4277 delta /= 8; /* low pass filter */
4278
4279 sample_period = hwc->sample_period + delta;
4280
4281 if (!sample_period)
4282 sample_period = 1;
4283
4284 hwc->sample_period = sample_period;
4285
4286 if (local64_read(&hwc->period_left) > 8*sample_period) {
4287 if (disable)
4288 event->pmu->stop(event, PERF_EF_UPDATE);
4289
4290 local64_set(&hwc->period_left, 0);
4291
4292 if (disable)
4293 event->pmu->start(event, PERF_EF_RELOAD);
4294 }
4295 }
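/*
 * Low-pass filter illustration (hypothetical numbers): with
 * hwc->sample_period = 100,000 and a freshly computed target of 180,000,
 * delta = 80,000 and only delta/8 = 10,000 is applied, giving 110,000.
 * Successive ticks converge on the target without over-reacting to one
 * noisy interval.  The period_left reset above only triggers when the
 * remaining count is more than 8 periods stale, i.e. the old period was
 * badly off.
 */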
4296
4297 static void perf_adjust_freq_unthr_events(struct list_head *event_list)
4298 {
4299 struct perf_event *event;
4300 struct hw_perf_event *hwc;
4301 u64 now, period = TICK_NSEC;
4302 s64 delta;
4303
4304 list_for_each_entry(event, event_list, active_list) {
4305 if (event->state != PERF_EVENT_STATE_ACTIVE)
4306 continue;
4307
4308 // XXX use visit thingy to avoid the -1,cpu match
4309 if (!event_filter_match(event))
4310 continue;
4311
4312 hwc = &event->hw;
4313
4314 if (hwc->interrupts == MAX_INTERRUPTS)
4315 perf_event_unthrottle_group(event, is_event_in_freq_mode(event));
4316
4317 if (!is_event_in_freq_mode(event))
4318 continue;
4319
4320 /*
4321 * stop the event and update event->count
4322 */
4323 event->pmu->stop(event, PERF_EF_UPDATE);
4324
4325 now = local64_read(&event->count);
4326 delta = now - hwc->freq_count_stamp;
4327 hwc->freq_count_stamp = now;
4328
4329 /*
4330 * restart the event
4331 * reload only if value has changed
4332 * we have stopped the event so tell that
4333 * to perf_adjust_period() to avoid stopping it
4334 * twice.
4335 */
4336 if (delta > 0)
4337 perf_adjust_period(event, period, delta, false);
4338
4339 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
4340 }
4341 }
4342
4343 /*
4344 * combine freq adjustment with unthrottling to avoid two passes over the
4345 * events. At the same time, make sure, having freq events does not change
4346 * the rate of unthrottling as that would introduce bias.
4347 */
4348 static void
4349 perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
4350 {
4351 struct perf_event_pmu_context *pmu_ctx;
4352
4353 /*
4354 * only need to iterate over all events iff:
4355 	 * - context has events in frequency mode (needs freq adjust)
4356 * - there are events to unthrottle on this cpu
4357 */
4358 if (!(ctx->nr_freq || unthrottle))
4359 return;
4360
4361 raw_spin_lock(&ctx->lock);
4362
4363 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
4364 if (!(pmu_ctx->nr_freq || unthrottle))
4365 continue;
4366 if (!perf_pmu_ctx_is_active(pmu_ctx))
4367 continue;
4368 if (pmu_ctx->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT)
4369 continue;
4370
4371 perf_pmu_disable(pmu_ctx->pmu);
4372 perf_adjust_freq_unthr_events(&pmu_ctx->pinned_active);
4373 perf_adjust_freq_unthr_events(&pmu_ctx->flexible_active);
4374 perf_pmu_enable(pmu_ctx->pmu);
4375 }
4376
4377 raw_spin_unlock(&ctx->lock);
4378 }
4379
4380 /*
4381  * Move @event to the tail of the @ctx's eligible events.
4382 */
4383 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
4384 {
4385 /*
4386 * Rotate the first entry last of non-pinned groups. Rotation might be
4387 * disabled by the inheritance code.
4388 */
4389 if (ctx->rotate_disable)
4390 return;
4391
4392 perf_event_groups_delete(&ctx->flexible_groups, event);
4393 perf_event_groups_insert(&ctx->flexible_groups, event);
4394 }
4395
4396 /* pick an event from the flexible_groups to rotate */
4397 static inline struct perf_event *
4398 ctx_event_to_rotate(struct perf_event_pmu_context *pmu_ctx)
4399 {
4400 struct perf_event *event;
4401 struct rb_node *node;
4402 struct rb_root *tree;
4403 struct __group_key key = {
4404 .pmu = pmu_ctx->pmu,
4405 };
4406
4407 /* pick the first active flexible event */
4408 event = list_first_entry_or_null(&pmu_ctx->flexible_active,
4409 struct perf_event, active_list);
4410 if (event)
4411 goto out;
4412
4413 /* if no active flexible event, pick the first event */
4414 tree = &pmu_ctx->ctx->flexible_groups.tree;
4415
4416 if (!pmu_ctx->ctx->task) {
4417 key.cpu = smp_processor_id();
4418
4419 node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
4420 if (node)
4421 event = __node_2_pe(node);
4422 goto out;
4423 }
4424
4425 key.cpu = -1;
4426 node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
4427 if (node) {
4428 event = __node_2_pe(node);
4429 goto out;
4430 }
4431
4432 key.cpu = smp_processor_id();
4433 node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
4434 if (node)
4435 event = __node_2_pe(node);
4436
4437 out:
4438 /*
4439 * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
4440 * finds there are unschedulable events, it will set it again.
4441 */
4442 pmu_ctx->rotate_necessary = 0;
4443
4444 return event;
4445 }
4446
4447 static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc)
4448 {
4449 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4450 struct perf_event_pmu_context *cpu_epc, *task_epc = NULL;
4451 struct perf_event *cpu_event = NULL, *task_event = NULL;
4452 int cpu_rotate, task_rotate;
4453 struct pmu *pmu;
4454
4455 /*
4456 * Since we run this from IRQ context, nobody can install new
4457 * events, thus the event count values are stable.
4458 */
4459
4460 cpu_epc = &cpc->epc;
4461 pmu = cpu_epc->pmu;
4462 task_epc = cpc->task_epc;
4463
4464 cpu_rotate = cpu_epc->rotate_necessary;
4465 task_rotate = task_epc ? task_epc->rotate_necessary : 0;
4466
4467 if (!(cpu_rotate || task_rotate))
4468 return false;
4469
4470 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
4471 perf_pmu_disable(pmu);
4472
4473 if (task_rotate)
4474 task_event = ctx_event_to_rotate(task_epc);
4475 if (cpu_rotate)
4476 cpu_event = ctx_event_to_rotate(cpu_epc);
4477
4478 /*
4479 	 * As per the order given at ctx_resched(), first 'pop' task flexible
4480 	 * and then, if needed, CPU flexible.
4481 */
4482 if (task_event || (task_epc && cpu_event)) {
4483 update_context_time(task_epc->ctx);
4484 __pmu_ctx_sched_out(task_epc, EVENT_FLEXIBLE);
4485 }
4486
4487 if (cpu_event) {
4488 update_context_time(&cpuctx->ctx);
4489 __pmu_ctx_sched_out(cpu_epc, EVENT_FLEXIBLE);
4490 rotate_ctx(&cpuctx->ctx, cpu_event);
4491 __pmu_ctx_sched_in(cpu_epc, EVENT_FLEXIBLE);
4492 }
4493
4494 if (task_event)
4495 rotate_ctx(task_epc->ctx, task_event);
4496
4497 if (task_event || (task_epc && cpu_event))
4498 __pmu_ctx_sched_in(task_epc, EVENT_FLEXIBLE);
4499
4500 perf_pmu_enable(pmu);
4501 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
4502
4503 return true;
4504 }
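/*
 * Multiplexing example (illustrative): a PMU with 4 counters and 6
 * flexible events can only run 4 at a time, so merge_sched_in() marks
 * rotate_necessary.  On each mux hrtimer expiry the code above schedules
 * the flexible groups out, moves the first one to the tail of the tree
 * via rotate_ctx() and schedules back in, so over successive intervals
 * every group gets PMU time and its counts can be scaled by the
 * enabled/running ratio.
 */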
4505
4506 void perf_event_task_tick(void)
4507 {
4508 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4509 struct perf_event_context *ctx;
4510 int throttled;
4511
4512 lockdep_assert_irqs_disabled();
4513
4514 __this_cpu_inc(perf_throttled_seq);
4515 throttled = __this_cpu_xchg(perf_throttled_count, 0);
4516 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
4517
4518 perf_adjust_freq_unthr_context(&cpuctx->ctx, !!throttled);
4519
4520 rcu_read_lock();
4521 ctx = rcu_dereference(current->perf_event_ctxp);
4522 if (ctx)
4523 perf_adjust_freq_unthr_context(ctx, !!throttled);
4524 rcu_read_unlock();
4525 }
4526
4527 static int event_enable_on_exec(struct perf_event *event,
4528 struct perf_event_context *ctx)
4529 {
4530 if (!event->attr.enable_on_exec)
4531 return 0;
4532
4533 event->attr.enable_on_exec = 0;
4534 if (event->state >= PERF_EVENT_STATE_INACTIVE)
4535 return 0;
4536
4537 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
4538
4539 return 1;
4540 }
4541
4542 /*
4543 * Enable all of a task's events that have been marked enable-on-exec.
4544 * This expects task == current.
4545 */
4546 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
4547 {
4548 struct perf_event_context *clone_ctx = NULL;
4549 enum event_type_t event_type = 0;
4550 struct perf_cpu_context *cpuctx;
4551 struct perf_event *event;
4552 unsigned long flags;
4553 int enabled = 0;
4554
4555 local_irq_save(flags);
4556 if (WARN_ON_ONCE(current->perf_event_ctxp != ctx))
4557 goto out;
4558
4559 if (!ctx->nr_events)
4560 goto out;
4561
4562 cpuctx = this_cpu_ptr(&perf_cpu_context);
4563 perf_ctx_lock(cpuctx, ctx);
4564 ctx_time_freeze(cpuctx, ctx);
4565
4566 list_for_each_entry(event, &ctx->event_list, event_entry) {
4567 enabled |= event_enable_on_exec(event, ctx);
4568 event_type |= get_event_type(event);
4569 }
4570
4571 /*
4572 * Unclone and reschedule this context if we enabled any event.
4573 */
4574 if (enabled) {
4575 clone_ctx = unclone_ctx(ctx);
4576 ctx_resched(cpuctx, ctx, NULL, event_type);
4577 }
4578 perf_ctx_unlock(cpuctx, ctx);
4579
4580 out:
4581 local_irq_restore(flags);
4582
4583 if (clone_ctx)
4584 put_ctx(clone_ctx);
4585 }
4586
4587 static void perf_remove_from_owner(struct perf_event *event);
4588 static void perf_event_exit_event(struct perf_event *event,
4589 struct perf_event_context *ctx,
4590 bool revoke);
4591
4592 /*
4593 * Removes all events from the current task that have been marked
4594 * remove-on-exec, and feeds their values back to parent events.
4595 */
4596 static void perf_event_remove_on_exec(struct perf_event_context *ctx)
4597 {
4598 struct perf_event_context *clone_ctx = NULL;
4599 struct perf_event *event, *next;
4600 unsigned long flags;
4601 bool modified = false;
4602
4603 mutex_lock(&ctx->mutex);
4604
4605 if (WARN_ON_ONCE(ctx->task != current))
4606 goto unlock;
4607
4608 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
4609 if (!event->attr.remove_on_exec)
4610 continue;
4611
4612 if (!is_kernel_event(event))
4613 perf_remove_from_owner(event);
4614
4615 modified = true;
4616
4617 perf_event_exit_event(event, ctx, false);
4618 }
4619
4620 raw_spin_lock_irqsave(&ctx->lock, flags);
4621 if (modified)
4622 clone_ctx = unclone_ctx(ctx);
4623 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4624
4625 unlock:
4626 mutex_unlock(&ctx->mutex);
4627
4628 if (clone_ctx)
4629 put_ctx(clone_ctx);
4630 }
4631
4632 struct perf_read_data {
4633 struct perf_event *event;
4634 bool group;
4635 int ret;
4636 };
4637
4638 static inline const struct cpumask *perf_scope_cpu_topology_cpumask(unsigned int scope, int cpu);
4639
4640 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
4641 {
4642 int local_cpu = smp_processor_id();
4643 u16 local_pkg, event_pkg;
4644
4645 if ((unsigned)event_cpu >= nr_cpu_ids)
4646 return event_cpu;
4647
4648 if (event->group_caps & PERF_EV_CAP_READ_SCOPE) {
4649 const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(event->pmu->scope, event_cpu);
4650
4651 if (cpumask && cpumask_test_cpu(local_cpu, cpumask))
4652 return local_cpu;
4653 }
4654
4655 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
4656 event_pkg = topology_physical_package_id(event_cpu);
4657 local_pkg = topology_physical_package_id(local_cpu);
4658
4659 if (event_pkg == local_pkg)
4660 return local_cpu;
4661 }
4662
4663 return event_cpu;
4664 }
4665
4666 /*
4667 * Cross CPU call to read the hardware event
4668 */
4669 static void __perf_event_read(void *info)
4670 {
4671 struct perf_read_data *data = info;
4672 struct perf_event *sub, *event = data->event;
4673 struct perf_event_context *ctx = event->ctx;
4674 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4675 struct pmu *pmu = event->pmu;
4676
4677 /*
4678 * If this is a task context, we need to check whether it is
4679 * the current task context of this cpu. If not it has been
4680 * scheduled out before the smp call arrived. In that case
4681 * event->count would have been updated to a recent sample
4682 * when the event was scheduled out.
4683 */
4684 if (ctx->task && cpuctx->task_ctx != ctx)
4685 return;
4686
4687 raw_spin_lock(&ctx->lock);
4688 ctx_time_update_event(ctx, event);
4689
4690 perf_event_update_time(event);
4691 if (data->group)
4692 perf_event_update_sibling_time(event);
4693
4694 if (event->state != PERF_EVENT_STATE_ACTIVE)
4695 goto unlock;
4696
4697 if (!data->group) {
4698 pmu->read(event);
4699 data->ret = 0;
4700 goto unlock;
4701 }
4702
4703 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
4704
4705 pmu->read(event);
4706
4707 for_each_sibling_event(sub, event)
4708 perf_pmu_read(sub);
4709
4710 data->ret = pmu->commit_txn(pmu);
4711
4712 unlock:
4713 raw_spin_unlock(&ctx->lock);
4714 }
4715
4716 static inline u64 perf_event_count(struct perf_event *event, bool self)
4717 {
4718 if (self)
4719 return local64_read(&event->count);
4720
4721 return local64_read(&event->count) + atomic64_read(&event->child_count);
4722 }
4723
4724 static void calc_timer_values(struct perf_event *event,
4725 u64 *now,
4726 u64 *enabled,
4727 u64 *running)
4728 {
4729 u64 ctx_time;
4730
4731 *now = perf_clock();
4732 ctx_time = perf_event_time_now(event, *now);
4733 __perf_update_times(event, ctx_time, enabled, running);
4734 }
4735
4736 /*
4737 * NMI-safe method to read a local event, that is an event that
4738 * is:
4739 * - either for the current task, or for this CPU
4740 * - does not have inherit set, for inherited task events
4741 * will not be local and we cannot read them atomically
4742 * - must not have a pmu::count method
4743 */
4744 int perf_event_read_local(struct perf_event *event, u64 *value,
4745 u64 *enabled, u64 *running)
4746 {
4747 unsigned long flags;
4748 int event_oncpu;
4749 int event_cpu;
4750 int ret = 0;
4751
4752 /*
4753 * Disabling interrupts avoids all counter scheduling (context
4754 * switches, timer based rotation and IPIs).
4755 */
4756 local_irq_save(flags);
4757
4758 /*
4759 * It must not be an event with inherit set, we cannot read
4760 * all child counters from atomic context.
4761 */
4762 if (event->attr.inherit) {
4763 ret = -EOPNOTSUPP;
4764 goto out;
4765 }
4766
4767 /* If this is a per-task event, it must be for current */
4768 if ((event->attach_state & PERF_ATTACH_TASK) &&
4769 event->hw.target != current) {
4770 ret = -EINVAL;
4771 goto out;
4772 }
4773
4774 /*
4775 * Get the event CPU numbers, and adjust them to local if the event is
4776 * a per-package event that can be read locally
4777 */
4778 event_oncpu = __perf_event_read_cpu(event, event->oncpu);
4779 event_cpu = __perf_event_read_cpu(event, event->cpu);
4780
4781 /* If this is a per-CPU event, it must be for this CPU */
4782 if (!(event->attach_state & PERF_ATTACH_TASK) &&
4783 event_cpu != smp_processor_id()) {
4784 ret = -EINVAL;
4785 goto out;
4786 }
4787
4788 /* If this is a pinned event it must be running on this CPU */
4789 if (event->attr.pinned && event_oncpu != smp_processor_id()) {
4790 ret = -EBUSY;
4791 goto out;
4792 }
4793
4794 /*
4795 	 * If the event is currently on this CPU, it's either a per-task event,
4796 	 * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
4797 * oncpu == -1).
4798 */
4799 if (event_oncpu == smp_processor_id())
4800 event->pmu->read(event);
4801
4802 *value = local64_read(&event->count);
4803 if (enabled || running) {
4804 u64 __enabled, __running, __now;
4805
4806 calc_timer_values(event, &__now, &__enabled, &__running);
4807 if (enabled)
4808 *enabled = __enabled;
4809 if (running)
4810 *running = __running;
4811 }
4812 out:
4813 local_irq_restore(flags);
4814
4815 return ret;
4816 }
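/*
 * Illustrative (hypothetical) in-kernel caller of perf_event_read_local(),
 * showing the constraints listed above; this is a sketch, not code used in
 * this file:
 *
 *	u64 value, enabled, running;
 *	int err;
 *
 *	err = perf_event_read_local(event, &value, &enabled, &running);
 *	if (err)
 *		return err;	// -EOPNOTSUPP, -EINVAL or -EBUSY, see above
 *
 *	// 'value' is the raw count; for multiplexed events it can be
 *	// scaled by enabled/running.
 */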
4817
4818 static int perf_event_read(struct perf_event *event, bool group)
4819 {
4820 enum perf_event_state state = READ_ONCE(event->state);
4821 int event_cpu, ret = 0;
4822
4823 /*
4824 * If event is enabled and currently active on a CPU, update the
4825 * value in the event structure:
4826 */
4827 again:
4828 if (state == PERF_EVENT_STATE_ACTIVE) {
4829 struct perf_read_data data;
4830
4831 /*
4832 * Orders the ->state and ->oncpu loads such that if we see
4833 * ACTIVE we must also see the right ->oncpu.
4834 *
4835 * Matches the smp_wmb() from event_sched_in().
4836 */
4837 smp_rmb();
4838
4839 event_cpu = READ_ONCE(event->oncpu);
4840 if ((unsigned)event_cpu >= nr_cpu_ids)
4841 return 0;
4842
4843 data = (struct perf_read_data){
4844 .event = event,
4845 .group = group,
4846 .ret = 0,
4847 };
4848
4849 preempt_disable();
4850 event_cpu = __perf_event_read_cpu(event, event_cpu);
4851
4852 /*
4853 * Purposely ignore the smp_call_function_single() return
4854 * value.
4855 *
4856 * If event_cpu isn't a valid CPU it means the event got
4857 * scheduled out and that will have updated the event count.
4858 *
4859 * Therefore, either way, we'll have an up-to-date event count
4860 * after this.
4861 */
4862 (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
4863 preempt_enable();
4864 ret = data.ret;
4865
4866 } else if (state == PERF_EVENT_STATE_INACTIVE) {
4867 struct perf_event_context *ctx = event->ctx;
4868 unsigned long flags;
4869
4870 raw_spin_lock_irqsave(&ctx->lock, flags);
4871 state = event->state;
4872 if (state != PERF_EVENT_STATE_INACTIVE) {
4873 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4874 goto again;
4875 }
4876
4877 /*
4878 * May read while context is not active (e.g., thread is
4879 * blocked), in that case we cannot update context time
4880 */
4881 ctx_time_update_event(ctx, event);
4882
4883 perf_event_update_time(event);
4884 if (group)
4885 perf_event_update_sibling_time(event);
4886 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4887 }
4888
4889 return ret;
4890 }
4891
4892 /*
4893 * Initialize the perf_event context in a task_struct:
4894 */
4895 static void __perf_event_init_context(struct perf_event_context *ctx)
4896 {
4897 raw_spin_lock_init(&ctx->lock);
4898 mutex_init(&ctx->mutex);
4899 INIT_LIST_HEAD(&ctx->pmu_ctx_list);
4900 perf_event_groups_init(&ctx->pinned_groups);
4901 perf_event_groups_init(&ctx->flexible_groups);
4902 INIT_LIST_HEAD(&ctx->event_list);
4903 refcount_set(&ctx->refcount, 1);
4904 }
4905
4906 static void
4907 __perf_init_event_pmu_context(struct perf_event_pmu_context *epc, struct pmu *pmu)
4908 {
4909 epc->pmu = pmu;
4910 INIT_LIST_HEAD(&epc->pmu_ctx_entry);
4911 INIT_LIST_HEAD(&epc->pinned_active);
4912 INIT_LIST_HEAD(&epc->flexible_active);
4913 atomic_set(&epc->refcount, 1);
4914 }
4915
4916 static struct perf_event_context *
4917 alloc_perf_context(struct task_struct *task)
4918 {
4919 struct perf_event_context *ctx;
4920
4921 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
4922 if (!ctx)
4923 return NULL;
4924
4925 __perf_event_init_context(ctx);
4926 if (task)
4927 ctx->task = get_task_struct(task);
4928
4929 return ctx;
4930 }
4931
4932 static struct task_struct *
4933 find_lively_task_by_vpid(pid_t vpid)
4934 {
4935 struct task_struct *task;
4936
4937 rcu_read_lock();
4938 if (!vpid)
4939 task = current;
4940 else
4941 task = find_task_by_vpid(vpid);
4942 if (task)
4943 get_task_struct(task);
4944 rcu_read_unlock();
4945
4946 if (!task)
4947 return ERR_PTR(-ESRCH);
4948
4949 return task;
4950 }
4951
4952 /*
4953 * Returns a matching context with refcount and pincount.
4954 */
4955 static struct perf_event_context *
4956 find_get_context(struct task_struct *task, struct perf_event *event)
4957 {
4958 struct perf_event_context *ctx, *clone_ctx = NULL;
4959 struct perf_cpu_context *cpuctx;
4960 unsigned long flags;
4961 int err;
4962
4963 if (!task) {
4964 /* Must be root to operate on a CPU event: */
4965 err = perf_allow_cpu();
4966 if (err)
4967 return ERR_PTR(err);
4968
4969 cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
4970 ctx = &cpuctx->ctx;
4971 get_ctx(ctx);
4972 raw_spin_lock_irqsave(&ctx->lock, flags);
4973 ++ctx->pin_count;
4974 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4975
4976 return ctx;
4977 }
4978
4979 err = -EINVAL;
4980 retry:
4981 ctx = perf_lock_task_context(task, &flags);
4982 if (ctx) {
4983 clone_ctx = unclone_ctx(ctx);
4984 ++ctx->pin_count;
4985
4986 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4987
4988 if (clone_ctx)
4989 put_ctx(clone_ctx);
4990 } else {
4991 ctx = alloc_perf_context(task);
4992 err = -ENOMEM;
4993 if (!ctx)
4994 goto errout;
4995
4996 err = 0;
4997 mutex_lock(&task->perf_event_mutex);
4998 /*
4999 		 * If it has already passed perf_event_exit_task(),
5000 		 * we must see PF_EXITING; it takes this mutex too.
5001 */
5002 if (task->flags & PF_EXITING)
5003 err = -ESRCH;
5004 else if (task->perf_event_ctxp)
5005 err = -EAGAIN;
5006 else {
5007 get_ctx(ctx);
5008 ++ctx->pin_count;
5009 rcu_assign_pointer(task->perf_event_ctxp, ctx);
5010 }
5011 mutex_unlock(&task->perf_event_mutex);
5012
5013 if (unlikely(err)) {
5014 put_ctx(ctx);
5015
5016 if (err == -EAGAIN)
5017 goto retry;
5018 goto errout;
5019 }
5020 }
5021
5022 return ctx;
5023
5024 errout:
5025 return ERR_PTR(err);
5026 }
5027
5028 static struct perf_event_pmu_context *
5029 find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
5030 struct perf_event *event)
5031 {
5032 struct perf_event_pmu_context *new = NULL, *pos = NULL, *epc;
5033
5034 if (!ctx->task) {
5035 /*
5036 * perf_pmu_migrate_context() / __perf_pmu_install_event()
5037 * relies on the fact that find_get_pmu_context() cannot fail
5038 * for CPU contexts.
5039 */
5040 struct perf_cpu_pmu_context *cpc;
5041
5042 cpc = *per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
5043 epc = &cpc->epc;
5044 raw_spin_lock_irq(&ctx->lock);
5045 if (!epc->ctx) {
5046 /*
5047 * One extra reference for the pmu; see perf_pmu_free().
5048 */
5049 atomic_set(&epc->refcount, 2);
5050 epc->embedded = 1;
5051 list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
5052 epc->ctx = ctx;
5053 } else {
5054 WARN_ON_ONCE(epc->ctx != ctx);
5055 atomic_inc(&epc->refcount);
5056 }
5057 raw_spin_unlock_irq(&ctx->lock);
5058 return epc;
5059 }
5060
5061 new = kzalloc(sizeof(*epc), GFP_KERNEL);
5062 if (!new)
5063 return ERR_PTR(-ENOMEM);
5064
5065 __perf_init_event_pmu_context(new, pmu);
5066
5067 /*
5068 * XXX
5069 *
5070 * lockdep_assert_held(&ctx->mutex);
5071 *
5072 * can't because perf_event_init_task() doesn't actually hold the
5073 * child_ctx->mutex.
5074 */
5075
5076 raw_spin_lock_irq(&ctx->lock);
5077 list_for_each_entry(epc, &ctx->pmu_ctx_list, pmu_ctx_entry) {
5078 if (epc->pmu == pmu) {
5079 WARN_ON_ONCE(epc->ctx != ctx);
5080 atomic_inc(&epc->refcount);
5081 goto found_epc;
5082 }
5083 /* Make sure the pmu_ctx_list is sorted by PMU type: */
5084 if (!pos && epc->pmu->type > pmu->type)
5085 pos = epc;
5086 }
5087
5088 epc = new;
5089 new = NULL;
5090
5091 if (!pos)
5092 list_add_tail(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
5093 else
5094 list_add(&epc->pmu_ctx_entry, pos->pmu_ctx_entry.prev);
5095
5096 epc->ctx = ctx;
5097
5098 found_epc:
5099 raw_spin_unlock_irq(&ctx->lock);
5100 kfree(new);
5101
5102 return epc;
5103 }
5104
5105 static void get_pmu_ctx(struct perf_event_pmu_context *epc)
5106 {
5107 WARN_ON_ONCE(!atomic_inc_not_zero(&epc->refcount));
5108 }
5109
5110 static void free_cpc_rcu(struct rcu_head *head)
5111 {
5112 struct perf_cpu_pmu_context *cpc =
5113 container_of(head, typeof(*cpc), epc.rcu_head);
5114
5115 kfree(cpc);
5116 }
5117
5118 static void free_epc_rcu(struct rcu_head *head)
5119 {
5120 struct perf_event_pmu_context *epc = container_of(head, typeof(*epc), rcu_head);
5121
5122 kfree(epc);
5123 }
5124
5125 static void put_pmu_ctx(struct perf_event_pmu_context *epc)
5126 {
5127 struct perf_event_context *ctx = epc->ctx;
5128 unsigned long flags;
5129
5130 /*
5131 * XXX
5132 *
5133 * lockdep_assert_held(&ctx->mutex);
5134 *
5135 * can't because of the call-site in _free_event()/put_event()
5136 * which isn't always called under ctx->mutex.
5137 */
5138 if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
5139 return;
5140
5141 WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
5142
5143 list_del_init(&epc->pmu_ctx_entry);
5144 epc->ctx = NULL;
5145
5146 WARN_ON_ONCE(!list_empty(&epc->pinned_active));
5147 WARN_ON_ONCE(!list_empty(&epc->flexible_active));
5148
5149 raw_spin_unlock_irqrestore(&ctx->lock, flags);
5150
5151 if (epc->embedded) {
5152 call_rcu(&epc->rcu_head, free_cpc_rcu);
5153 return;
5154 }
5155
5156 call_rcu(&epc->rcu_head, free_epc_rcu);
5157 }
5158
5159 static void perf_event_free_filter(struct perf_event *event);
5160
5161 static void free_event_rcu(struct rcu_head *head)
5162 {
5163 struct perf_event *event = container_of(head, typeof(*event), rcu_head);
5164
5165 if (event->ns)
5166 put_pid_ns(event->ns);
5167 perf_event_free_filter(event);
5168 kmem_cache_free(perf_event_cache, event);
5169 }
5170
5171 static void ring_buffer_attach(struct perf_event *event,
5172 struct perf_buffer *rb);
5173
5174 static void detach_sb_event(struct perf_event *event)
5175 {
5176 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
5177
5178 raw_spin_lock(&pel->lock);
5179 list_del_rcu(&event->sb_list);
5180 raw_spin_unlock(&pel->lock);
5181 }
5182
5183 static bool is_sb_event(struct perf_event *event)
5184 {
5185 struct perf_event_attr *attr = &event->attr;
5186
5187 if (event->parent)
5188 return false;
5189
5190 if (event->attach_state & PERF_ATTACH_TASK)
5191 return false;
5192
5193 if (attr->mmap || attr->mmap_data || attr->mmap2 ||
5194 attr->comm || attr->comm_exec ||
5195 attr->task || attr->ksymbol ||
5196 attr->context_switch || attr->text_poke ||
5197 attr->bpf_event)
5198 return true;
5199
5200 return false;
5201 }
5202
5203 static void unaccount_pmu_sb_event(struct perf_event *event)
5204 {
5205 if (is_sb_event(event))
5206 detach_sb_event(event);
5207 }
5208
5209 #ifdef CONFIG_NO_HZ_FULL
5210 static DEFINE_SPINLOCK(nr_freq_lock);
5211 #endif
5212
5213 static void unaccount_freq_event_nohz(void)
5214 {
5215 #ifdef CONFIG_NO_HZ_FULL
5216 spin_lock(&nr_freq_lock);
5217 if (atomic_dec_and_test(&nr_freq_events))
5218 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
5219 spin_unlock(&nr_freq_lock);
5220 #endif
5221 }
5222
5223 static void unaccount_freq_event(void)
5224 {
5225 if (tick_nohz_full_enabled())
5226 unaccount_freq_event_nohz();
5227 else
5228 atomic_dec(&nr_freq_events);
5229 }
5230
5231
5232 static struct perf_ctx_data *
5233 alloc_perf_ctx_data(struct kmem_cache *ctx_cache, bool global)
5234 {
5235 struct perf_ctx_data *cd;
5236
5237 cd = kzalloc(sizeof(*cd), GFP_KERNEL);
5238 if (!cd)
5239 return NULL;
5240
5241 cd->data = kmem_cache_zalloc(ctx_cache, GFP_KERNEL);
5242 if (!cd->data) {
5243 kfree(cd);
5244 return NULL;
5245 }
5246
5247 cd->global = global;
5248 cd->ctx_cache = ctx_cache;
5249 refcount_set(&cd->refcount, 1);
5250
5251 return cd;
5252 }
5253
5254 static void free_perf_ctx_data(struct perf_ctx_data *cd)
5255 {
5256 kmem_cache_free(cd->ctx_cache, cd->data);
5257 kfree(cd);
5258 }
5259
5260 static void __free_perf_ctx_data_rcu(struct rcu_head *rcu_head)
5261 {
5262 struct perf_ctx_data *cd;
5263
5264 cd = container_of(rcu_head, struct perf_ctx_data, rcu_head);
5265 free_perf_ctx_data(cd);
5266 }
5267
5268 static inline void perf_free_ctx_data_rcu(struct perf_ctx_data *cd)
5269 {
5270 call_rcu(&cd->rcu_head, __free_perf_ctx_data_rcu);
5271 }
5272
5273 static int
5274 attach_task_ctx_data(struct task_struct *task, struct kmem_cache *ctx_cache,
5275 bool global)
5276 {
5277 struct perf_ctx_data *cd, *old = NULL;
5278
5279 cd = alloc_perf_ctx_data(ctx_cache, global);
5280 if (!cd)
5281 return -ENOMEM;
5282
5283 for (;;) {
5284 if (try_cmpxchg((struct perf_ctx_data **)&task->perf_ctx_data, &old, cd)) {
5285 if (old)
5286 perf_free_ctx_data_rcu(old);
5287 return 0;
5288 }
5289
5290 if (!old) {
5291 /*
5292 * After seeing a dead @old, we raced with
5293 * removal and lost, try again to install @cd.
5294 */
5295 continue;
5296 }
5297
5298 if (refcount_inc_not_zero(&old->refcount)) {
5299 free_perf_ctx_data(cd); /* unused */
5300 return 0;
5301 }
5302
5303 /*
5304 * @old is a dead object, refcount==0 is stable, try and
5305 * replace it with @cd.
5306 */
5307 }
5308 return 0;
5309 }
5310
5311 static void __detach_global_ctx_data(void);
5312 DEFINE_STATIC_PERCPU_RWSEM(global_ctx_data_rwsem);
5313 static refcount_t global_ctx_data_ref;
5314
5315 static int
5316 attach_global_ctx_data(struct kmem_cache *ctx_cache)
5317 {
5318 struct task_struct *g, *p;
5319 struct perf_ctx_data *cd;
5320 int ret;
5321
5322 if (refcount_inc_not_zero(&global_ctx_data_ref))
5323 return 0;
5324
5325 guard(percpu_write)(&global_ctx_data_rwsem);
5326 if (refcount_inc_not_zero(&global_ctx_data_ref))
5327 return 0;
5328 again:
5329 /* Allocate everything */
5330 scoped_guard (rcu) {
5331 for_each_process_thread(g, p) {
5332 cd = rcu_dereference(p->perf_ctx_data);
5333 if (cd && !cd->global) {
5334 cd->global = 1;
5335 if (!refcount_inc_not_zero(&cd->refcount))
5336 cd = NULL;
5337 }
5338 if (!cd) {
5339 get_task_struct(p);
5340 goto alloc;
5341 }
5342 }
5343 }
5344
5345 refcount_set(&global_ctx_data_ref, 1);
5346
5347 return 0;
5348 alloc:
5349 ret = attach_task_ctx_data(p, ctx_cache, true);
5350 put_task_struct(p);
5351 if (ret) {
5352 __detach_global_ctx_data();
5353 return ret;
5354 }
5355 goto again;
5356 }
5357
5358 static int
5359 attach_perf_ctx_data(struct perf_event *event)
5360 {
5361 struct task_struct *task = event->hw.target;
5362 struct kmem_cache *ctx_cache = event->pmu->task_ctx_cache;
5363 int ret;
5364
5365 if (!ctx_cache)
5366 return -ENOMEM;
5367
5368 if (task)
5369 return attach_task_ctx_data(task, ctx_cache, false);
5370
5371 ret = attach_global_ctx_data(ctx_cache);
5372 if (ret)
5373 return ret;
5374
5375 event->attach_state |= PERF_ATTACH_GLOBAL_DATA;
5376 return 0;
5377 }
5378
5379 static void
5380 detach_task_ctx_data(struct task_struct *p)
5381 {
5382 struct perf_ctx_data *cd;
5383
5384 scoped_guard (rcu) {
5385 cd = rcu_dereference(p->perf_ctx_data);
5386 if (!cd || !refcount_dec_and_test(&cd->refcount))
5387 return;
5388 }
5389
5390 /*
5391 * The old ctx_data may be lost because of the race.
5392 	 * Nothing needs to be done in that case.
5393 * See attach_task_ctx_data().
5394 */
5395 if (try_cmpxchg((struct perf_ctx_data **)&p->perf_ctx_data, &cd, NULL))
5396 perf_free_ctx_data_rcu(cd);
5397 }
5398
5399 static void __detach_global_ctx_data(void)
5400 {
5401 struct task_struct *g, *p;
5402 struct perf_ctx_data *cd;
5403
5404 again:
5405 scoped_guard (rcu) {
5406 for_each_process_thread(g, p) {
5407 cd = rcu_dereference(p->perf_ctx_data);
5408 if (!cd || !cd->global)
5409 continue;
5410 cd->global = 0;
5411 get_task_struct(p);
5412 goto detach;
5413 }
5414 }
5415 return;
5416 detach:
5417 detach_task_ctx_data(p);
5418 put_task_struct(p);
5419 goto again;
5420 }
5421
5422 static void detach_global_ctx_data(void)
5423 {
5424 if (refcount_dec_not_one(&global_ctx_data_ref))
5425 return;
5426
5427 guard(percpu_write)(&global_ctx_data_rwsem);
5428 if (!refcount_dec_and_test(&global_ctx_data_ref))
5429 return;
5430
5431 /* remove everything */
5432 __detach_global_ctx_data();
5433 }
5434
5435 static void detach_perf_ctx_data(struct perf_event *event)
5436 {
5437 struct task_struct *task = event->hw.target;
5438
5439 event->attach_state &= ~PERF_ATTACH_TASK_DATA;
5440
5441 if (task)
5442 return detach_task_ctx_data(task);
5443
5444 if (event->attach_state & PERF_ATTACH_GLOBAL_DATA) {
5445 detach_global_ctx_data();
5446 event->attach_state &= ~PERF_ATTACH_GLOBAL_DATA;
5447 }
5448 }
5449
5450 static void unaccount_event(struct perf_event *event)
5451 {
5452 bool dec = false;
5453
5454 if (event->parent)
5455 return;
5456
5457 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
5458 dec = true;
5459 if (event->attr.mmap || event->attr.mmap_data)
5460 atomic_dec(&nr_mmap_events);
5461 if (event->attr.build_id)
5462 atomic_dec(&nr_build_id_events);
5463 if (event->attr.comm)
5464 atomic_dec(&nr_comm_events);
5465 if (event->attr.namespaces)
5466 atomic_dec(&nr_namespaces_events);
5467 if (event->attr.cgroup)
5468 atomic_dec(&nr_cgroup_events);
5469 if (event->attr.task)
5470 atomic_dec(&nr_task_events);
5471 if (event->attr.freq)
5472 unaccount_freq_event();
5473 if (event->attr.context_switch) {
5474 dec = true;
5475 atomic_dec(&nr_switch_events);
5476 }
5477 if (is_cgroup_event(event))
5478 dec = true;
5479 if (has_branch_stack(event))
5480 dec = true;
5481 if (event->attr.ksymbol)
5482 atomic_dec(&nr_ksymbol_events);
5483 if (event->attr.bpf_event)
5484 atomic_dec(&nr_bpf_events);
5485 if (event->attr.text_poke)
5486 atomic_dec(&nr_text_poke_events);
5487
5488 if (dec) {
5489 if (!atomic_add_unless(&perf_sched_count, -1, 1))
5490 schedule_delayed_work(&perf_sched_work, HZ);
5491 }
5492
5493 unaccount_pmu_sb_event(event);
5494 }
5495
5496 static void perf_sched_delayed(struct work_struct *work)
5497 {
5498 mutex_lock(&perf_sched_mutex);
5499 if (atomic_dec_and_test(&perf_sched_count))
5500 static_branch_disable(&perf_sched_events);
5501 mutex_unlock(&perf_sched_mutex);
5502 }
5503
5504 /*
5505 * The following implement mutual exclusion of events on "exclusive" pmus
5506 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
5507 * at a time, so we disallow creating events that might conflict, namely:
5508 *
5509 * 1) cpu-wide events in the presence of per-task events,
5510 * 2) per-task events in the presence of cpu-wide events,
5511 * 3) two matching events on the same perf_event_context.
5512 *
5513 * The former two cases are handled in the allocation path (perf_event_alloc(),
5514 * _free_event()), the latter -- before the first perf_install_in_context().
5515 */
5516 static int exclusive_event_init(struct perf_event *event)
5517 {
5518 struct pmu *pmu = event->pmu;
5519
5520 if (!is_exclusive_pmu(pmu))
5521 return 0;
5522
5523 /*
5524 * Prevent co-existence of per-task and cpu-wide events on the
5525 * same exclusive pmu.
5526 *
5527 * Negative pmu::exclusive_cnt means there are cpu-wide
5528 * events on this "exclusive" pmu, positive means there are
5529 * per-task events.
5530 *
5531 * Since this is called in perf_event_alloc() path, event::ctx
5532 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
5533 * to mean "per-task event", because unlike other attach states it
5534 * never gets cleared.
5535 */
5536 if (event->attach_state & PERF_ATTACH_TASK) {
5537 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
5538 return -EBUSY;
5539 } else {
5540 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
5541 return -EBUSY;
5542 }
5543
5544 event->attach_state |= PERF_ATTACH_EXCLUSIVE;
5545
5546 return 0;
5547 }
5548
5549 static void exclusive_event_destroy(struct perf_event *event)
5550 {
5551 struct pmu *pmu = event->pmu;
5552
5553 /* see comment in exclusive_event_init() */
5554 if (event->attach_state & PERF_ATTACH_TASK)
5555 atomic_dec(&pmu->exclusive_cnt);
5556 else
5557 atomic_inc(&pmu->exclusive_cnt);
5558
5559 event->attach_state &= ~PERF_ATTACH_EXCLUSIVE;
5560 }
5561
5562 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
5563 {
5564 if ((e1->pmu == e2->pmu) &&
5565 (e1->cpu == e2->cpu ||
5566 e1->cpu == -1 ||
5567 e2->cpu == -1))
5568 return true;
5569 return false;
5570 }
5571
5572 static bool exclusive_event_installable(struct perf_event *event,
5573 struct perf_event_context *ctx)
5574 {
5575 struct perf_event *iter_event;
5576 struct pmu *pmu = event->pmu;
5577
5578 lockdep_assert_held(&ctx->mutex);
5579
5580 if (!is_exclusive_pmu(pmu))
5581 return true;
5582
5583 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
5584 if (exclusive_event_match(iter_event, event))
5585 return false;
5586 }
5587
5588 return true;
5589 }
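
/*
 * Illustration (userspace sketch; names and ordering are illustrative only):
 * on a PMU advertising PERF_PMU_CAP_EXCLUSIVE -- intel_pt is one such PMU --
 * opening a per-task event and then a cpu-wide event trips rule 1) above,
 * so the second perf_event_open() fails with -EBUSY:
 *
 *	attr.type = exclusive_pmu_type;		// e.g. read from sysfs
 *	fd1 = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0); // OK
 *	fd2 = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);	  // -EBUSY
 */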
5590
5591 static void perf_free_addr_filters(struct perf_event *event);
5592
5593 /* vs perf_event_alloc() error */
5594 static void __free_event(struct perf_event *event)
5595 {
5596 struct pmu *pmu = event->pmu;
5597
5598 if (event->attach_state & PERF_ATTACH_CALLCHAIN)
5599 put_callchain_buffers();
5600
5601 kfree(event->addr_filter_ranges);
5602
5603 if (event->attach_state & PERF_ATTACH_EXCLUSIVE)
5604 exclusive_event_destroy(event);
5605
5606 if (is_cgroup_event(event))
5607 perf_detach_cgroup(event);
5608
5609 if (event->attach_state & PERF_ATTACH_TASK_DATA)
5610 detach_perf_ctx_data(event);
5611
5612 if (event->destroy)
5613 event->destroy(event);
5614
5615 /*
5616 * Must be after ->destroy(), due to uprobe_perf_close() using
5617 * hw.target.
5618 */
5619 if (event->hw.target)
5620 put_task_struct(event->hw.target);
5621
5622 if (event->pmu_ctx) {
5623 /*
5624 * put_pmu_ctx() needs an event->ctx reference, because of
5625 * epc->ctx.
5626 */
5627 WARN_ON_ONCE(!pmu);
5628 WARN_ON_ONCE(!event->ctx);
5629 WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx);
5630 put_pmu_ctx(event->pmu_ctx);
5631 }
5632
5633 /*
5634 * perf_event_free_task() relies on put_ctx() being 'last', in
5635 * particular all task references must be cleaned up.
5636 */
5637 if (event->ctx)
5638 put_ctx(event->ctx);
5639
5640 if (pmu) {
5641 module_put(pmu->module);
5642 scoped_guard (spinlock, &pmu->events_lock) {
5643 list_del(&event->pmu_list);
5644 wake_up_var(pmu);
5645 }
5646 }
5647
5648 call_rcu(&event->rcu_head, free_event_rcu);
5649 }
5650
5651 DEFINE_FREE(__free_event, struct perf_event *, if (_T) __free_event(_T))
5652
5653 /* vs perf_event_alloc() success */
5654 static void _free_event(struct perf_event *event)
5655 {
5656 irq_work_sync(&event->pending_irq);
5657 irq_work_sync(&event->pending_disable_irq);
5658
5659 unaccount_event(event);
5660
5661 security_perf_event_free(event);
5662
5663 if (event->rb) {
5664 /*
5665 * Can happen when we close an event with re-directed output.
5666 *
5667 * Since we have a 0 refcount, perf_mmap_close() will skip
5668 * over us; possibly making our ring_buffer_put() the last.
5669 */
5670 mutex_lock(&event->mmap_mutex);
5671 ring_buffer_attach(event, NULL);
5672 mutex_unlock(&event->mmap_mutex);
5673 }
5674
5675 perf_event_free_bpf_prog(event);
5676 perf_free_addr_filters(event);
5677
5678 __free_event(event);
5679 }
5680
5681 /*
5682 * Used to free events which have a known refcount of 1, such as in error paths
5683 * of inherited events.
5684 */
5685 static void free_event(struct perf_event *event)
5686 {
5687 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
5688 "unexpected event refcount: %ld; ptr=%p\n",
5689 atomic_long_read(&event->refcount), event)) {
5690 /* leak to avoid use-after-free */
5691 return;
5692 }
5693
5694 _free_event(event);
5695 }
5696
5697 /*
5698 * Remove user event from the owner task.
5699 */
5700 static void perf_remove_from_owner(struct perf_event *event)
5701 {
5702 struct task_struct *owner;
5703
5704 rcu_read_lock();
5705 /*
5706 * Matches the smp_store_release() in perf_event_exit_task(). If we
5707 * observe !owner it means the list deletion is complete and we can
5708 * indeed free this event, otherwise we need to serialize on
5709 * owner->perf_event_mutex.
5710 */
5711 owner = READ_ONCE(event->owner);
5712 if (owner) {
5713 /*
5714 * Since delayed_put_task_struct() also drops the last
5715 * task reference we can safely take a new reference
5716 * while holding the rcu_read_lock().
5717 */
5718 get_task_struct(owner);
5719 }
5720 rcu_read_unlock();
5721
5722 if (owner) {
5723 /*
5724 * If we're here through perf_event_exit_task() we're already
5725 * holding ctx->mutex which would be an inversion wrt. the
5726 * normal lock order.
5727 *
5728 * However we can safely take this lock because it's the child
5729 * ctx->mutex.
5730 */
5731 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
5732
5733 /*
5734 * We have to re-check the event->owner field, if it is cleared
5735 * we raced with perf_event_exit_task(), acquiring the mutex
5736 * ensured they're done, and we can proceed with freeing the
5737 * event.
5738 */
5739 if (event->owner) {
5740 list_del_init(&event->owner_entry);
5741 smp_store_release(&event->owner, NULL);
5742 }
5743 mutex_unlock(&owner->perf_event_mutex);
5744 put_task_struct(owner);
5745 }
5746 }
5747
5748 static void put_event(struct perf_event *event)
5749 {
5750 struct perf_event *parent;
5751
5752 if (!atomic_long_dec_and_test(&event->refcount))
5753 return;
5754
5755 parent = event->parent;
5756 _free_event(event);
5757
5758 /* Matches the refcount bump in inherit_event() */
5759 if (parent)
5760 put_event(parent);
5761 }
5762
5763 /*
5764 * Kill an event dead; while event:refcount will preserve the event
5765 * object, it will not preserve its functionality. Once the last 'user'
5766 * gives up the object, we'll destroy the thing.
5767 */
5768 int perf_event_release_kernel(struct perf_event *event)
5769 {
5770 struct perf_event_context *ctx = event->ctx;
5771 struct perf_event *child, *tmp;
5772
5773 /*
5774 * If we got here through err_alloc: free_event(event); we will not
5775 * have attached to a context yet.
5776 */
5777 if (!ctx) {
5778 WARN_ON_ONCE(event->attach_state &
5779 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
5780 goto no_ctx;
5781 }
5782
5783 if (!is_kernel_event(event))
5784 perf_remove_from_owner(event);
5785
5786 ctx = perf_event_ctx_lock(event);
5787 WARN_ON_ONCE(ctx->parent_ctx);
5788
5789 /*
5790 * Mark this event as STATE_DEAD, there is no external reference to it
5791 * anymore.
5792 *
5793 * Anybody acquiring event->child_mutex after the below loop _must_
5794 * also see this, most importantly inherit_event() which will avoid
5795 * placing more children on the list.
5796 *
5797 * Thus this guarantees that we will in fact observe and kill _ALL_
5798 * child events.
5799 */
5800 if (event->state > PERF_EVENT_STATE_REVOKED) {
5801 perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD);
5802 } else {
5803 event->state = PERF_EVENT_STATE_DEAD;
5804 }
5805
5806 perf_event_ctx_unlock(event, ctx);
5807
5808 again:
5809 mutex_lock(&event->child_mutex);
5810 list_for_each_entry(child, &event->child_list, child_list) {
5811 /*
5812 * Cannot change, child events are not migrated, see the
5813 * comment with perf_event_ctx_lock_nested().
5814 */
5815 ctx = READ_ONCE(child->ctx);
5816 /*
5817 * Since child_mutex nests inside ctx::mutex, we must jump
5818 * through hoops. We start by grabbing a reference on the ctx.
5819 *
5820 * Since the event cannot get freed while we hold the
5821 * child_mutex, the context must also exist and have a !0
5822 * reference count.
5823 */
5824 get_ctx(ctx);
5825
5826 /*
5827 * Now that we have a ctx ref, we can drop child_mutex, and
5828 * acquire ctx::mutex without fear of it going away. Then we
5829 * can re-acquire child_mutex.
5830 */
5831 mutex_unlock(&event->child_mutex);
5832 mutex_lock(&ctx->mutex);
5833 mutex_lock(&event->child_mutex);
5834
5835 /*
5836 * Now that we hold ctx::mutex and child_mutex, revalidate our
5837 * state, if child is still the first entry, it didn't get freed
5838 * and we can continue doing so.
5839 */
5840 tmp = list_first_entry_or_null(&event->child_list,
5841 struct perf_event, child_list);
5842 if (tmp == child) {
5843 perf_remove_from_context(child, DETACH_GROUP | DETACH_CHILD);
5844 } else {
5845 child = NULL;
5846 }
5847
5848 mutex_unlock(&event->child_mutex);
5849 mutex_unlock(&ctx->mutex);
5850
5851 if (child) {
5852 /* Last reference unless ->pending_task work is pending */
5853 put_event(child);
5854 }
5855 put_ctx(ctx);
5856
5857 goto again;
5858 }
5859 mutex_unlock(&event->child_mutex);
5860
5861 no_ctx:
5862 /*
5863 * Last reference unless ->pending_task work is pending on this event
5864 * or any of its children.
5865 */
5866 put_event(event);
5867 return 0;
5868 }
5869 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
5870
5871 /*
5872 * Called when the last reference to the file is gone.
5873 */
5874 static int perf_release(struct inode *inode, struct file *file)
5875 {
5876 perf_event_release_kernel(file->private_data);
5877 return 0;
5878 }
5879
5880 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
5881 {
5882 struct perf_event *child;
5883 u64 total = 0;
5884
5885 *enabled = 0;
5886 *running = 0;
5887
5888 mutex_lock(&event->child_mutex);
5889
5890 (void)perf_event_read(event, false);
5891 total += perf_event_count(event, false);
5892
5893 *enabled += event->total_time_enabled +
5894 atomic64_read(&event->child_total_time_enabled);
5895 *running += event->total_time_running +
5896 atomic64_read(&event->child_total_time_running);
5897
5898 list_for_each_entry(child, &event->child_list, child_list) {
5899 (void)perf_event_read(child, false);
5900 total += perf_event_count(child, false);
5901 *enabled += child->total_time_enabled;
5902 *running += child->total_time_running;
5903 }
5904 mutex_unlock(&event->child_mutex);
5905
5906 return total;
5907 }
5908
5909 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
5910 {
5911 struct perf_event_context *ctx;
5912 u64 count;
5913
5914 ctx = perf_event_ctx_lock(event);
5915 count = __perf_event_read_value(event, enabled, running);
5916 perf_event_ctx_unlock(event, ctx);
5917
5918 return count;
5919 }
5920 EXPORT_SYMBOL_GPL(perf_event_read_value);
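
/*
 * Usage sketch (assumes an in-kernel @event created elsewhere, e.g. via
 * perf_event_create_kernel_counter(); error handling omitted):
 *
 *	u64 enabled, running, count;
 *
 *	count = perf_event_read_value(event, &enabled, &running);
 *	if (running)	// scale the estimate when the event was multiplexed
 *		count = mul_u64_u64_div_u64(count, enabled, running);
 */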
5921
5922 static int __perf_read_group_add(struct perf_event *leader,
5923 u64 read_format, u64 *values)
5924 {
5925 struct perf_event_context *ctx = leader->ctx;
5926 struct perf_event *sub, *parent;
5927 unsigned long flags;
5928 int n = 1; /* skip @nr */
5929 int ret;
5930
5931 ret = perf_event_read(leader, true);
5932 if (ret)
5933 return ret;
5934
5935 raw_spin_lock_irqsave(&ctx->lock, flags);
5936 /*
5937 * Verify the grouping between the parent and child (inherited)
5938 * events is still intact.
5939 *
5940 * Specifically:
5941 * - leader->ctx->lock pins leader->sibling_list
5942 * - parent->child_mutex pins parent->child_list
5943 * - parent->ctx->mutex pins parent->sibling_list
5944 *
5945 * Because parent->ctx != leader->ctx (and child_list nests inside
5946 * ctx->mutex), group destruction is not atomic between children, also
5947 * see perf_event_release_kernel(). Additionally, parent can grow the
5948 * group.
5949 *
5950 * Therefore it is possible to have parent and child groups in a
5951 * different configuration and summing over such a beast makes no sense
5952 * whatsoever.
5953 *
5954 * Reject this.
5955 */
5956 parent = leader->parent;
5957 if (parent &&
5958 (parent->group_generation != leader->group_generation ||
5959 parent->nr_siblings != leader->nr_siblings)) {
5960 ret = -ECHILD;
5961 goto unlock;
5962 }
5963
5964 /*
5965 * Since we co-schedule groups, {enabled,running} times of siblings
5966 * will be identical to those of the leader, so we only publish one
5967 * set.
5968 */
5969 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
5970 values[n++] += leader->total_time_enabled +
5971 atomic64_read(&leader->child_total_time_enabled);
5972 }
5973
5974 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
5975 values[n++] += leader->total_time_running +
5976 atomic64_read(&leader->child_total_time_running);
5977 }
5978
5979 /*
5980 * Write {count,id} tuples for every sibling.
5981 */
5982 values[n++] += perf_event_count(leader, false);
5983 if (read_format & PERF_FORMAT_ID)
5984 values[n++] = primary_event_id(leader);
5985 if (read_format & PERF_FORMAT_LOST)
5986 values[n++] = atomic64_read(&leader->lost_samples);
5987
5988 for_each_sibling_event(sub, leader) {
5989 values[n++] += perf_event_count(sub, false);
5990 if (read_format & PERF_FORMAT_ID)
5991 values[n++] = primary_event_id(sub);
5992 if (read_format & PERF_FORMAT_LOST)
5993 values[n++] = atomic64_read(&sub->lost_samples);
5994 }
5995
5996 unlock:
5997 raw_spin_unlock_irqrestore(&ctx->lock, flags);
5998 return ret;
5999 }
6000
6001 static int perf_read_group(struct perf_event *event,
6002 u64 read_format, char __user *buf)
6003 {
6004 struct perf_event *leader = event->group_leader, *child;
6005 struct perf_event_context *ctx = leader->ctx;
6006 int ret;
6007 u64 *values;
6008
6009 lockdep_assert_held(&ctx->mutex);
6010
6011 values = kzalloc(event->read_size, GFP_KERNEL);
6012 if (!values)
6013 return -ENOMEM;
6014
6015 values[0] = 1 + leader->nr_siblings;
6016
6017 mutex_lock(&leader->child_mutex);
6018
6019 ret = __perf_read_group_add(leader, read_format, values);
6020 if (ret)
6021 goto unlock;
6022
6023 list_for_each_entry(child, &leader->child_list, child_list) {
6024 ret = __perf_read_group_add(child, read_format, values);
6025 if (ret)
6026 goto unlock;
6027 }
6028
6029 mutex_unlock(&leader->child_mutex);
6030
6031 ret = event->read_size;
6032 if (copy_to_user(buf, values, event->read_size))
6033 ret = -EFAULT;
6034 goto out;
6035
6036 unlock:
6037 mutex_unlock(&leader->child_mutex);
6038 out:
6039 kfree(values);
6040 return ret;
6041 }
6042
6043 static int perf_read_one(struct perf_event *event,
6044 u64 read_format, char __user *buf)
6045 {
6046 u64 enabled, running;
6047 u64 values[5];
6048 int n = 0;
6049
6050 values[n++] = __perf_event_read_value(event, &enabled, &running);
6051 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
6052 values[n++] = enabled;
6053 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
6054 values[n++] = running;
6055 if (read_format & PERF_FORMAT_ID)
6056 values[n++] = primary_event_id(event);
6057 if (read_format & PERF_FORMAT_LOST)
6058 values[n++] = atomic64_read(&event->lost_samples);
6059
6060 if (copy_to_user(buf, values, n * sizeof(u64)))
6061 return -EFAULT;
6062
6063 return n * sizeof(u64);
6064 }
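
/*
 * For reference, the resulting read(2) layout for a non-group event with
 * read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
 * is three u64s in the order written above (minimal userspace sketch):
 *
 *	u64 buf[3];	// value, time_enabled, time_running
 *
 *	if (read(fd, buf, sizeof(buf)) == sizeof(buf))
 *		printf("value=%llu enabled=%llu running=%llu\n",
 *		       buf[0], buf[1], buf[2]);
 */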
6065
6066 static bool is_event_hup(struct perf_event *event)
6067 {
6068 bool no_children;
6069
6070 if (event->state > PERF_EVENT_STATE_EXIT)
6071 return false;
6072
6073 mutex_lock(&event->child_mutex);
6074 no_children = list_empty(&event->child_list);
6075 mutex_unlock(&event->child_mutex);
6076 return no_children;
6077 }
6078
6079 /*
6080 * Read the performance event - simple non-blocking version for now
6081 */
6082 static ssize_t
6083 __perf_read(struct perf_event *event, char __user *buf, size_t count)
6084 {
6085 u64 read_format = event->attr.read_format;
6086 int ret;
6087
6088 /*
6089 * Return end-of-file for a read on an event that is in
6090 * error state (i.e. because it was pinned but it couldn't be
6091 * scheduled on to the CPU at some point).
6092 */
6093 if (event->state == PERF_EVENT_STATE_ERROR)
6094 return 0;
6095
6096 if (count < event->read_size)
6097 return -ENOSPC;
6098
6099 WARN_ON_ONCE(event->ctx->parent_ctx);
6100 if (read_format & PERF_FORMAT_GROUP)
6101 ret = perf_read_group(event, read_format, buf);
6102 else
6103 ret = perf_read_one(event, read_format, buf);
6104
6105 return ret;
6106 }
6107
6108 static ssize_t
6109 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
6110 {
6111 struct perf_event *event = file->private_data;
6112 struct perf_event_context *ctx;
6113 int ret;
6114
6115 ret = security_perf_event_read(event);
6116 if (ret)
6117 return ret;
6118
6119 ctx = perf_event_ctx_lock(event);
6120 ret = __perf_read(event, buf, count);
6121 perf_event_ctx_unlock(event, ctx);
6122
6123 return ret;
6124 }
6125
6126 static __poll_t perf_poll(struct file *file, poll_table *wait)
6127 {
6128 struct perf_event *event = file->private_data;
6129 struct perf_buffer *rb;
6130 __poll_t events = EPOLLHUP;
6131
6132 if (event->state <= PERF_EVENT_STATE_REVOKED)
6133 return EPOLLERR;
6134
6135 poll_wait(file, &event->waitq, wait);
6136
6137 if (event->state <= PERF_EVENT_STATE_REVOKED)
6138 return EPOLLERR;
6139
6140 if (is_event_hup(event))
6141 return events;
6142
6143 if (unlikely(READ_ONCE(event->state) == PERF_EVENT_STATE_ERROR &&
6144 event->attr.pinned))
6145 return EPOLLERR;
6146
6147 /*
6148 * Pin the event->rb by taking event->mmap_mutex; otherwise
6149 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
6150 */
6151 mutex_lock(&event->mmap_mutex);
6152 rb = event->rb;
6153 if (rb)
6154 events = atomic_xchg(&rb->poll, 0);
6155 mutex_unlock(&event->mmap_mutex);
6156 return events;
6157 }
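
/*
 * Userspace sketch: wakeups signalled through the waitqueue above are
 * usually consumed by blocking in poll(2) on the event fd:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// wait for wakeup_events/wakeup_watermark
 */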
6158
6159 static void _perf_event_reset(struct perf_event *event)
6160 {
6161 (void)perf_event_read(event, false);
6162 local64_set(&event->count, 0);
6163 perf_event_update_userpage(event);
6164 }
6165
6166 /* Assume it's not an event with inherit set. */
6167 u64 perf_event_pause(struct perf_event *event, bool reset)
6168 {
6169 struct perf_event_context *ctx;
6170 u64 count;
6171
6172 ctx = perf_event_ctx_lock(event);
6173 WARN_ON_ONCE(event->attr.inherit);
6174 _perf_event_disable(event);
6175 count = local64_read(&event->count);
6176 if (reset)
6177 local64_set(&event->count, 0);
6178 perf_event_ctx_unlock(event, ctx);
6179
6180 return count;
6181 }
6182 EXPORT_SYMBOL_GPL(perf_event_pause);
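
/*
 * Usage sketch (in-kernel, non-inherited @event owned by the caller):
 * snapshot-and-zero the count around a region of interest, then resume
 * counting with perf_event_enable():
 *
 *	u64 count = perf_event_pause(event, true);	// stop + reset
 *	...						// region of interest
 *	perf_event_enable(event);			// resume counting
 */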
6183
6184 /*
6185 * Holding the top-level event's child_mutex means that any
6186 * descendant process that has inherited this event will block
6187 * in perf_event_exit_event() if it goes to exit, thus satisfying the
6188 * task existence requirements of perf_event_enable/disable.
6189 */
6190 static void perf_event_for_each_child(struct perf_event *event,
6191 void (*func)(struct perf_event *))
6192 {
6193 struct perf_event *child;
6194
6195 WARN_ON_ONCE(event->ctx->parent_ctx);
6196
6197 mutex_lock(&event->child_mutex);
6198 func(event);
6199 list_for_each_entry(child, &event->child_list, child_list)
6200 func(child);
6201 mutex_unlock(&event->child_mutex);
6202 }
6203
6204 static void perf_event_for_each(struct perf_event *event,
6205 void (*func)(struct perf_event *))
6206 {
6207 struct perf_event_context *ctx = event->ctx;
6208 struct perf_event *sibling;
6209
6210 lockdep_assert_held(&ctx->mutex);
6211
6212 event = event->group_leader;
6213
6214 perf_event_for_each_child(event, func);
6215 for_each_sibling_event(sibling, event)
6216 perf_event_for_each_child(sibling, func);
6217 }
6218
6219 static void __perf_event_period(struct perf_event *event,
6220 struct perf_cpu_context *cpuctx,
6221 struct perf_event_context *ctx,
6222 void *info)
6223 {
6224 u64 value = *((u64 *)info);
6225 bool active;
6226
6227 if (event->attr.freq) {
6228 event->attr.sample_freq = value;
6229 } else {
6230 event->attr.sample_period = value;
6231 event->hw.sample_period = value;
6232 }
6233
6234 active = (event->state == PERF_EVENT_STATE_ACTIVE);
6235 if (active) {
6236 perf_pmu_disable(event->pmu);
6237 event->pmu->stop(event, PERF_EF_UPDATE);
6238 }
6239
6240 local64_set(&event->hw.period_left, 0);
6241
6242 if (active) {
6243 event->pmu->start(event, PERF_EF_RELOAD);
6244 /*
6245 * Once the period is force-reset, the event starts immediately.
6246 * But the event/group could be throttled. Unthrottle the
6247 * event/group now to avoid the next tick trying to unthrottle
6248 * while we already re-started the event/group.
6249 */
6250 if (event->hw.interrupts == MAX_INTERRUPTS)
6251 perf_event_unthrottle_group(event, true);
6252 perf_pmu_enable(event->pmu);
6253 }
6254 }
6255
6256 static int perf_event_check_period(struct perf_event *event, u64 value)
6257 {
6258 return event->pmu->check_period(event, value);
6259 }
6260
6261 static int _perf_event_period(struct perf_event *event, u64 value)
6262 {
6263 if (!is_sampling_event(event))
6264 return -EINVAL;
6265
6266 if (!value)
6267 return -EINVAL;
6268
6269 if (event->attr.freq) {
6270 if (value > sysctl_perf_event_sample_rate)
6271 return -EINVAL;
6272 } else {
6273 if (perf_event_check_period(event, value))
6274 return -EINVAL;
6275 if (value & (1ULL << 63))
6276 return -EINVAL;
6277 }
6278
6279 event_function_call(event, __perf_event_period, &value);
6280
6281 return 0;
6282 }
6283
6284 int perf_event_period(struct perf_event *event, u64 value)
6285 {
6286 struct perf_event_context *ctx;
6287 int ret;
6288
6289 ctx = perf_event_ctx_lock(event);
6290 ret = _perf_event_period(event, value);
6291 perf_event_ctx_unlock(event, ctx);
6292
6293 return ret;
6294 }
6295 EXPORT_SYMBOL_GPL(perf_event_period);
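
/*
 * Usage sketch: userspace reaches _perf_event_period() through the ioctl
 * (note the argument is a pointer to the new period):
 *
 *	u64 period = 100000;
 *
 *	ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
 *
 * while in-kernel counters created with perf_event_create_kernel_counter()
 * can call perf_event_period(event, period) directly.
 */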
6296
6297 static const struct file_operations perf_fops;
6298
6299 static inline bool is_perf_file(struct fd f)
6300 {
6301 return !fd_empty(f) && fd_file(f)->f_op == &perf_fops;
6302 }
6303
6304 static int perf_event_set_output(struct perf_event *event,
6305 struct perf_event *output_event);
6306 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
6307 static int perf_copy_attr(struct perf_event_attr __user *uattr,
6308 struct perf_event_attr *attr);
6309 static int __perf_event_set_bpf_prog(struct perf_event *event,
6310 struct bpf_prog *prog,
6311 u64 bpf_cookie);
6312
6313 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
6314 {
6315 void (*func)(struct perf_event *);
6316 u32 flags = arg;
6317
6318 if (event->state <= PERF_EVENT_STATE_REVOKED)
6319 return -ENODEV;
6320
6321 switch (cmd) {
6322 case PERF_EVENT_IOC_ENABLE:
6323 func = _perf_event_enable;
6324 break;
6325 case PERF_EVENT_IOC_DISABLE:
6326 func = _perf_event_disable;
6327 break;
6328 case PERF_EVENT_IOC_RESET:
6329 func = _perf_event_reset;
6330 break;
6331
6332 case PERF_EVENT_IOC_REFRESH:
6333 return _perf_event_refresh(event, arg);
6334
6335 case PERF_EVENT_IOC_PERIOD:
6336 {
6337 u64 value;
6338
6339 if (copy_from_user(&value, (u64 __user *)arg, sizeof(value)))
6340 return -EFAULT;
6341
6342 return _perf_event_period(event, value);
6343 }
6344 case PERF_EVENT_IOC_ID:
6345 {
6346 u64 id = primary_event_id(event);
6347
6348 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
6349 return -EFAULT;
6350 return 0;
6351 }
6352
6353 case PERF_EVENT_IOC_SET_OUTPUT:
6354 {
6355 CLASS(fd, output)(arg); // arg == -1 => empty
6356 struct perf_event *output_event = NULL;
6357 if (arg != -1) {
6358 if (!is_perf_file(output))
6359 return -EBADF;
6360 output_event = fd_file(output)->private_data;
6361 }
6362 return perf_event_set_output(event, output_event);
6363 }
6364
6365 case PERF_EVENT_IOC_SET_FILTER:
6366 return perf_event_set_filter(event, (void __user *)arg);
6367
6368 case PERF_EVENT_IOC_SET_BPF:
6369 {
6370 struct bpf_prog *prog;
6371 int err;
6372
6373 prog = bpf_prog_get(arg);
6374 if (IS_ERR(prog))
6375 return PTR_ERR(prog);
6376
6377 err = __perf_event_set_bpf_prog(event, prog, 0);
6378 if (err) {
6379 bpf_prog_put(prog);
6380 return err;
6381 }
6382
6383 return 0;
6384 }
6385
6386 case PERF_EVENT_IOC_PAUSE_OUTPUT: {
6387 struct perf_buffer *rb;
6388
6389 rcu_read_lock();
6390 rb = rcu_dereference(event->rb);
6391 if (!rb || !rb->nr_pages) {
6392 rcu_read_unlock();
6393 return -EINVAL;
6394 }
6395 rb_toggle_paused(rb, !!arg);
6396 rcu_read_unlock();
6397 return 0;
6398 }
6399
6400 case PERF_EVENT_IOC_QUERY_BPF:
6401 return perf_event_query_prog_array(event, (void __user *)arg);
6402
6403 case PERF_EVENT_IOC_MODIFY_ATTRIBUTES: {
6404 struct perf_event_attr new_attr;
6405 int err = perf_copy_attr((struct perf_event_attr __user *)arg,
6406 &new_attr);
6407
6408 if (err)
6409 return err;
6410
6411 return perf_event_modify_attr(event, &new_attr);
6412 }
6413 default:
6414 return -ENOTTY;
6415 }
6416
6417 if (flags & PERF_IOC_FLAG_GROUP)
6418 perf_event_for_each(event, func);
6419 else
6420 perf_event_for_each_child(event, func);
6421
6422 return 0;
6423 }
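
/*
 * Usage sketch: with PERF_IOC_FLAG_GROUP the enable/disable/reset commands
 * above are applied to the whole group, e.g. from userspace:
 *
 *	ioctl(group_leader_fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
 *	ioctl(group_leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */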
6424
6425 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
6426 {
6427 struct perf_event *event = file->private_data;
6428 struct perf_event_context *ctx;
6429 long ret;
6430
6431 /* Treat ioctl like writes as it is likely a mutating operation. */
6432 ret = security_perf_event_write(event);
6433 if (ret)
6434 return ret;
6435
6436 ctx = perf_event_ctx_lock(event);
6437 ret = _perf_ioctl(event, cmd, arg);
6438 perf_event_ctx_unlock(event, ctx);
6439
6440 return ret;
6441 }
6442
6443 #ifdef CONFIG_COMPAT
6444 static long perf_compat_ioctl(struct file *file, unsigned int cmd,
6445 unsigned long arg)
6446 {
6447 switch (_IOC_NR(cmd)) {
6448 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
6449 case _IOC_NR(PERF_EVENT_IOC_ID):
6450 case _IOC_NR(PERF_EVENT_IOC_QUERY_BPF):
6451 case _IOC_NR(PERF_EVENT_IOC_MODIFY_ATTRIBUTES):
6452 /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
6453 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
6454 cmd &= ~IOCSIZE_MASK;
6455 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
6456 }
6457 break;
6458 }
6459 return perf_ioctl(file, cmd, arg);
6460 }
6461 #else
6462 # define perf_compat_ioctl NULL
6463 #endif
6464
6465 int perf_event_task_enable(void)
6466 {
6467 struct perf_event_context *ctx;
6468 struct perf_event *event;
6469
6470 mutex_lock(¤t->perf_event_mutex);
6471 list_for_each_entry(event, ¤t->perf_event_list, owner_entry) {
6472 ctx = perf_event_ctx_lock(event);
6473 perf_event_for_each_child(event, _perf_event_enable);
6474 perf_event_ctx_unlock(event, ctx);
6475 }
6476 mutex_unlock(¤t->perf_event_mutex);
6477
6478 return 0;
6479 }
6480
6481 int perf_event_task_disable(void)
6482 {
6483 struct perf_event_context *ctx;
6484 struct perf_event *event;
6485
6486 mutex_lock(¤t->perf_event_mutex);
6487 list_for_each_entry(event, ¤t->perf_event_list, owner_entry) {
6488 ctx = perf_event_ctx_lock(event);
6489 perf_event_for_each_child(event, _perf_event_disable);
6490 perf_event_ctx_unlock(event, ctx);
6491 }
6492 mutex_unlock(¤t->perf_event_mutex);
6493
6494 return 0;
6495 }
6496
6497 static int perf_event_index(struct perf_event *event)
6498 {
6499 if (event->hw.state & PERF_HES_STOPPED)
6500 return 0;
6501
6502 if (event->state != PERF_EVENT_STATE_ACTIVE)
6503 return 0;
6504
6505 return event->pmu->event_idx(event);
6506 }
6507
6508 static void perf_event_init_userpage(struct perf_event *event)
6509 {
6510 struct perf_event_mmap_page *userpg;
6511 struct perf_buffer *rb;
6512
6513 rcu_read_lock();
6514 rb = rcu_dereference(event->rb);
6515 if (!rb)
6516 goto unlock;
6517
6518 userpg = rb->user_page;
6519
6520 /* Allow new userspace to detect that bit 0 is deprecated */
6521 userpg->cap_bit0_is_deprecated = 1;
6522 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
6523 userpg->data_offset = PAGE_SIZE;
6524 userpg->data_size = perf_data_size(rb);
6525
6526 unlock:
6527 rcu_read_unlock();
6528 }
6529
6530 void __weak arch_perf_update_userpage(
6531 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
6532 {
6533 }
6534
6535 /*
6536 * Callers need to ensure there can be no nesting of this function, otherwise
6537 * the seqlock logic goes bad. We can not serialize this because the arch
6538 * code calls this from NMI context.
6539 */
6540 void perf_event_update_userpage(struct perf_event *event)
6541 {
6542 struct perf_event_mmap_page *userpg;
6543 struct perf_buffer *rb;
6544 u64 enabled, running, now;
6545
6546 rcu_read_lock();
6547 rb = rcu_dereference(event->rb);
6548 if (!rb)
6549 goto unlock;
6550
6551 /*
6552 * compute total_time_enabled, total_time_running
6553 * based on snapshot values taken when the event
6554 * was last scheduled in.
6555 *
6556 * we cannot simply call update_context_time()
6557 * because of locking issues, as we can be called in
6558 * NMI context
6559 */
6560 calc_timer_values(event, &now, &enabled, &running);
6561
6562 userpg = rb->user_page;
6563 /*
6564 * Disable preemption to guarantee consistent time stamps are stored to
6565 * the user page.
6566 */
6567 preempt_disable();
6568 ++userpg->lock;
6569 barrier();
6570 userpg->index = perf_event_index(event);
6571 userpg->offset = perf_event_count(event, false);
6572 if (userpg->index)
6573 userpg->offset -= local64_read(&event->hw.prev_count);
6574
6575 userpg->time_enabled = enabled +
6576 atomic64_read(&event->child_total_time_enabled);
6577
6578 userpg->time_running = running +
6579 atomic64_read(&event->child_total_time_running);
6580
6581 arch_perf_update_userpage(event, userpg, now);
6582
6583 barrier();
6584 ++userpg->lock;
6585 preempt_enable();
6586 unlock:
6587 rcu_read_unlock();
6588 }
6589 EXPORT_SYMBOL_GPL(perf_event_update_userpage);
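
/*
 * Userspace sketch of the pairing seqlock-style read of the page updated
 * above (see also the struct perf_event_mmap_page description in
 * include/uapi/linux/perf_event.h):
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		index   = pc->index;
 *		offset  = pc->offset;
 *		enabled = pc->time_enabled;
 *		running = pc->time_running;
 *		barrier();
 *	} while (pc->lock != seq);
 */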
6590
6591 static void ring_buffer_attach(struct perf_event *event,
6592 struct perf_buffer *rb)
6593 {
6594 struct perf_buffer *old_rb = NULL;
6595 unsigned long flags;
6596
6597 WARN_ON_ONCE(event->parent);
6598
6599 if (event->rb) {
6600 /*
6601 * Should be impossible, we set this when removing
6602 * event->rb_entry and wait/clear when adding event->rb_entry.
6603 */
6604 WARN_ON_ONCE(event->rcu_pending);
6605
6606 old_rb = event->rb;
6607 spin_lock_irqsave(&old_rb->event_lock, flags);
6608 list_del_rcu(&event->rb_entry);
6609 spin_unlock_irqrestore(&old_rb->event_lock, flags);
6610
6611 event->rcu_batches = get_state_synchronize_rcu();
6612 event->rcu_pending = 1;
6613 }
6614
6615 if (rb) {
6616 if (event->rcu_pending) {
6617 cond_synchronize_rcu(event->rcu_batches);
6618 event->rcu_pending = 0;
6619 }
6620
6621 spin_lock_irqsave(&rb->event_lock, flags);
6622 list_add_rcu(&event->rb_entry, &rb->event_list);
6623 spin_unlock_irqrestore(&rb->event_lock, flags);
6624 }
6625
6626 /*
6627 * Avoid racing with perf_mmap_close(AUX): stop the event
6628 * before swizzling the event::rb pointer; if it's getting
6629 * unmapped, its aux_mmap_count will be 0 and it won't
6630 * restart. See the comment in __perf_pmu_output_stop().
6631 *
6632 * Data will inevitably be lost when set_output is done in
6633 * mid-air, but then again, whoever does it like this is
6634 * not in for the data anyway.
6635 */
6636 if (has_aux(event))
6637 perf_event_stop(event, 0);
6638
6639 rcu_assign_pointer(event->rb, rb);
6640
6641 if (old_rb) {
6642 ring_buffer_put(old_rb);
6643 /*
6644 * Since we detached the old rb before attaching the new one,
6645 * we could have missed a wakeup.
6646 * Provide it now.
6647 */
6648 wake_up_all(&event->waitq);
6649 }
6650 }
6651
6652 static void ring_buffer_wakeup(struct perf_event *event)
6653 {
6654 struct perf_buffer *rb;
6655
6656 if (event->parent)
6657 event = event->parent;
6658
6659 rcu_read_lock();
6660 rb = rcu_dereference(event->rb);
6661 if (rb) {
6662 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
6663 wake_up_all(&event->waitq);
6664 }
6665 rcu_read_unlock();
6666 }
6667
6668 struct perf_buffer *ring_buffer_get(struct perf_event *event)
6669 {
6670 struct perf_buffer *rb;
6671
6672 if (event->parent)
6673 event = event->parent;
6674
6675 rcu_read_lock();
6676 rb = rcu_dereference(event->rb);
6677 if (rb) {
6678 if (!refcount_inc_not_zero(&rb->refcount))
6679 rb = NULL;
6680 }
6681 rcu_read_unlock();
6682
6683 return rb;
6684 }
6685
6686 void ring_buffer_put(struct perf_buffer *rb)
6687 {
6688 if (!refcount_dec_and_test(&rb->refcount))
6689 return;
6690
6691 WARN_ON_ONCE(!list_empty(&rb->event_list));
6692
6693 call_rcu(&rb->rcu_head, rb_free_rcu);
6694 }
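
/*
 * Sketch of the expected get/put pairing for users of these helpers:
 *
 *	struct perf_buffer *rb = ring_buffer_get(event);
 *
 *	if (rb) {
 *		...			// use rb
 *		ring_buffer_put(rb);	// may free via RCU
 *	}
 */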
6695
6696 typedef void (*mapped_f)(struct perf_event *event, struct mm_struct *mm);
6697
6698 #define get_mapped(event, func) \
6699 ({ struct pmu *pmu; \
6700 mapped_f f = NULL; \
6701 guard(rcu)(); \
6702 pmu = READ_ONCE(event->pmu); \
6703 if (pmu) \
6704 f = pmu->func; \
6705 f; \
6706 })
6707
6708 static void perf_mmap_open(struct vm_area_struct *vma)
6709 {
6710 struct perf_event *event = vma->vm_file->private_data;
6711 mapped_f mapped = get_mapped(event, event_mapped);
6712
6713 refcount_inc(&event->mmap_count);
6714 refcount_inc(&event->rb->mmap_count);
6715
6716 if (vma->vm_pgoff)
6717 refcount_inc(&event->rb->aux_mmap_count);
6718
6719 if (mapped)
6720 mapped(event, vma->vm_mm);
6721 }
6722
6723 static void perf_pmu_output_stop(struct perf_event *event);
6724
6725 /*
6726 * A buffer can be mmap()ed multiple times; either directly through the same
6727 * event, or through other events by use of perf_event_set_output().
6728 *
6729 * In order to undo the VM accounting done by perf_mmap() we need to destroy
6730 * the buffer here, where we still have a VM context. This means we need
6731 * to detach all events redirecting to us.
6732 */
6733 static void perf_mmap_close(struct vm_area_struct *vma)
6734 {
6735 struct perf_event *event = vma->vm_file->private_data;
6736 mapped_f unmapped = get_mapped(event, event_unmapped);
6737 struct perf_buffer *rb = ring_buffer_get(event);
6738 struct user_struct *mmap_user = rb->mmap_user;
6739 int mmap_locked = rb->mmap_locked;
6740 unsigned long size = perf_data_size(rb);
6741 bool detach_rest = false;
6742
6743 /* FIXME vs perf_pmu_unregister() */
6744 if (unmapped)
6745 unmapped(event, vma->vm_mm);
6746
6747 /*
6748 * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex
6749 * to avoid complications.
6750 */
6751 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
6752 refcount_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
6753 /*
6754 * Stop all AUX events that are writing to this buffer,
6755 * so that we can free its AUX pages and corresponding PMU
6756 * data. Note that after rb::aux_mmap_count dropped to zero,
6757 * they won't start any more (see perf_aux_output_begin()).
6758 */
6759 perf_pmu_output_stop(event);
6760
6761 /* now it's safe to free the pages */
6762 atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);
6763 atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
6764
6765 /* this has to be the last one */
6766 rb_free_aux(rb);
6767 WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
6768
6769 mutex_unlock(&rb->aux_mutex);
6770 }
6771
6772 if (refcount_dec_and_test(&rb->mmap_count))
6773 detach_rest = true;
6774
6775 if (!refcount_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
6776 goto out_put;
6777
6778 ring_buffer_attach(event, NULL);
6779 mutex_unlock(&event->mmap_mutex);
6780
6781 /* If there's still other mmap()s of this buffer, we're done. */
6782 if (!detach_rest)
6783 goto out_put;
6784
6785 /*
6786 * No other mmap()s, detach from all other events that might redirect
6787 * into the now unreachable buffer. Somewhat complicated by the
6788 * fact that rb::event_lock otherwise nests inside mmap_mutex.
6789 */
6790 again:
6791 rcu_read_lock();
6792 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
6793 if (!atomic_long_inc_not_zero(&event->refcount)) {
6794 /*
6795 * This event is en-route to free_event() which will
6796 * detach it and remove it from the list.
6797 */
6798 continue;
6799 }
6800 rcu_read_unlock();
6801
6802 mutex_lock(&event->mmap_mutex);
6803 /*
6804 * Check we didn't race with perf_event_set_output() which can
6805 * swizzle the rb from under us while we were waiting to
6806 * acquire mmap_mutex.
6807 *
6808 * If we find a different rb; ignore this event, a next
6809 * iteration will no longer find it on the list. We have to
6810 * still restart the iteration to make sure we're not now
6811 * iterating the wrong list.
6812 */
6813 if (event->rb == rb)
6814 ring_buffer_attach(event, NULL);
6815
6816 mutex_unlock(&event->mmap_mutex);
6817 put_event(event);
6818
6819 /*
6820 * Restart the iteration; either we're on the wrong list or
6821 * destroyed its integrity by doing a deletion.
6822 */
6823 goto again;
6824 }
6825 rcu_read_unlock();
6826
6827 /*
6828 * It could be there's still a few 0-ref events on the list; they'll
6829 * get cleaned up by free_event() -- they'll also still have their
6830 * ref on the rb and will free it whenever they are done with it.
6831 *
6832 * Aside from that, this buffer is 'fully' detached and unmapped,
6833 * undo the VM accounting.
6834 */
6835
6836 atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked,
6837 &mmap_user->locked_vm);
6838 atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
6839 free_uid(mmap_user);
6840
6841 out_put:
6842 ring_buffer_put(rb); /* could be last */
6843 }
6844
6845 static vm_fault_t perf_mmap_pfn_mkwrite(struct vm_fault *vmf)
6846 {
6847 /* The first page is the user control page, others are read-only. */
6848 return vmf->pgoff == 0 ? 0 : VM_FAULT_SIGBUS;
6849 }
6850
6851 static int perf_mmap_may_split(struct vm_area_struct *vma, unsigned long addr)
6852 {
6853 /*
6854 * Forbid splitting perf mappings to prevent refcount leaks due to
6855 * the resulting non-matching offsets and sizes. See open()/close().
6856 */
6857 return -EINVAL;
6858 }
6859
6860 static const struct vm_operations_struct perf_mmap_vmops = {
6861 .open = perf_mmap_open,
6862 .close = perf_mmap_close, /* non mergeable */
6863 .pfn_mkwrite = perf_mmap_pfn_mkwrite,
6864 .may_split = perf_mmap_may_split,
6865 };
6866
6867 static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
6868 {
6869 unsigned long nr_pages = vma_pages(vma);
6870 int err = 0;
6871 unsigned long pagenum;
6872
6873 /*
6874 * We map this as a VM_PFNMAP VMA.
6875 *
6876 * This is not ideal as this is designed broadly for mappings of PFNs
6877 * referencing memory-mapped I/O ranges or non-system RAM i.e. for which
6878 * !pfn_valid(pfn).
6879 *
6880 * We are mapping kernel-allocated memory (memory we manage ourselves)
6881 * which would more ideally be mapped using vm_insert_page() or a
6882 * similar mechanism, that is as a VM_MIXEDMAP mapping.
6883 *
6884 * However this won't work here, because:
6885 *
6886 * 1. It uses vma->vm_page_prot, but this field has not been completely
6887 * set up at the point of the f_op->mmap() hook, so we are unable to
6888 * indicate that this should be mapped CoW in order that the
6889 * mkwrite() hook can be invoked to make the first page R/W and the
6890 * rest R/O as desired.
6891 *
6892 * 2. Anything other than a VM_PFNMAP of valid PFNs will result in
6893 * vm_normal_page() returning a struct page * pointer, which means
6894 * vm_ops->page_mkwrite() will be invoked rather than
6895 * vm_ops->pfn_mkwrite(), and this means we have to set page->mapping
6896 * to work around retry logic in the fault handler, however this
6897 * field is no longer allowed to be used within struct page.
6898 *
6899 * 3. Having a struct page * made available in the fault logic also
6900 * means that the page gets put on the rmap and becomes
6901 * inappropriately accessible and subject to map and ref counting.
6902 *
6903 * Ideally we would have a mechanism that could explicitly express our
6904 * desires, but this is not currently the case, so we instead use
6905 * VM_PFNMAP.
6906 *
6907 * We manage the lifetime of these mappings with internal refcounts (see
6908 * perf_mmap_open() and perf_mmap_close()) so we ensure the lifetime of
6909 * this mapping is maintained correctly.
6910 */
6911 for (pagenum = 0; pagenum < nr_pages; pagenum++) {
6912 unsigned long va = vma->vm_start + PAGE_SIZE * pagenum;
6913 struct page *page = perf_mmap_to_page(rb, vma->vm_pgoff + pagenum);
6914
6915 if (page == NULL) {
6916 err = -EINVAL;
6917 break;
6918 }
6919
6920 /* Map readonly, perf_mmap_pfn_mkwrite() called on write fault. */
6921 err = remap_pfn_range(vma, va, page_to_pfn(page), PAGE_SIZE,
6922 vm_get_page_prot(vma->vm_flags & ~VM_SHARED));
6923 if (err)
6924 break;
6925 }
6926
6927 #ifdef CONFIG_MMU
6928 /* Clear any partial mappings on error. */
6929 if (err)
6930 zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE, NULL);
6931 #endif
6932
6933 return err;
6934 }
6935
6936 static bool perf_mmap_calc_limits(struct vm_area_struct *vma, long *user_extra, long *extra)
6937 {
6938 unsigned long user_locked, user_lock_limit, locked, lock_limit;
6939 struct user_struct *user = current_user();
6940
6941 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
6942 /* Increase the limit linearly with more CPUs */
6943 user_lock_limit *= num_online_cpus();
6944
6945 user_locked = atomic_long_read(&user->locked_vm);
6946
6947 /*
6948 * sysctl_perf_event_mlock may have changed, so that
6949 * user->locked_vm > user_lock_limit
6950 */
6951 if (user_locked > user_lock_limit)
6952 user_locked = user_lock_limit;
6953 user_locked += *user_extra;
6954
6955 if (user_locked > user_lock_limit) {
6956 /*
6957 * charge locked_vm until it hits user_lock_limit;
6958 * charge the rest from pinned_vm
6959 */
6960 *extra = user_locked - user_lock_limit;
6961 *user_extra -= *extra;
6962 }
6963
6964 lock_limit = rlimit(RLIMIT_MEMLOCK);
6965 lock_limit >>= PAGE_SHIFT;
6966 locked = atomic64_read(&vma->vm_mm->pinned_vm) + *extra;
6967
6968 return locked <= lock_limit || !perf_is_paranoid() || capable(CAP_IPC_LOCK);
6969 }
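
/*
 * Worked example (assuming the default sysctl_perf_event_mlock of 516 KiB
 * and 4 KiB pages): user_lock_limit = 516 >> 2 = 129 pages per online CPU,
 * so with 4 CPUs 516 pages per user. If user->locked_vm already holds 500
 * pages and *user_extra is 100, user_locked becomes 600, so *extra = 84
 * pages are charged to mm->pinned_vm and only 16 to user->locked_vm,
 * subject to RLIMIT_MEMLOCK unless the caller is privileged.
 */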
6970
6971 static void perf_mmap_account(struct vm_area_struct *vma, long user_extra, long extra)
6972 {
6973 struct user_struct *user = current_user();
6974
6975 atomic_long_add(user_extra, &user->locked_vm);
6976 atomic64_add(extra, &vma->vm_mm->pinned_vm);
6977 }
6978
6979 static int perf_mmap_rb(struct vm_area_struct *vma, struct perf_event *event,
6980 unsigned long nr_pages)
6981 {
6982 long extra = 0, user_extra = nr_pages;
6983 struct perf_buffer *rb;
6984 int rb_flags = 0;
6985
6986 nr_pages -= 1;
6987
6988 /*
6989 * If we have rb pages ensure they're a power-of-two number, so we
6990 * can do bitmasks instead of modulo.
6991 */
6992 if (nr_pages != 0 && !is_power_of_2(nr_pages))
6993 return -EINVAL;
6994
6995 WARN_ON_ONCE(event->ctx->parent_ctx);
6996
6997 if (event->rb) {
6998 if (data_page_nr(event->rb) != nr_pages)
6999 return -EINVAL;
7000
7001 if (refcount_inc_not_zero(&event->rb->mmap_count)) {
7002 /*
7003 * Success -- managed to mmap() the same buffer
7004 * multiple times.
7005 */
7006 perf_mmap_account(vma, user_extra, extra);
7007 refcount_inc(&event->mmap_count);
7008 return 0;
7009 }
7010
7011 /*
7012 * Raced against perf_mmap_close()'s
7013 * refcount_dec_and_mutex_lock() remove the
7014 * event and continue as if !event->rb
7015 */
7016 ring_buffer_attach(event, NULL);
7017 }
7018
7019 if (!perf_mmap_calc_limits(vma, &user_extra, &extra))
7020 return -EPERM;
7021
7022 if (vma->vm_flags & VM_WRITE)
7023 rb_flags |= RING_BUFFER_WRITABLE;
7024
7025 rb = rb_alloc(nr_pages,
7026 event->attr.watermark ? event->attr.wakeup_watermark : 0,
7027 event->cpu, rb_flags);
7028
7029 if (!rb)
7030 return -ENOMEM;
7031
7032 refcount_set(&rb->mmap_count, 1);
7033 rb->mmap_user = get_current_user();
7034 rb->mmap_locked = extra;
7035
7036 ring_buffer_attach(event, rb);
7037
7038 perf_event_update_time(event);
7039 perf_event_init_userpage(event);
7040 perf_event_update_userpage(event);
7041
7042 perf_mmap_account(vma, user_extra, extra);
7043 refcount_set(&event->mmap_count, 1);
7044
7045 return 0;
7046 }
7047
7048 static int perf_mmap_aux(struct vm_area_struct *vma, struct perf_event *event,
7049 unsigned long nr_pages)
7050 {
7051 long extra = 0, user_extra = nr_pages;
7052 u64 aux_offset, aux_size;
7053 struct perf_buffer *rb;
7054 int ret, rb_flags = 0;
7055
7056 rb = event->rb;
7057 if (!rb)
7058 return -EINVAL;
7059
7060 guard(mutex)(&rb->aux_mutex);
7061
7062 /*
7063 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
7064 * mapped, all subsequent mappings should have the same size
7065 * and offset. Must be above the normal perf buffer.
7066 */
7067 aux_offset = READ_ONCE(rb->user_page->aux_offset);
7068 aux_size = READ_ONCE(rb->user_page->aux_size);
7069
7070 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
7071 return -EINVAL;
7072
7073 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
7074 return -EINVAL;
7075
7076 /* already mapped with a different offset */
7077 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
7078 return -EINVAL;
7079
7080 if (aux_size != nr_pages * PAGE_SIZE)
7081 return -EINVAL;
7082
7083 /* already mapped with a different size */
7084 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
7085 return -EINVAL;
7086
7087 if (!is_power_of_2(nr_pages))
7088 return -EINVAL;
7089
7090 if (!refcount_inc_not_zero(&rb->mmap_count))
7091 return -EINVAL;
7092
7093 if (rb_has_aux(rb)) {
7094 refcount_inc(&rb->aux_mmap_count);
7095
7096 } else {
7097 if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
7098 refcount_dec(&rb->mmap_count);
7099 return -EPERM;
7100 }
7101
7102 WARN_ON(!rb && event->rb);
7103
7104 if (vma->vm_flags & VM_WRITE)
7105 rb_flags |= RING_BUFFER_WRITABLE;
7106
7107 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
7108 event->attr.aux_watermark, rb_flags);
7109 if (ret) {
7110 refcount_dec(&rb->mmap_count);
7111 return ret;
7112 }
7113
7114 refcount_set(&rb->aux_mmap_count, 1);
7115 rb->aux_mmap_locked = extra;
7116 }
7117
7118 perf_mmap_account(vma, user_extra, extra);
7119 refcount_inc(&event->mmap_count);
7120
7121 return 0;
7122 }
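
/*
 * Userspace sketch of the mapping sequence handled by perf_mmap_rb() and
 * perf_mmap_aux() (sizes are illustrative; data pages and AUX pages must
 * each be a power-of-two number of pages):
 *
 *	base = mmap(NULL, (1 + 8) * page_size, PROT_READ|PROT_WRITE,
 *		    MAP_SHARED, fd, 0);			// user page + data
 *	pc = base;					// struct perf_event_mmap_page
 *	pc->aux_offset = (1 + 8) * page_size;		// directly above the data
 *	pc->aux_size   = 16 * page_size;
 *	aux = mmap(NULL, pc->aux_size, PROT_READ|PROT_WRITE,
 *		   MAP_SHARED, fd, pc->aux_offset);
 */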
7123
7124 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
7125 {
7126 struct perf_event *event = file->private_data;
7127 unsigned long vma_size, nr_pages;
7128 mapped_f mapped;
7129 int ret;
7130
7131 /*
7132 * Don't allow mmap() of inherited per-task counters. This would
7133 * create a performance issue due to all children writing to the
7134 * same rb.
7135 */
7136 if (event->cpu == -1 && event->attr.inherit)
7137 return -EINVAL;
7138
7139 if (!(vma->vm_flags & VM_SHARED))
7140 return -EINVAL;
7141
7142 ret = security_perf_event_read(event);
7143 if (ret)
7144 return ret;
7145
7146 vma_size = vma->vm_end - vma->vm_start;
7147 nr_pages = vma_size / PAGE_SIZE;
7148
7149 if (nr_pages > INT_MAX)
7150 return -ENOMEM;
7151
7152 if (vma_size != PAGE_SIZE * nr_pages)
7153 return -EINVAL;
7154
7155 scoped_guard (mutex, &event->mmap_mutex) {
7156 /*
7157 * This relies on __pmu_detach_event() taking mmap_mutex after marking
7158 * the event REVOKED. Either we observe the state, or __pmu_detach_event()
7159 * will detach the rb created here.
7160 */
7161 if (event->state <= PERF_EVENT_STATE_REVOKED)
7162 return -ENODEV;
7163
7164 if (vma->vm_pgoff == 0)
7165 ret = perf_mmap_rb(vma, event, nr_pages);
7166 else
7167 ret = perf_mmap_aux(vma, event, nr_pages);
7168 if (ret)
7169 return ret;
7170 }
7171
7172 /*
7173 * Since pinned accounting is per vm we cannot allow fork() to copy our
7174 * vma.
7175 */
7176 vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
7177 vma->vm_ops = &perf_mmap_vmops;
7178
7179 mapped = get_mapped(event, event_mapped);
7180 if (mapped)
7181 mapped(event, vma->vm_mm);
7182
7183 /*
7184 * Try to map it into the page table. On fail, invoke
7185 * perf_mmap_close() to undo the above, as the callsite expects
7186 * full cleanup in this case and therefore does not invoke
7187 * vmops::close().
7188 */
7189 ret = map_range(event->rb, vma);
7190 if (ret)
7191 perf_mmap_close(vma);
7192
7193 return ret;
7194 }
7195
7196 static int perf_fasync(int fd, struct file *filp, int on)
7197 {
7198 struct inode *inode = file_inode(filp);
7199 struct perf_event *event = filp->private_data;
7200 int retval;
7201
7202 if (event->state <= PERF_EVENT_STATE_REVOKED)
7203 return -ENODEV;
7204
7205 inode_lock(inode);
7206 retval = fasync_helper(fd, filp, on, &event->fasync);
7207 inode_unlock(inode);
7208
7209 if (retval < 0)
7210 return retval;
7211
7212 return 0;
7213 }
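
/*
 * Userspace sketch: the fasync hook above is what makes SIGIO-driven
 * overflow notification work, typically set up as:
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 */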
7214
7215 static const struct file_operations perf_fops = {
7216 .release = perf_release,
7217 .read = perf_read,
7218 .poll = perf_poll,
7219 .unlocked_ioctl = perf_ioctl,
7220 .compat_ioctl = perf_compat_ioctl,
7221 .mmap = perf_mmap,
7222 .fasync = perf_fasync,
7223 };
7224
7225 /*
7226 * Perf event wakeup
7227 *
7228 * If there's data, ensure we set the poll() state and publish everything
7229 * to user-space before waking everybody up.
7230 */
7231
7232 void perf_event_wakeup(struct perf_event *event)
7233 {
7234 ring_buffer_wakeup(event);
7235
7236 if (event->pending_kill) {
7237 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
7238 event->pending_kill = 0;
7239 }
7240 }
7241
7242 static void perf_sigtrap(struct perf_event *event)
7243 {
7244 /*
7245 * Both perf_pending_task() and perf_pending_irq() can race with the
7246 * task exiting.
7247 */
7248 if (current->flags & PF_EXITING)
7249 return;
7250
7251 /*
7252 * We'd expect this to only occur if the irq_work is delayed and either
7253 * ctx->task or current has changed in the meantime. This can be the
7254 * case on architectures that do not implement arch_irq_work_raise().
7255 */
7256 if (WARN_ON_ONCE(event->ctx->task != current))
7257 return;
7258
7259 send_sig_perf((void __user *)event->pending_addr,
7260 event->orig_type, event->attr.sig_data);
7261 }
7262
7263 /*
7264 * Deliver the pending work in-event-context or follow the context.
7265 */
7266 static void __perf_pending_disable(struct perf_event *event)
7267 {
7268 int cpu = READ_ONCE(event->oncpu);
7269
7270 /*
7271 * If the event isn't running, we're done. event_sched_out() will have
7272 * taken care of things.
7273 */
7274 if (cpu < 0)
7275 return;
7276
7277 /*
7278 * Yay, we hit home and are in the context of the event.
7279 */
7280 if (cpu == smp_processor_id()) {
7281 if (event->pending_disable) {
7282 event->pending_disable = 0;
7283 perf_event_disable_local(event);
7284 }
7285 return;
7286 }
7287
7288 /*
7289 * CPU-A CPU-B
7290 *
7291 * perf_event_disable_inatomic()
7292 * @pending_disable = 1;
7293 * irq_work_queue();
7294 *
7295 * sched-out
7296 * @pending_disable = 0;
7297 *
7298 * sched-in
7299 * perf_event_disable_inatomic()
7300 * @pending_disable = 1;
7301 * irq_work_queue(); // FAILS
7302 *
7303 * irq_work_run()
7304 * perf_pending_disable()
7305 *
7306 * But the event runs on CPU-B and wants disabling there.
7307 */
7308 irq_work_queue_on(&event->pending_disable_irq, cpu);
7309 }
7310
7311 static void perf_pending_disable(struct irq_work *entry)
7312 {
7313 struct perf_event *event = container_of(entry, struct perf_event, pending_disable_irq);
7314 int rctx;
7315
7316 /*
7317 * If we 'fail' here, that's OK, it means recursion is already disabled
7318 * and we won't recurse 'further'.
7319 */
7320 rctx = perf_swevent_get_recursion_context();
7321 __perf_pending_disable(event);
7322 if (rctx >= 0)
7323 perf_swevent_put_recursion_context(rctx);
7324 }
7325
7326 static void perf_pending_irq(struct irq_work *entry)
7327 {
7328 struct perf_event *event = container_of(entry, struct perf_event, pending_irq);
7329 int rctx;
7330
7331 /*
7332 * If we 'fail' here, that's OK, it means recursion is already disabled
7333 * and we won't recurse 'further'.
7334 */
7335 rctx = perf_swevent_get_recursion_context();
7336
7337 /*
7338 * The wakeup isn't bound to the context of the event -- it can happen
7339 * irrespective of where the event is.
7340 */
7341 if (event->pending_wakeup) {
7342 event->pending_wakeup = 0;
7343 perf_event_wakeup(event);
7344 }
7345
7346 if (rctx >= 0)
7347 perf_swevent_put_recursion_context(rctx);
7348 }
7349
7350 static void perf_pending_task(struct callback_head *head)
7351 {
7352 struct perf_event *event = container_of(head, struct perf_event, pending_task);
7353 int rctx;
7354
7355 /*
7356 * If we 'fail' here, that's OK, it means recursion is already disabled
7357 * and we won't recurse 'further'.
7358 */
7359 rctx = perf_swevent_get_recursion_context();
7360
7361 if (event->pending_work) {
7362 event->pending_work = 0;
7363 perf_sigtrap(event);
7364 local_dec(&event->ctx->nr_no_switch_fast);
7365 }
7366 put_event(event);
7367
7368 if (rctx >= 0)
7369 perf_swevent_put_recursion_context(rctx);
7370 }
7371
7372 #ifdef CONFIG_GUEST_PERF_EVENTS
7373 struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
7374
7375 DEFINE_STATIC_CALL_RET0(__perf_guest_state, *perf_guest_cbs->state);
7376 DEFINE_STATIC_CALL_RET0(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
7377 DEFINE_STATIC_CALL_RET0(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
7378
7379 void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
7380 {
7381 if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
7382 return;
7383
7384 rcu_assign_pointer(perf_guest_cbs, cbs);
7385 static_call_update(__perf_guest_state, cbs->state);
7386 static_call_update(__perf_guest_get_ip, cbs->get_ip);
7387
7388 /* Implementing ->handle_intel_pt_intr is optional. */
7389 if (cbs->handle_intel_pt_intr)
7390 static_call_update(__perf_guest_handle_intel_pt_intr,
7391 cbs->handle_intel_pt_intr);
7392 }
7393 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
7394
7395 void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
7396 {
7397 if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
7398 return;
7399
7400 rcu_assign_pointer(perf_guest_cbs, NULL);
7401 static_call_update(__perf_guest_state, (void *)&__static_call_return0);
7402 static_call_update(__perf_guest_get_ip, (void *)&__static_call_return0);
7403 static_call_update(__perf_guest_handle_intel_pt_intr,
7404 (void *)&__static_call_return0);
7405 synchronize_rcu();
7406 }
7407 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
7408 #endif
7409
7410 static bool should_sample_guest(struct perf_event *event)
7411 {
7412 return !event->attr.exclude_guest && perf_guest_state();
7413 }
7414
7415 unsigned long perf_misc_flags(struct perf_event *event,
7416 struct pt_regs *regs)
7417 {
7418 if (should_sample_guest(event))
7419 return perf_arch_guest_misc_flags(regs);
7420
7421 return perf_arch_misc_flags(regs);
7422 }
7423
7424 unsigned long perf_instruction_pointer(struct perf_event *event,
7425 struct pt_regs *regs)
7426 {
7427 if (should_sample_guest(event))
7428 return perf_guest_get_ip();
7429
7430 return perf_arch_instruction_pointer(regs);
7431 }
7432
7433 static void
7434 perf_output_sample_regs(struct perf_output_handle *handle,
7435 struct pt_regs *regs, u64 mask)
7436 {
7437 int bit;
7438 DECLARE_BITMAP(_mask, 64);
7439
7440 bitmap_from_u64(_mask, mask);
7441 for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
7442 u64 val;
7443
7444 val = perf_reg_value(regs, bit);
7445 perf_output_put(handle, val);
7446 }
7447 }
7448
7449 static void perf_sample_regs_user(struct perf_regs *regs_user,
7450 struct pt_regs *regs)
7451 {
7452 if (user_mode(regs)) {
7453 regs_user->abi = perf_reg_abi(current);
7454 regs_user->regs = regs;
7455 } else if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) {
7456 perf_get_regs_user(regs_user, regs);
7457 } else {
7458 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
7459 regs_user->regs = NULL;
7460 }
7461 }
7462
7463 static void perf_sample_regs_intr(struct perf_regs *regs_intr,
7464 struct pt_regs *regs)
7465 {
7466 regs_intr->regs = regs;
7467 regs_intr->abi = perf_reg_abi(current);
7468 }
7469
7470
7471 /*
7472 * Get remaining task size from user stack pointer.
7473 *
7474 * It'd be better to look up the stack VMA and limit this more
7475 * precisely, but there's no way to do that safely under interrupt,
7476 * so we use TASK_SIZE as the limit.
7477 */
7478 static u64 perf_ustack_task_size(struct pt_regs *regs)
7479 {
7480 unsigned long addr = perf_user_stack_pointer(regs);
7481
7482 if (!addr || addr >= TASK_SIZE)
7483 return 0;
7484
7485 return TASK_SIZE - addr;
7486 }
7487
7488 static u16
7489 perf_sample_ustack_size(u16 stack_size, u16 header_size,
7490 struct pt_regs *regs)
7491 {
7492 u64 task_size;
7493
7494 /* No regs, no stack pointer, no dump. */
7495 if (!regs)
7496 return 0;
7497
7498 /* No mm, no stack, no dump. */
7499 if (!current->mm)
7500 return 0;
7501
7502 /*
7503 * Check that the requested stack size fits within:
7504 * - TASK_SIZE
7505 *   If it doesn't, limit the dump to the remaining task size.
7506 *
7507 * - the remaining sample size
7508 *   If it doesn't, shrink the stack dump size to fit into
7509 *   the remaining sample size.
7510 */
7511
7512 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
7513 stack_size = min(stack_size, (u16) task_size);
7514
7515 /* Current header size plus static size and dynamic size. */
7516 header_size += 2 * sizeof(u64);
7517
7518 /* Do we fit in with the current stack dump size? */
7519 if ((u16) (header_size + stack_size) < header_size) {
7520 /*
7521 * If we overflow the maximum size for the sample,
7522 * we customize the stack dump size to fit in.
7523 */
7524 stack_size = USHRT_MAX - header_size - sizeof(u64);
7525 stack_size = round_up(stack_size, sizeof(u64));
7526 }
7527
7528 return stack_size;
7529 }
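/*
 * A worked example of the sizing above (illustrative numbers): with a
 * header_size of 64, the function accounts 64 + 16 = 80 bytes for the
 * header plus the static and dynamic size fields.  A requested
 * stack_size of USHRT_MAX then trips the u16 overflow check, so it is
 * clamped to USHRT_MAX - 80 - 8 = 65447 and rounded up to 65448,
 * keeping the complete sample within the 16-bit header::size.
 */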
7530
7531 static void
7532 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
7533 struct pt_regs *regs)
7534 {
7535 /* Case of a kernel thread, nothing to dump */
7536 if (!regs) {
7537 u64 size = 0;
7538 perf_output_put(handle, size);
7539 } else {
7540 unsigned long sp;
7541 unsigned int rem;
7542 u64 dyn_size;
7543
7544 /*
7545 * We dump:
7546 * static size
7547 * - the size requested by the user or the best one we can fit
7548 * into the sample max size
7549 * data
7550 * - user stack dump data
7551 * dynamic size
7552 * - the actual dumped size
7553 */
7554
7555 /* Static size. */
7556 perf_output_put(handle, dump_size);
7557
7558 /* Data. */
7559 sp = perf_user_stack_pointer(regs);
7560 rem = __output_copy_user(handle, (void *) sp, dump_size);
7561 dyn_size = dump_size - rem;
7562
7563 perf_output_skip(handle, rem);
7564
7565 /* Dynamic size. */
7566 perf_output_put(handle, dyn_size);
7567 }
7568 }
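/*
 * For reference, the PERF_SAMPLE_STACK_USER region emitted above is laid
 * out as (a sketch of the record format, not a C declaration):
 *
 *	u64  size;		// dump_size requested/fitted
 *	char data[size];	// raw user stack bytes, remainder skipped
 *	u64  dyn_size;		// bytes actually copied
 *
 * When regs is NULL (kernel thread), only a single zero size field is
 * emitted and nothing follows it.
 */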
7569
7570 static unsigned long perf_prepare_sample_aux(struct perf_event *event,
7571 struct perf_sample_data *data,
7572 size_t size)
7573 {
7574 struct perf_event *sampler = event->aux_event;
7575 struct perf_buffer *rb;
7576
7577 data->aux_size = 0;
7578
7579 if (!sampler)
7580 goto out;
7581
7582 if (WARN_ON_ONCE(READ_ONCE(sampler->state) != PERF_EVENT_STATE_ACTIVE))
7583 goto out;
7584
7585 if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id()))
7586 goto out;
7587
7588 rb = ring_buffer_get(sampler);
7589 if (!rb)
7590 goto out;
7591
7592 /*
7593 * If this is an NMI hit inside sampling code, don't take
7594 * the sample. See also perf_aux_sample_output().
7595 */
7596 if (READ_ONCE(rb->aux_in_sampling)) {
7597 data->aux_size = 0;
7598 } else {
7599 size = min_t(size_t, size, perf_aux_size(rb));
7600 data->aux_size = ALIGN(size, sizeof(u64));
7601 }
7602 ring_buffer_put(rb);
7603
7604 out:
7605 return data->aux_size;
7606 }
7607
7608 static long perf_pmu_snapshot_aux(struct perf_buffer *rb,
7609 struct perf_event *event,
7610 struct perf_output_handle *handle,
7611 unsigned long size)
7612 {
7613 unsigned long flags;
7614 long ret;
7615
7616 /*
7617 * Normal ->start()/->stop() callbacks run in IRQ mode in scheduler
7618 * paths. If we start calling them in NMI context, they may race with
7619 * the IRQ ones, that is, for example, re-starting an event that's just
7620 * been stopped, which is why we're using a separate callback that
7621 * doesn't change the event state.
7622 *
7623 * IRQs need to be disabled to prevent IPIs from racing with us.
7624 */
7625 local_irq_save(flags);
7626 /*
7627 * Guard against NMI hits inside the critical section;
7628 * see also perf_prepare_sample_aux().
7629 */
7630 WRITE_ONCE(rb->aux_in_sampling, 1);
7631 barrier();
7632
7633 ret = event->pmu->snapshot_aux(event, handle, size);
7634
7635 barrier();
7636 WRITE_ONCE(rb->aux_in_sampling, 0);
7637 local_irq_restore(flags);
7638
7639 return ret;
7640 }
7641
7642 static void perf_aux_sample_output(struct perf_event *event,
7643 struct perf_output_handle *handle,
7644 struct perf_sample_data *data)
7645 {
7646 struct perf_event *sampler = event->aux_event;
7647 struct perf_buffer *rb;
7648 unsigned long pad;
7649 long size;
7650
7651 if (WARN_ON_ONCE(!sampler || !data->aux_size))
7652 return;
7653
7654 rb = ring_buffer_get(sampler);
7655 if (!rb)
7656 return;
7657
7658 size = perf_pmu_snapshot_aux(rb, sampler, handle, data->aux_size);
7659
7660 /*
7661 * An error here means that perf_output_copy() failed (returned a
7662 * non-zero surplus that it didn't copy), which in its current
7663 * enlightened implementation is not possible. If that changes, we'd
7664 * like to know.
7665 */
7666 if (WARN_ON_ONCE(size < 0))
7667 goto out_put;
7668
7669 /*
7670 * The pad comes from ALIGN()ing data->aux_size up to u64 in
7671 * perf_prepare_sample_aux(), so should not be more than that.
7672 */
7673 pad = data->aux_size - size;
7674 if (WARN_ON_ONCE(pad >= sizeof(u64)))
7675 pad = 8;
7676
7677 if (pad) {
7678 u64 zero = 0;
7679 perf_output_copy(handle, &zero, pad);
7680 }
7681
7682 out_put:
7683 ring_buffer_put(rb);
7684 }
7685
7686 /*
7687 * A set of common sample data types saved even for non-sample records
7688 * when event->attr.sample_id_all is set.
7689 */
7690 #define PERF_SAMPLE_ID_ALL (PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
7691 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
7692 PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER)
7693
7694 static void __perf_event_header__init_id(struct perf_sample_data *data,
7695 struct perf_event *event,
7696 u64 sample_type)
7697 {
7698 data->type = event->attr.sample_type;
7699 data->sample_flags |= data->type & PERF_SAMPLE_ID_ALL;
7700
7701 if (sample_type & PERF_SAMPLE_TID) {
7702 /* namespace issues */
7703 data->tid_entry.pid = perf_event_pid(event, current);
7704 data->tid_entry.tid = perf_event_tid(event, current);
7705 }
7706
7707 if (sample_type & PERF_SAMPLE_TIME)
7708 data->time = perf_event_clock(event);
7709
7710 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
7711 data->id = primary_event_id(event);
7712
7713 if (sample_type & PERF_SAMPLE_STREAM_ID)
7714 data->stream_id = event->id;
7715
7716 if (sample_type & PERF_SAMPLE_CPU) {
7717 data->cpu_entry.cpu = raw_smp_processor_id();
7718 data->cpu_entry.reserved = 0;
7719 }
7720 }
7721
7722 void perf_event_header__init_id(struct perf_event_header *header,
7723 struct perf_sample_data *data,
7724 struct perf_event *event)
7725 {
7726 if (event->attr.sample_id_all) {
7727 header->size += event->id_header_size;
7728 __perf_event_header__init_id(data, event, event->attr.sample_type);
7729 }
7730 }
7731
7732 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
7733 struct perf_sample_data *data)
7734 {
7735 u64 sample_type = data->type;
7736
7737 if (sample_type & PERF_SAMPLE_TID)
7738 perf_output_put(handle, data->tid_entry);
7739
7740 if (sample_type & PERF_SAMPLE_TIME)
7741 perf_output_put(handle, data->time);
7742
7743 if (sample_type & PERF_SAMPLE_ID)
7744 perf_output_put(handle, data->id);
7745
7746 if (sample_type & PERF_SAMPLE_STREAM_ID)
7747 perf_output_put(handle, data->stream_id);
7748
7749 if (sample_type & PERF_SAMPLE_CPU)
7750 perf_output_put(handle, data->cpu_entry);
7751
7752 if (sample_type & PERF_SAMPLE_IDENTIFIER)
7753 perf_output_put(handle, data->id);
7754 }
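/*
 * For reference, when attr.sample_id_all is set the fields above form the
 * sample_id trailer appended to non-sample records, in this order (each
 * present only if the corresponding bit is set in attr.sample_type):
 *
 *	{ u32 pid, tid;  }	// PERF_SAMPLE_TID
 *	{ u64 time;      }	// PERF_SAMPLE_TIME
 *	{ u64 id;        }	// PERF_SAMPLE_ID
 *	{ u64 stream_id; }	// PERF_SAMPLE_STREAM_ID
 *	{ u32 cpu, res;  }	// PERF_SAMPLE_CPU
 *	{ u64 id;        }	// PERF_SAMPLE_IDENTIFIER
 */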
7755
7756 void perf_event__output_id_sample(struct perf_event *event,
7757 struct perf_output_handle *handle,
7758 struct perf_sample_data *sample)
7759 {
7760 if (event->attr.sample_id_all)
7761 __perf_event__output_id_sample(handle, sample);
7762 }
7763
7764 static void perf_output_read_one(struct perf_output_handle *handle,
7765 struct perf_event *event,
7766 u64 enabled, u64 running)
7767 {
7768 u64 read_format = event->attr.read_format;
7769 u64 values[5];
7770 int n = 0;
7771
7772 values[n++] = perf_event_count(event, has_inherit_and_sample_read(&event->attr));
7773 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
7774 values[n++] = enabled +
7775 atomic64_read(&event->child_total_time_enabled);
7776 }
7777 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
7778 values[n++] = running +
7779 atomic64_read(&event->child_total_time_running);
7780 }
7781 if (read_format & PERF_FORMAT_ID)
7782 values[n++] = primary_event_id(event);
7783 if (read_format & PERF_FORMAT_LOST)
7784 values[n++] = atomic64_read(&event->lost_samples);
7785
7786 __output_copy(handle, values, n * sizeof(u64));
7787 }
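/*
 * The non-group read layout emitted above, as seen by user-space
 * (optional fields present only when selected in attr.read_format):
 *
 *	u64 value;
 *	u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *	u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *	u64 id;			// PERF_FORMAT_ID
 *	u64 lost;		// PERF_FORMAT_LOST
 */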
7788
7789 static void perf_output_read_group(struct perf_output_handle *handle,
7790 struct perf_event *event,
7791 u64 enabled, u64 running)
7792 {
7793 struct perf_event *leader = event->group_leader, *sub;
7794 u64 read_format = event->attr.read_format;
7795 unsigned long flags;
7796 u64 values[6];
7797 int n = 0;
7798 bool self = has_inherit_and_sample_read(&event->attr);
7799
7800 /*
7801 * Disabling interrupts avoids all counter scheduling
7802 * (context switches, timer based rotation and IPIs).
7803 */
7804 local_irq_save(flags);
7805
7806 values[n++] = 1 + leader->nr_siblings;
7807
7808 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
7809 values[n++] = enabled;
7810
7811 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
7812 values[n++] = running;
7813
7814 if ((leader != event) && !handle->skip_read)
7815 perf_pmu_read(leader);
7816
7817 values[n++] = perf_event_count(leader, self);
7818 if (read_format & PERF_FORMAT_ID)
7819 values[n++] = primary_event_id(leader);
7820 if (read_format & PERF_FORMAT_LOST)
7821 values[n++] = atomic64_read(&leader->lost_samples);
7822
7823 __output_copy(handle, values, n * sizeof(u64));
7824
7825 for_each_sibling_event(sub, leader) {
7826 n = 0;
7827
7828 if ((sub != event) && !handle->skip_read)
7829 perf_pmu_read(sub);
7830
7831 values[n++] = perf_event_count(sub, self);
7832 if (read_format & PERF_FORMAT_ID)
7833 values[n++] = primary_event_id(sub);
7834 if (read_format & PERF_FORMAT_LOST)
7835 values[n++] = atomic64_read(&sub->lost_samples);
7836
7837 __output_copy(handle, values, n * sizeof(u64));
7838 }
7839
7840 local_irq_restore(flags);
7841 }
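/*
 * The PERF_FORMAT_GROUP layout emitted above, for the leader followed by
 * each sibling (a sketch; id/lost are present only when selected in
 * attr.read_format, time_enabled/time_running likewise):
 *
 *	u64 nr;			// 1 + nr_siblings
 *	u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *	u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *	{ u64 value; [u64 id;] [u64 lost;] } cntr[nr];
 */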
7842
7843 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
7844 PERF_FORMAT_TOTAL_TIME_RUNNING)
7845
7846 /*
7847 * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
7848 *
7849 * The problem is that it's both hard and excessively expensive to iterate the
7850 * child list, not to mention that it's impossible to IPI the children running
7851 * on another CPU, from interrupt/NMI context.
7852 *
7853 * Instead the combination of PERF_SAMPLE_READ and inherit will track per-thread
7854 * counts rather than attempting to accumulate some value across all children on
7855 * all cores.
7856 */
7857 static void perf_output_read(struct perf_output_handle *handle,
7858 struct perf_event *event)
7859 {
7860 u64 enabled = 0, running = 0, now;
7861 u64 read_format = event->attr.read_format;
7862
7863 /*
7864 * compute total_time_enabled, total_time_running
7865 * based on snapshot values taken when the event
7866 * was last scheduled in.
7867 *
7868 * we cannot simply call update_context_time()
7869 * because of locking issues, as we may be
7870 * called from NMI context
7871 */
7872 if (read_format & PERF_FORMAT_TOTAL_TIMES)
7873 calc_timer_values(event, &now, &enabled, &running);
7874
7875 if (event->attr.read_format & PERF_FORMAT_GROUP)
7876 perf_output_read_group(handle, event, enabled, running);
7877 else
7878 perf_output_read_one(handle, event, enabled, running);
7879 }
7880
7881 void perf_output_sample(struct perf_output_handle *handle,
7882 struct perf_event_header *header,
7883 struct perf_sample_data *data,
7884 struct perf_event *event)
7885 {
7886 u64 sample_type = data->type;
7887
7888 if (data->sample_flags & PERF_SAMPLE_READ)
7889 handle->skip_read = 1;
7890
7891 perf_output_put(handle, *header);
7892
7893 if (sample_type & PERF_SAMPLE_IDENTIFIER)
7894 perf_output_put(handle, data->id);
7895
7896 if (sample_type & PERF_SAMPLE_IP)
7897 perf_output_put(handle, data->ip);
7898
7899 if (sample_type & PERF_SAMPLE_TID)
7900 perf_output_put(handle, data->tid_entry);
7901
7902 if (sample_type & PERF_SAMPLE_TIME)
7903 perf_output_put(handle, data->time);
7904
7905 if (sample_type & PERF_SAMPLE_ADDR)
7906 perf_output_put(handle, data->addr);
7907
7908 if (sample_type & PERF_SAMPLE_ID)
7909 perf_output_put(handle, data->id);
7910
7911 if (sample_type & PERF_SAMPLE_STREAM_ID)
7912 perf_output_put(handle, data->stream_id);
7913
7914 if (sample_type & PERF_SAMPLE_CPU)
7915 perf_output_put(handle, data->cpu_entry);
7916
7917 if (sample_type & PERF_SAMPLE_PERIOD)
7918 perf_output_put(handle, data->period);
7919
7920 if (sample_type & PERF_SAMPLE_READ)
7921 perf_output_read(handle, event);
7922
7923 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
7924 int size = 1;
7925
7926 size += data->callchain->nr;
7927 size *= sizeof(u64);
7928 __output_copy(handle, data->callchain, size);
7929 }
7930
7931 if (sample_type & PERF_SAMPLE_RAW) {
7932 struct perf_raw_record *raw = data->raw;
7933
7934 if (raw) {
7935 struct perf_raw_frag *frag = &raw->frag;
7936
7937 perf_output_put(handle, raw->size);
7938 do {
7939 if (frag->copy) {
7940 __output_custom(handle, frag->copy,
7941 frag->data, frag->size);
7942 } else {
7943 __output_copy(handle, frag->data,
7944 frag->size);
7945 }
7946 if (perf_raw_frag_last(frag))
7947 break;
7948 frag = frag->next;
7949 } while (1);
7950 if (frag->pad)
7951 __output_skip(handle, NULL, frag->pad);
7952 } else {
7953 struct {
7954 u32 size;
7955 u32 data;
7956 } raw = {
7957 .size = sizeof(u32),
7958 .data = 0,
7959 };
7960 perf_output_put(handle, raw);
7961 }
7962 }
7963
7964 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
7965 if (data->br_stack) {
7966 size_t size;
7967
7968 size = data->br_stack->nr
7969 * sizeof(struct perf_branch_entry);
7970
7971 perf_output_put(handle, data->br_stack->nr);
7972 if (branch_sample_hw_index(event))
7973 perf_output_put(handle, data->br_stack->hw_idx);
7974 perf_output_copy(handle, data->br_stack->entries, size);
7975 /*
7976 * Add the extension space which is appended
7977 * right after the struct perf_branch_stack.
7978 */
7979 if (data->br_stack_cntr) {
7980 size = data->br_stack->nr * sizeof(u64);
7981 perf_output_copy(handle, data->br_stack_cntr, size);
7982 }
7983 } else {
7984 /*
7985 * we always store at least the value of nr
7986 */
7987 u64 nr = 0;
7988 perf_output_put(handle, nr);
7989 }
7990 }
7991
7992 if (sample_type & PERF_SAMPLE_REGS_USER) {
7993 u64 abi = data->regs_user.abi;
7994
7995 /*
7996 * If there are no regs to dump, notice it through
7997 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
7998 */
7999 perf_output_put(handle, abi);
8000
8001 if (abi) {
8002 u64 mask = event->attr.sample_regs_user;
8003 perf_output_sample_regs(handle,
8004 data->regs_user.regs,
8005 mask);
8006 }
8007 }
8008
8009 if (sample_type & PERF_SAMPLE_STACK_USER) {
8010 perf_output_sample_ustack(handle,
8011 data->stack_user_size,
8012 data->regs_user.regs);
8013 }
8014
8015 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
8016 perf_output_put(handle, data->weight.full);
8017
8018 if (sample_type & PERF_SAMPLE_DATA_SRC)
8019 perf_output_put(handle, data->data_src.val);
8020
8021 if (sample_type & PERF_SAMPLE_TRANSACTION)
8022 perf_output_put(handle, data->txn);
8023
8024 if (sample_type & PERF_SAMPLE_REGS_INTR) {
8025 u64 abi = data->regs_intr.abi;
8026 /*
8027 * If there are no regs to dump, notice it through
8028 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
8029 */
8030 perf_output_put(handle, abi);
8031
8032 if (abi) {
8033 u64 mask = event->attr.sample_regs_intr;
8034
8035 perf_output_sample_regs(handle,
8036 data->regs_intr.regs,
8037 mask);
8038 }
8039 }
8040
8041 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
8042 perf_output_put(handle, data->phys_addr);
8043
8044 if (sample_type & PERF_SAMPLE_CGROUP)
8045 perf_output_put(handle, data->cgroup);
8046
8047 if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
8048 perf_output_put(handle, data->data_page_size);
8049
8050 if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
8051 perf_output_put(handle, data->code_page_size);
8052
8053 if (sample_type & PERF_SAMPLE_AUX) {
8054 perf_output_put(handle, data->aux_size);
8055
8056 if (data->aux_size)
8057 perf_aux_sample_output(event, handle, data);
8058 }
8059
8060 if (!event->attr.watermark) {
8061 int wakeup_events = event->attr.wakeup_events;
8062
8063 if (wakeup_events) {
8064 struct perf_buffer *rb = handle->rb;
8065 int events = local_inc_return(&rb->events);
8066
8067 if (events >= wakeup_events) {
8068 local_sub(wakeup_events, &rb->events);
8069 local_inc(&rb->wakeup);
8070 }
8071 }
8072 }
8073 }
8074
8075 static u64 perf_virt_to_phys(u64 virt)
8076 {
8077 u64 phys_addr = 0;
8078
8079 if (!virt)
8080 return 0;
8081
8082 if (virt >= TASK_SIZE) {
8083 /* If it's vmalloc()d memory, leave phys_addr as 0 */
8084 if (virt_addr_valid((void *)(uintptr_t)virt) &&
8085 !(virt >= VMALLOC_START && virt < VMALLOC_END))
8086 phys_addr = (u64)virt_to_phys((void *)(uintptr_t)virt);
8087 } else {
8088 /*
8089 * Walking the page tables for a user address.
8090 * Interrupts are disabled, which prevents any tear-down
8091 * of the page tables.
8092 * Try the IRQ-safe get_user_page_fast_only() first;
8093 * if it fails, leave phys_addr as 0.
8094 */
8095 if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) {
8096 struct page *p;
8097
8098 pagefault_disable();
8099 if (get_user_page_fast_only(virt, 0, &p)) {
8100 phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
8101 put_page(p);
8102 }
8103 pagefault_enable();
8104 }
8105 }
8106
8107 return phys_addr;
8108 }
8109
8110 /*
8111 * Return the pagetable size of a given virtual address.
8112 */
8113 static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr)
8114 {
8115 u64 size = 0;
8116
8117 #ifdef CONFIG_HAVE_GUP_FAST
8118 pgd_t *pgdp, pgd;
8119 p4d_t *p4dp, p4d;
8120 pud_t *pudp, pud;
8121 pmd_t *pmdp, pmd;
8122 pte_t *ptep, pte;
8123
8124 pgdp = pgd_offset(mm, addr);
8125 pgd = READ_ONCE(*pgdp);
8126 if (pgd_none(pgd))
8127 return 0;
8128
8129 if (pgd_leaf(pgd))
8130 return pgd_leaf_size(pgd);
8131
8132 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
8133 p4d = READ_ONCE(*p4dp);
8134 if (!p4d_present(p4d))
8135 return 0;
8136
8137 if (p4d_leaf(p4d))
8138 return p4d_leaf_size(p4d);
8139
8140 pudp = pud_offset_lockless(p4dp, p4d, addr);
8141 pud = READ_ONCE(*pudp);
8142 if (!pud_present(pud))
8143 return 0;
8144
8145 if (pud_leaf(pud))
8146 return pud_leaf_size(pud);
8147
8148 pmdp = pmd_offset_lockless(pudp, pud, addr);
8149 again:
8150 pmd = pmdp_get_lockless(pmdp);
8151 if (!pmd_present(pmd))
8152 return 0;
8153
8154 if (pmd_leaf(pmd))
8155 return pmd_leaf_size(pmd);
8156
8157 ptep = pte_offset_map(&pmd, addr);
8158 if (!ptep)
8159 goto again;
8160
8161 pte = ptep_get_lockless(ptep);
8162 if (pte_present(pte))
8163 size = __pte_leaf_size(pmd, pte);
8164 pte_unmap(ptep);
8165 #endif /* CONFIG_HAVE_GUP_FAST */
8166
8167 return size;
8168 }
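/*
 * The size returned above is the leaf mapping size at @addr. As an
 * illustration, on x86-64 this would typically be 4 KiB for a regular
 * PTE mapping, 2 MiB for a PMD-level huge page and 1 GiB for a
 * PUD-level huge page; the exact values are architecture dependent.
 */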
8169
8170 static u64 perf_get_page_size(unsigned long addr)
8171 {
8172 struct mm_struct *mm;
8173 unsigned long flags;
8174 u64 size;
8175
8176 if (!addr)
8177 return 0;
8178
8179 /*
8180 * Software page-table walkers must disable IRQs,
8181 * which prevents any tear down of the page tables.
8182 */
8183 local_irq_save(flags);
8184
8185 mm = current->mm;
8186 if (!mm) {
8187 /*
8188 * For kernel threads and the like, use init_mm so that
8189 * we can find kernel memory.
8190 */
8191 mm = &init_mm;
8192 }
8193
8194 size = perf_get_pgtable_size(mm, addr);
8195
8196 local_irq_restore(flags);
8197
8198 return size;
8199 }
8200
8201 static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
8202
8203 struct perf_callchain_entry *
8204 perf_callchain(struct perf_event *event, struct pt_regs *regs)
8205 {
8206 bool kernel = !event->attr.exclude_callchain_kernel;
8207 bool user = !event->attr.exclude_callchain_user &&
8208 !(current->flags & (PF_KTHREAD | PF_USER_WORKER));
8209 /* Disallow cross-task user callchains. */
8210 bool crosstask = event->ctx->task && event->ctx->task != current;
8211 const u32 max_stack = event->attr.sample_max_stack;
8212 struct perf_callchain_entry *callchain;
8213
8214 if (!current->mm)
8215 user = false;
8216
8217 if (!kernel && !user)
8218 return &__empty_callchain;
8219
8220 callchain = get_perf_callchain(regs, kernel, user,
8221 max_stack, crosstask, true);
8222 return callchain ?: &__empty_callchain;
8223 }
8224
8225 static __always_inline u64 __cond_set(u64 flags, u64 s, u64 d)
8226 {
8227 return d * !!(flags & s);
8228 }
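/*
 * __cond_set() is a branchless "if (flags & s) return d, else 0".  For
 * example, __cond_set(sample_type, PERF_SAMPLE_STACK_USER,
 * PERF_SAMPLE_REGS_USER) yields PERF_SAMPLE_REGS_USER only when the
 * caller asked for a user stack dump, which is how the dependent sample
 * bits below get added.
 */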
8229
8230 void perf_prepare_sample(struct perf_sample_data *data,
8231 struct perf_event *event,
8232 struct pt_regs *regs)
8233 {
8234 u64 sample_type = event->attr.sample_type;
8235 u64 filtered_sample_type;
8236
8237 /*
8238 * Add the sample flags that are dependent on others, and clear the
8239 * sample flags that have already been handled by the PMU driver.
8240 */
8241 filtered_sample_type = sample_type;
8242 filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_CODE_PAGE_SIZE,
8243 PERF_SAMPLE_IP);
8244 filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_DATA_PAGE_SIZE |
8245 PERF_SAMPLE_PHYS_ADDR, PERF_SAMPLE_ADDR);
8246 filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_STACK_USER,
8247 PERF_SAMPLE_REGS_USER);
8248 filtered_sample_type &= ~data->sample_flags;
8249
8250 if (filtered_sample_type == 0) {
8251 /* Make sure it has the correct data->type for output */
8252 data->type = event->attr.sample_type;
8253 return;
8254 }
8255
8256 __perf_event_header__init_id(data, event, filtered_sample_type);
8257
8258 if (filtered_sample_type & PERF_SAMPLE_IP) {
8259 data->ip = perf_instruction_pointer(event, regs);
8260 data->sample_flags |= PERF_SAMPLE_IP;
8261 }
8262
8263 if (filtered_sample_type & PERF_SAMPLE_CALLCHAIN)
8264 perf_sample_save_callchain(data, event, regs);
8265
8266 if (filtered_sample_type & PERF_SAMPLE_RAW) {
8267 data->raw = NULL;
8268 data->dyn_size += sizeof(u64);
8269 data->sample_flags |= PERF_SAMPLE_RAW;
8270 }
8271
8272 if (filtered_sample_type & PERF_SAMPLE_BRANCH_STACK) {
8273 data->br_stack = NULL;
8274 data->dyn_size += sizeof(u64);
8275 data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
8276 }
8277
8278 if (filtered_sample_type & PERF_SAMPLE_REGS_USER)
8279 perf_sample_regs_user(&data->regs_user, regs);
8280
8281 /*
8282 * We cannot use filtered_sample_type here because REGS_USER can be set
8283 * by STACK_USER (via __cond_set() above) and we don't want to update
8284 * dyn_size if it wasn't requested by the user.
8285 */
8286 if ((sample_type & ~data->sample_flags) & PERF_SAMPLE_REGS_USER) {
8287 /* regs dump ABI info */
8288 int size = sizeof(u64);
8289
8290 if (data->regs_user.regs) {
8291 u64 mask = event->attr.sample_regs_user;
8292 size += hweight64(mask) * sizeof(u64);
8293 }
8294
8295 data->dyn_size += size;
8296 data->sample_flags |= PERF_SAMPLE_REGS_USER;
8297 }
8298
8299 if (filtered_sample_type & PERF_SAMPLE_STACK_USER) {
8300 /*
8301 * Either we need PERF_SAMPLE_STACK_USER bit to be always
8302 * processed as the last one or have additional check added
8303 * in case new sample type is added, because we could eat
8304 * up the rest of the sample size.
8305 */
8306 u16 stack_size = event->attr.sample_stack_user;
8307 u16 header_size = perf_sample_data_size(data, event);
8308 u16 size = sizeof(u64);
8309
8310 stack_size = perf_sample_ustack_size(stack_size, header_size,
8311 data->regs_user.regs);
8312
8313 /*
8314 * If there is something to dump, add space for the dump
8315 * itself and for the field that tells the dynamic size,
8316 * which is how many have been actually dumped.
8317 */
8318 if (stack_size)
8319 size += sizeof(u64) + stack_size;
8320
8321 data->stack_user_size = stack_size;
8322 data->dyn_size += size;
8323 data->sample_flags |= PERF_SAMPLE_STACK_USER;
8324 }
8325
8326 if (filtered_sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
8327 data->weight.full = 0;
8328 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
8329 }
8330
8331 if (filtered_sample_type & PERF_SAMPLE_DATA_SRC) {
8332 data->data_src.val = PERF_MEM_NA;
8333 data->sample_flags |= PERF_SAMPLE_DATA_SRC;
8334 }
8335
8336 if (filtered_sample_type & PERF_SAMPLE_TRANSACTION) {
8337 data->txn = 0;
8338 data->sample_flags |= PERF_SAMPLE_TRANSACTION;
8339 }
8340
8341 if (filtered_sample_type & PERF_SAMPLE_ADDR) {
8342 data->addr = 0;
8343 data->sample_flags |= PERF_SAMPLE_ADDR;
8344 }
8345
8346 if (filtered_sample_type & PERF_SAMPLE_REGS_INTR) {
8347 /* regs dump ABI info */
8348 int size = sizeof(u64);
8349
8350 perf_sample_regs_intr(&data->regs_intr, regs);
8351
8352 if (data->regs_intr.regs) {
8353 u64 mask = event->attr.sample_regs_intr;
8354
8355 size += hweight64(mask) * sizeof(u64);
8356 }
8357
8358 data->dyn_size += size;
8359 data->sample_flags |= PERF_SAMPLE_REGS_INTR;
8360 }
8361
8362 if (filtered_sample_type & PERF_SAMPLE_PHYS_ADDR) {
8363 data->phys_addr = perf_virt_to_phys(data->addr);
8364 data->sample_flags |= PERF_SAMPLE_PHYS_ADDR;
8365 }
8366
8367 #ifdef CONFIG_CGROUP_PERF
8368 if (filtered_sample_type & PERF_SAMPLE_CGROUP) {
8369 struct cgroup *cgrp;
8370
8371 /* protected by RCU */
8372 cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup;
8373 data->cgroup = cgroup_id(cgrp);
8374 data->sample_flags |= PERF_SAMPLE_CGROUP;
8375 }
8376 #endif
8377
8378 /*
8379 * PERF_SAMPLE_DATA_PAGE_SIZE requires PERF_SAMPLE_ADDR. If the user
8380 * didn't request PERF_SAMPLE_ADDR, the kernel implicitly retrieves
8381 * data->addr, but the value is not dumped to userspace.
8382 */
8383 if (filtered_sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) {
8384 data->data_page_size = perf_get_page_size(data->addr);
8385 data->sample_flags |= PERF_SAMPLE_DATA_PAGE_SIZE;
8386 }
8387
8388 if (filtered_sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) {
8389 data->code_page_size = perf_get_page_size(data->ip);
8390 data->sample_flags |= PERF_SAMPLE_CODE_PAGE_SIZE;
8391 }
8392
8393 if (filtered_sample_type & PERF_SAMPLE_AUX) {
8394 u64 size;
8395 u16 header_size = perf_sample_data_size(data, event);
8396
8397 header_size += sizeof(u64); /* size */
8398
8399 /*
8400 * Given the 16bit nature of header::size, an AUX sample can
8401 * easily overflow it, what with all the preceding sample bits.
8402 * Make sure this doesn't happen by using up to U16_MAX bytes
8403 * per sample in total (rounded down to 8 byte boundary).
8404 */
8405 size = min_t(size_t, U16_MAX - header_size,
8406 event->attr.aux_sample_size);
8407 size = rounddown(size, 8);
8408 size = perf_prepare_sample_aux(event, data, size);
8409
8410 WARN_ON_ONCE(size + header_size > U16_MAX);
8411 data->dyn_size += size + sizeof(u64); /* size above */
8412 data->sample_flags |= PERF_SAMPLE_AUX;
8413 }
8414 }
8415
8416 void perf_prepare_header(struct perf_event_header *header,
8417 struct perf_sample_data *data,
8418 struct perf_event *event,
8419 struct pt_regs *regs)
8420 {
8421 header->type = PERF_RECORD_SAMPLE;
8422 header->size = perf_sample_data_size(data, event);
8423 header->misc = perf_misc_flags(event, regs);
8424
8425 /*
8426 * If you're adding more sample types here, you likely need to do
8427 * something about the overflowing header::size, like repurpose the
8428 * lowest 3 bits of size, which should be always zero at the moment.
8429 * This raises a more important question, do we really need 512k sized
8430 * samples and why, so good argumentation is in order for whatever you
8431 * do here next.
8432 */
8433 WARN_ON_ONCE(header->size & 7);
8434 }
8435
8436 static void __perf_event_aux_pause(struct perf_event *event, bool pause)
8437 {
8438 if (pause) {
8439 if (!event->hw.aux_paused) {
8440 event->hw.aux_paused = 1;
8441 event->pmu->stop(event, PERF_EF_PAUSE);
8442 }
8443 } else {
8444 if (event->hw.aux_paused) {
8445 event->hw.aux_paused = 0;
8446 event->pmu->start(event, PERF_EF_RESUME);
8447 }
8448 }
8449 }
8450
8451 static void perf_event_aux_pause(struct perf_event *event, bool pause)
8452 {
8453 struct perf_buffer *rb;
8454
8455 if (WARN_ON_ONCE(!event))
8456 return;
8457
8458 rb = ring_buffer_get(event);
8459 if (!rb)
8460 return;
8461
8462 scoped_guard (irqsave) {
8463 /*
8464 * Guard against self-recursion here. Another event could trip
8465 * this same path from NMI context.
8466 */
8467 if (READ_ONCE(rb->aux_in_pause_resume))
8468 break;
8469
8470 WRITE_ONCE(rb->aux_in_pause_resume, 1);
8471 barrier();
8472 __perf_event_aux_pause(event, pause);
8473 barrier();
8474 WRITE_ONCE(rb->aux_in_pause_resume, 0);
8475 }
8476 ring_buffer_put(rb);
8477 }
8478
8479 static __always_inline int
8480 __perf_event_output(struct perf_event *event,
8481 struct perf_sample_data *data,
8482 struct pt_regs *regs,
8483 int (*output_begin)(struct perf_output_handle *,
8484 struct perf_sample_data *,
8485 struct perf_event *,
8486 unsigned int))
8487 {
8488 struct perf_output_handle handle;
8489 struct perf_event_header header;
8490 int err;
8491
8492 /* protect the callchain buffers */
8493 rcu_read_lock();
8494
8495 perf_prepare_sample(data, event, regs);
8496 perf_prepare_header(&header, data, event, regs);
8497
8498 err = output_begin(&handle, data, event, header.size);
8499 if (err)
8500 goto exit;
8501
8502 perf_output_sample(&handle, &header, data, event);
8503
8504 perf_output_end(&handle);
8505
8506 exit:
8507 rcu_read_unlock();
8508 return err;
8509 }
8510
8511 void
8512 perf_event_output_forward(struct perf_event *event,
8513 struct perf_sample_data *data,
8514 struct pt_regs *regs)
8515 {
8516 __perf_event_output(event, data, regs, perf_output_begin_forward);
8517 }
8518
8519 void
8520 perf_event_output_backward(struct perf_event *event,
8521 struct perf_sample_data *data,
8522 struct pt_regs *regs)
8523 {
8524 __perf_event_output(event, data, regs, perf_output_begin_backward);
8525 }
8526
8527 int
8528 perf_event_output(struct perf_event *event,
8529 struct perf_sample_data *data,
8530 struct pt_regs *regs)
8531 {
8532 return __perf_event_output(event, data, regs, perf_output_begin);
8533 }
8534
8535 /*
8536 * read event_id
8537 */
8538
8539 struct perf_read_event {
8540 struct perf_event_header header;
8541
8542 u32 pid;
8543 u32 tid;
8544 };
8545
8546 static void
8547 perf_event_read_event(struct perf_event *event,
8548 struct task_struct *task)
8549 {
8550 struct perf_output_handle handle;
8551 struct perf_sample_data sample;
8552 struct perf_read_event read_event = {
8553 .header = {
8554 .type = PERF_RECORD_READ,
8555 .misc = 0,
8556 .size = sizeof(read_event) + event->read_size,
8557 },
8558 .pid = perf_event_pid(event, task),
8559 .tid = perf_event_tid(event, task),
8560 };
8561 int ret;
8562
8563 perf_event_header__init_id(&read_event.header, &sample, event);
8564 ret = perf_output_begin(&handle, &sample, event, read_event.header.size);
8565 if (ret)
8566 return;
8567
8568 perf_output_put(&handle, read_event);
8569 perf_output_read(&handle, event);
8570 perf_event__output_id_sample(event, &handle, &sample);
8571
8572 perf_output_end(&handle);
8573 }
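/*
 * The resulting PERF_RECORD_READ record, as a sketch of the layout seen
 * by user-space:
 *
 *	struct perf_event_header header;
 *	u32 pid, tid;
 *	read_format values;		// see perf_output_read() above
 *	sample_id trailer;		// only if attr.sample_id_all
 */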
8574
8575 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
8576
8577 static void
8578 perf_iterate_ctx(struct perf_event_context *ctx,
8579 perf_iterate_f output,
8580 void *data, bool all)
8581 {
8582 struct perf_event *event;
8583
8584 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
8585 if (!all) {
8586 if (event->state < PERF_EVENT_STATE_INACTIVE)
8587 continue;
8588 if (!event_filter_match(event))
8589 continue;
8590 }
8591
8592 output(event, data);
8593 }
8594 }
8595
8596 static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
8597 {
8598 struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
8599 struct perf_event *event;
8600
8601 list_for_each_entry_rcu(event, &pel->list, sb_list) {
8602 /*
8603 * Skip events that are not fully formed yet; ensure that
8604 * if we observe event->ctx, both event and ctx will be
8605 * complete enough. See perf_install_in_context().
8606 */
8607 if (!smp_load_acquire(&event->ctx))
8608 continue;
8609
8610 if (event->state < PERF_EVENT_STATE_INACTIVE)
8611 continue;
8612 if (!event_filter_match(event))
8613 continue;
8614 output(event, data);
8615 }
8616 }
8617
8618 /*
8619 * Iterate all events that need to receive side-band events.
8620 *
8621 * For new callers, ensure that account_pmu_sb_event() includes
8622 * your event, otherwise it might not get delivered.
8623 */
8624 static void
8625 perf_iterate_sb(perf_iterate_f output, void *data,
8626 struct perf_event_context *task_ctx)
8627 {
8628 struct perf_event_context *ctx;
8629
8630 rcu_read_lock();
8631 preempt_disable();
8632
8633 /*
8634 * If we have task_ctx != NULL we only notify the task context itself.
8635 * The task_ctx is set only for EXIT events before releasing task
8636 * context.
8637 */
8638 if (task_ctx) {
8639 perf_iterate_ctx(task_ctx, output, data, false);
8640 goto done;
8641 }
8642
8643 perf_iterate_sb_cpu(output, data);
8644
8645 ctx = rcu_dereference(current->perf_event_ctxp);
8646 if (ctx)
8647 perf_iterate_ctx(ctx, output, data, false);
8648 done:
8649 preempt_enable();
8650 rcu_read_unlock();
8651 }
8652
8653 /*
8654 * Clear all file-based filters at exec, they'll have to be
8655 * re-instated when/if these objects are mmapped again.
8656 */
8657 static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
8658 {
8659 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8660 struct perf_addr_filter *filter;
8661 unsigned int restart = 0, count = 0;
8662 unsigned long flags;
8663
8664 if (!has_addr_filter(event))
8665 return;
8666
8667 raw_spin_lock_irqsave(&ifh->lock, flags);
8668 list_for_each_entry(filter, &ifh->list, entry) {
8669 if (filter->path.dentry) {
8670 event->addr_filter_ranges[count].start = 0;
8671 event->addr_filter_ranges[count].size = 0;
8672 restart++;
8673 }
8674
8675 count++;
8676 }
8677
8678 if (restart)
8679 event->addr_filters_gen++;
8680 raw_spin_unlock_irqrestore(&ifh->lock, flags);
8681
8682 if (restart)
8683 perf_event_stop(event, 1);
8684 }
8685
8686 void perf_event_exec(void)
8687 {
8688 struct perf_event_context *ctx;
8689
8690 ctx = perf_pin_task_context(current);
8691 if (!ctx)
8692 return;
8693
8694 perf_event_enable_on_exec(ctx);
8695 perf_event_remove_on_exec(ctx);
8696 scoped_guard(rcu)
8697 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
8698
8699 perf_unpin_context(ctx);
8700 put_ctx(ctx);
8701 }
8702
8703 struct remote_output {
8704 struct perf_buffer *rb;
8705 int err;
8706 };
8707
8708 static void __perf_event_output_stop(struct perf_event *event, void *data)
8709 {
8710 struct perf_event *parent = event->parent;
8711 struct remote_output *ro = data;
8712 struct perf_buffer *rb = ro->rb;
8713 struct stop_event_data sd = {
8714 .event = event,
8715 };
8716
8717 if (!has_aux(event))
8718 return;
8719
8720 if (!parent)
8721 parent = event;
8722
8723 /*
8724 * In case of inheritance, it will be the parent that links to the
8725 * ring-buffer, but it will be the child that's actually using it.
8726 *
8727 * We are using event::rb to determine if the event should be stopped,
8728 * however this may race with ring_buffer_attach() (through set_output),
8729 * which will make us skip the event that actually needs to be stopped.
8730 * So ring_buffer_attach() has to stop an aux event before re-assigning
8731 * its rb pointer.
8732 */
8733 if (rcu_dereference(parent->rb) == rb)
8734 ro->err = __perf_event_stop(&sd);
8735 }
8736
8737 static int __perf_pmu_output_stop(void *info)
8738 {
8739 struct perf_event *event = info;
8740 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
8741 struct remote_output ro = {
8742 .rb = event->rb,
8743 };
8744
8745 rcu_read_lock();
8746 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
8747 if (cpuctx->task_ctx)
8748 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
8749 &ro, false);
8750 rcu_read_unlock();
8751
8752 return ro.err;
8753 }
8754
8755 static void perf_pmu_output_stop(struct perf_event *event)
8756 {
8757 struct perf_event *iter;
8758 int err, cpu;
8759
8760 restart:
8761 rcu_read_lock();
8762 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
8763 /*
8764 * For per-CPU events, we need to make sure that neither they
8765 * nor their children are running; for cpu==-1 events it's
8766 * sufficient to stop the event itself if it's active, since
8767 * it can't have children.
8768 */
8769 cpu = iter->cpu;
8770 if (cpu == -1)
8771 cpu = READ_ONCE(iter->oncpu);
8772
8773 if (cpu == -1)
8774 continue;
8775
8776 err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
8777 if (err == -EAGAIN) {
8778 rcu_read_unlock();
8779 goto restart;
8780 }
8781 }
8782 rcu_read_unlock();
8783 }
8784
8785 /*
8786 * task tracking -- fork/exit
8787 *
8788 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
8789 */
8790
8791 struct perf_task_event {
8792 struct task_struct *task;
8793 struct perf_event_context *task_ctx;
8794
8795 struct {
8796 struct perf_event_header header;
8797
8798 u32 pid;
8799 u32 ppid;
8800 u32 tid;
8801 u32 ptid;
8802 u64 time;
8803 } event_id;
8804 };
8805
8806 static int perf_event_task_match(struct perf_event *event)
8807 {
8808 return event->attr.comm || event->attr.mmap ||
8809 event->attr.mmap2 || event->attr.mmap_data ||
8810 event->attr.task;
8811 }
8812
8813 static void perf_event_task_output(struct perf_event *event,
8814 void *data)
8815 {
8816 struct perf_task_event *task_event = data;
8817 struct perf_output_handle handle;
8818 struct perf_sample_data sample;
8819 struct task_struct *task = task_event->task;
8820 int ret, size = task_event->event_id.header.size;
8821
8822 if (!perf_event_task_match(event))
8823 return;
8824
8825 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
8826
8827 ret = perf_output_begin(&handle, &sample, event,
8828 task_event->event_id.header.size);
8829 if (ret)
8830 goto out;
8831
8832 task_event->event_id.pid = perf_event_pid(event, task);
8833 task_event->event_id.tid = perf_event_tid(event, task);
8834
8835 if (task_event->event_id.header.type == PERF_RECORD_EXIT) {
8836 task_event->event_id.ppid = perf_event_pid(event,
8837 task->real_parent);
8838 task_event->event_id.ptid = perf_event_pid(event,
8839 task->real_parent);
8840 } else { /* PERF_RECORD_FORK */
8841 task_event->event_id.ppid = perf_event_pid(event, current);
8842 task_event->event_id.ptid = perf_event_tid(event, current);
8843 }
8844
8845 task_event->event_id.time = perf_event_clock(event);
8846
8847 perf_output_put(&handle, task_event->event_id);
8848
8849 perf_event__output_id_sample(event, &handle, &sample);
8850
8851 perf_output_end(&handle);
8852 out:
8853 task_event->event_id.header.size = size;
8854 }
8855
8856 static void perf_event_task(struct task_struct *task,
8857 struct perf_event_context *task_ctx,
8858 int new)
8859 {
8860 struct perf_task_event task_event;
8861
8862 if (!atomic_read(&nr_comm_events) &&
8863 !atomic_read(&nr_mmap_events) &&
8864 !atomic_read(&nr_task_events))
8865 return;
8866
8867 task_event = (struct perf_task_event){
8868 .task = task,
8869 .task_ctx = task_ctx,
8870 .event_id = {
8871 .header = {
8872 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
8873 .misc = 0,
8874 .size = sizeof(task_event.event_id),
8875 },
8876 /* .pid */
8877 /* .ppid */
8878 /* .tid */
8879 /* .ptid */
8880 /* .time */
8881 },
8882 };
8883
8884 perf_iterate_sb(perf_event_task_output,
8885 &task_event,
8886 task_ctx);
8887 }
8888
8889 /*
8890 * Allocate data for a new task when profiling system-wide
8891 * events which require PMU specific data
8892 */
8893 static void
8894 perf_event_alloc_task_data(struct task_struct *child,
8895 struct task_struct *parent)
8896 {
8897 struct kmem_cache *ctx_cache = NULL;
8898 struct perf_ctx_data *cd;
8899
8900 if (!refcount_read(&global_ctx_data_ref))
8901 return;
8902
8903 scoped_guard (rcu) {
8904 cd = rcu_dereference(parent->perf_ctx_data);
8905 if (cd)
8906 ctx_cache = cd->ctx_cache;
8907 }
8908
8909 if (!ctx_cache)
8910 return;
8911
8912 guard(percpu_read)(&global_ctx_data_rwsem);
8913 scoped_guard (rcu) {
8914 cd = rcu_dereference(child->perf_ctx_data);
8915 if (!cd) {
8916 /*
8917 * A system-wide event may have been unaccounted
8918 * while we were attaching the perf_ctx_data.
8919 */
8920 if (!refcount_read(&global_ctx_data_ref))
8921 return;
8922 goto attach;
8923 }
8924
8925 if (!cd->global) {
8926 cd->global = 1;
8927 refcount_inc(&cd->refcount);
8928 }
8929 }
8930
8931 return;
8932 attach:
8933 attach_task_ctx_data(child, ctx_cache, true);
8934 }
8935
8936 void perf_event_fork(struct task_struct *task)
8937 {
8938 perf_event_task(task, NULL, 1);
8939 perf_event_namespaces(task);
8940 perf_event_alloc_task_data(task, current);
8941 }
8942
8943 /*
8944 * comm tracking
8945 */
8946
8947 struct perf_comm_event {
8948 struct task_struct *task;
8949 char *comm;
8950 int comm_size;
8951
8952 struct {
8953 struct perf_event_header header;
8954
8955 u32 pid;
8956 u32 tid;
8957 } event_id;
8958 };
8959
8960 static int perf_event_comm_match(struct perf_event *event)
8961 {
8962 return event->attr.comm;
8963 }
8964
8965 static void perf_event_comm_output(struct perf_event *event,
8966 void *data)
8967 {
8968 struct perf_comm_event *comm_event = data;
8969 struct perf_output_handle handle;
8970 struct perf_sample_data sample;
8971 int size = comm_event->event_id.header.size;
8972 int ret;
8973
8974 if (!perf_event_comm_match(event))
8975 return;
8976
8977 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
8978 ret = perf_output_begin(&handle, &sample, event,
8979 comm_event->event_id.header.size);
8980
8981 if (ret)
8982 goto out;
8983
8984 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
8985 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
8986
8987 perf_output_put(&handle, comm_event->event_id);
8988 __output_copy(&handle, comm_event->comm,
8989 comm_event->comm_size);
8990
8991 perf_event__output_id_sample(event, &handle, &sample);
8992
8993 perf_output_end(&handle);
8994 out:
8995 comm_event->event_id.header.size = size;
8996 }
8997
8998 static void perf_event_comm_event(struct perf_comm_event *comm_event)
8999 {
9000 char comm[TASK_COMM_LEN];
9001 unsigned int size;
9002
9003 memset(comm, 0, sizeof(comm));
9004 strscpy(comm, comm_event->task->comm);
9005 size = ALIGN(strlen(comm)+1, sizeof(u64));
9006
9007 comm_event->comm = comm;
9008 comm_event->comm_size = size;
9009
9010 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
9011
9012 perf_iterate_sb(perf_event_comm_output,
9013 comm_event,
9014 NULL);
9015 }
9016
9017 void perf_event_comm(struct task_struct *task, bool exec)
9018 {
9019 struct perf_comm_event comm_event;
9020
9021 if (!atomic_read(&nr_comm_events))
9022 return;
9023
9024 comm_event = (struct perf_comm_event){
9025 .task = task,
9026 /* .comm */
9027 /* .comm_size */
9028 .event_id = {
9029 .header = {
9030 .type = PERF_RECORD_COMM,
9031 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
9032 /* .size */
9033 },
9034 /* .pid */
9035 /* .tid */
9036 },
9037 };
9038
9039 perf_event_comm_event(&comm_event);
9040 }
9041
9042 /*
9043 * namespaces tracking
9044 */
9045
9046 struct perf_namespaces_event {
9047 struct task_struct *task;
9048
9049 struct {
9050 struct perf_event_header header;
9051
9052 u32 pid;
9053 u32 tid;
9054 u64 nr_namespaces;
9055 struct perf_ns_link_info link_info[NR_NAMESPACES];
9056 } event_id;
9057 };
9058
9059 static int perf_event_namespaces_match(struct perf_event *event)
9060 {
9061 return event->attr.namespaces;
9062 }
9063
9064 static void perf_event_namespaces_output(struct perf_event *event,
9065 void *data)
9066 {
9067 struct perf_namespaces_event *namespaces_event = data;
9068 struct perf_output_handle handle;
9069 struct perf_sample_data sample;
9070 u16 header_size = namespaces_event->event_id.header.size;
9071 int ret;
9072
9073 if (!perf_event_namespaces_match(event))
9074 return;
9075
9076 perf_event_header__init_id(&namespaces_event->event_id.header,
9077 &sample, event);
9078 ret = perf_output_begin(&handle, &sample, event,
9079 namespaces_event->event_id.header.size);
9080 if (ret)
9081 goto out;
9082
9083 namespaces_event->event_id.pid = perf_event_pid(event,
9084 namespaces_event->task);
9085 namespaces_event->event_id.tid = perf_event_tid(event,
9086 namespaces_event->task);
9087
9088 perf_output_put(&handle, namespaces_event->event_id);
9089
9090 perf_event__output_id_sample(event, &handle, &sample);
9091
9092 perf_output_end(&handle);
9093 out:
9094 namespaces_event->event_id.header.size = header_size;
9095 }
9096
9097 static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
9098 struct task_struct *task,
9099 const struct proc_ns_operations *ns_ops)
9100 {
9101 struct path ns_path;
9102 struct inode *ns_inode;
9103 int error;
9104
9105 error = ns_get_path(&ns_path, task, ns_ops);
9106 if (!error) {
9107 ns_inode = ns_path.dentry->d_inode;
9108 ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
9109 ns_link_info->ino = ns_inode->i_ino;
9110 path_put(&ns_path);
9111 }
9112 }
9113
9114 void perf_event_namespaces(struct task_struct *task)
9115 {
9116 struct perf_namespaces_event namespaces_event;
9117 struct perf_ns_link_info *ns_link_info;
9118
9119 if (!atomic_read(&nr_namespaces_events))
9120 return;
9121
9122 namespaces_event = (struct perf_namespaces_event){
9123 .task = task,
9124 .event_id = {
9125 .header = {
9126 .type = PERF_RECORD_NAMESPACES,
9127 .misc = 0,
9128 .size = sizeof(namespaces_event.event_id),
9129 },
9130 /* .pid */
9131 /* .tid */
9132 .nr_namespaces = NR_NAMESPACES,
9133 /* .link_info[NR_NAMESPACES] */
9134 },
9135 };
9136
9137 ns_link_info = namespaces_event.event_id.link_info;
9138
9139 perf_fill_ns_link_info(&ns_link_info[MNT_NS_INDEX],
9140 task, &mntns_operations);
9141
9142 #ifdef CONFIG_USER_NS
9143 perf_fill_ns_link_info(&ns_link_info[USER_NS_INDEX],
9144 task, &userns_operations);
9145 #endif
9146 #ifdef CONFIG_NET_NS
9147 perf_fill_ns_link_info(&ns_link_info[NET_NS_INDEX],
9148 task, &netns_operations);
9149 #endif
9150 #ifdef CONFIG_UTS_NS
9151 perf_fill_ns_link_info(&ns_link_info[UTS_NS_INDEX],
9152 task, &utsns_operations);
9153 #endif
9154 #ifdef CONFIG_IPC_NS
9155 perf_fill_ns_link_info(&ns_link_info[IPC_NS_INDEX],
9156 task, &ipcns_operations);
9157 #endif
9158 #ifdef CONFIG_PID_NS
9159 perf_fill_ns_link_info(&ns_link_info[PID_NS_INDEX],
9160 task, &pidns_operations);
9161 #endif
9162 #ifdef CONFIG_CGROUPS
9163 perf_fill_ns_link_info(&ns_link_info[CGROUP_NS_INDEX],
9164 task, &cgroupns_operations);
9165 #endif
9166
9167 perf_iterate_sb(perf_event_namespaces_output,
9168 &namespaces_event,
9169 NULL);
9170 }
9171
9172 /*
9173 * cgroup tracking
9174 */
9175 #ifdef CONFIG_CGROUP_PERF
9176
9177 struct perf_cgroup_event {
9178 char *path;
9179 int path_size;
9180 struct {
9181 struct perf_event_header header;
9182 u64 id;
9183 char path[];
9184 } event_id;
9185 };
9186
9187 static int perf_event_cgroup_match(struct perf_event *event)
9188 {
9189 return event->attr.cgroup;
9190 }
9191
9192 static void perf_event_cgroup_output(struct perf_event *event, void *data)
9193 {
9194 struct perf_cgroup_event *cgroup_event = data;
9195 struct perf_output_handle handle;
9196 struct perf_sample_data sample;
9197 u16 header_size = cgroup_event->event_id.header.size;
9198 int ret;
9199
9200 if (!perf_event_cgroup_match(event))
9201 return;
9202
9203 perf_event_header__init_id(&cgroup_event->event_id.header,
9204 &sample, event);
9205 ret = perf_output_begin(&handle, &sample, event,
9206 cgroup_event->event_id.header.size);
9207 if (ret)
9208 goto out;
9209
9210 perf_output_put(&handle, cgroup_event->event_id);
9211 __output_copy(&handle, cgroup_event->path, cgroup_event->path_size);
9212
9213 perf_event__output_id_sample(event, &handle, &sample);
9214
9215 perf_output_end(&handle);
9216 out:
9217 cgroup_event->event_id.header.size = header_size;
9218 }
9219
9220 static void perf_event_cgroup(struct cgroup *cgrp)
9221 {
9222 struct perf_cgroup_event cgroup_event;
9223 char path_enomem[16] = "//enomem";
9224 char *pathname;
9225 size_t size;
9226
9227 if (!atomic_read(&nr_cgroup_events))
9228 return;
9229
9230 cgroup_event = (struct perf_cgroup_event){
9231 .event_id = {
9232 .header = {
9233 .type = PERF_RECORD_CGROUP,
9234 .misc = 0,
9235 .size = sizeof(cgroup_event.event_id),
9236 },
9237 .id = cgroup_id(cgrp),
9238 },
9239 };
9240
9241 pathname = kmalloc(PATH_MAX, GFP_KERNEL);
9242 if (pathname == NULL) {
9243 cgroup_event.path = path_enomem;
9244 } else {
9245 /* just to be sure to have enough space for alignment */
9246 cgroup_path(cgrp, pathname, PATH_MAX - sizeof(u64));
9247 cgroup_event.path = pathname;
9248 }
9249
9250 /*
9251 * Since our buffer works in 8 byte units we need to align our string
9252 * size to a multiple of 8. However, we must guarantee the tail end is
9253 * zero'd out to avoid leaking random bits to userspace.
9254 */
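/* E.g. a path of "/foo" occupies 5 bytes including its NUL and is padded to 8. */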
9255 size = strlen(cgroup_event.path) + 1;
9256 while (!IS_ALIGNED(size, sizeof(u64)))
9257 cgroup_event.path[size++] = '\0';
9258
9259 cgroup_event.event_id.header.size += size;
9260 cgroup_event.path_size = size;
9261
9262 perf_iterate_sb(perf_event_cgroup_output,
9263 &cgroup_event,
9264 NULL);
9265
9266 kfree(pathname);
9267 }
9268
9269 #endif
9270
9271 /*
9272 * mmap tracking
9273 */
9274
9275 struct perf_mmap_event {
9276 struct vm_area_struct *vma;
9277
9278 const char *file_name;
9279 int file_size;
9280 int maj, min;
9281 u64 ino;
9282 u64 ino_generation;
9283 u32 prot, flags;
9284 u8 build_id[BUILD_ID_SIZE_MAX];
9285 u32 build_id_size;
9286
9287 struct {
9288 struct perf_event_header header;
9289
9290 u32 pid;
9291 u32 tid;
9292 u64 start;
9293 u64 len;
9294 u64 pgoff;
9295 } event_id;
9296 };
9297
9298 static int perf_event_mmap_match(struct perf_event *event,
9299 void *data)
9300 {
9301 struct perf_mmap_event *mmap_event = data;
9302 struct vm_area_struct *vma = mmap_event->vma;
9303 int executable = vma->vm_flags & VM_EXEC;
9304
9305 return (!executable && event->attr.mmap_data) ||
9306 (executable && (event->attr.mmap || event->attr.mmap2));
9307 }
9308
9309 static void perf_event_mmap_output(struct perf_event *event,
9310 void *data)
9311 {
9312 struct perf_mmap_event *mmap_event = data;
9313 struct perf_output_handle handle;
9314 struct perf_sample_data sample;
9315 int size = mmap_event->event_id.header.size;
9316 u32 type = mmap_event->event_id.header.type;
9317 bool use_build_id;
9318 int ret;
9319
9320 if (!perf_event_mmap_match(event, data))
9321 return;
9322
9323 if (event->attr.mmap2) {
9324 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
9325 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
9326 mmap_event->event_id.header.size += sizeof(mmap_event->min);
9327 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
9328 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
9329 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
9330 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
9331 }
9332
9333 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
9334 ret = perf_output_begin(&handle, &sample, event,
9335 mmap_event->event_id.header.size);
9336 if (ret)
9337 goto out;
9338
9339 mmap_event->event_id.pid = perf_event_pid(event, current);
9340 mmap_event->event_id.tid = perf_event_tid(event, current);
9341
9342 use_build_id = event->attr.build_id && mmap_event->build_id_size;
9343
9344 if (event->attr.mmap2 && use_build_id)
9345 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
9346
9347 perf_output_put(&handle, mmap_event->event_id);
9348
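/*
 * For PERF_RECORD_MMAP2, the maj/min/ino/ino_generation slots are reused
 * to carry the build ID (1 size byte, 3 zero bytes, then BUILD_ID_SIZE_MAX
 * id bytes) when one is available and the event asked for it; the
 * PERF_RECORD_MISC_MMAP_BUILD_ID misc bit set above marks that layout.
 */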
9349 if (event->attr.mmap2) {
9350 if (use_build_id) {
9351 u8 size[4] = { (u8) mmap_event->build_id_size, 0, 0, 0 };
9352
9353 __output_copy(&handle, size, 4);
9354 __output_copy(&handle, mmap_event->build_id, BUILD_ID_SIZE_MAX);
9355 } else {
9356 perf_output_put(&handle, mmap_event->maj);
9357 perf_output_put(&handle, mmap_event->min);
9358 perf_output_put(&handle, mmap_event->ino);
9359 perf_output_put(&handle, mmap_event->ino_generation);
9360 }
9361 perf_output_put(&handle, mmap_event->prot);
9362 perf_output_put(&handle, mmap_event->flags);
9363 }
9364
9365 __output_copy(&handle, mmap_event->file_name,
9366 mmap_event->file_size);
9367
9368 perf_event__output_id_sample(event, &handle, &sample);
9369
9370 perf_output_end(&handle);
9371 out:
9372 mmap_event->event_id.header.size = size;
9373 mmap_event->event_id.header.type = type;
9374 }
9375
9376 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
9377 {
9378 struct vm_area_struct *vma = mmap_event->vma;
9379 struct file *file = vma->vm_file;
9380 int maj = 0, min = 0;
9381 u64 ino = 0, gen = 0;
9382 u32 prot = 0, flags = 0;
9383 unsigned int size;
9384 char tmp[16];
9385 char *buf = NULL;
9386 char *name = NULL;
9387
9388 if (vma->vm_flags & VM_READ)
9389 prot |= PROT_READ;
9390 if (vma->vm_flags & VM_WRITE)
9391 prot |= PROT_WRITE;
9392 if (vma->vm_flags & VM_EXEC)
9393 prot |= PROT_EXEC;
9394
9395 if (vma->vm_flags & VM_MAYSHARE)
9396 flags = MAP_SHARED;
9397 else
9398 flags = MAP_PRIVATE;
9399
9400 if (vma->vm_flags & VM_LOCKED)
9401 flags |= MAP_LOCKED;
9402 if (is_vm_hugetlb_page(vma))
9403 flags |= MAP_HUGETLB;
9404
9405 if (file) {
9406 struct inode *inode;
9407 dev_t dev;
9408
9409 buf = kmalloc(PATH_MAX, GFP_KERNEL);
9410 if (!buf) {
9411 name = "//enomem";
9412 goto cpy_name;
9413 }
9414 /*
9415 * d_path() works from the end of the buffer backwards, so we
9416 * need to add enough zero bytes after the string to handle
9417 * the 64bit alignment we do later.
9418 */
9419 name = file_path(file, buf, PATH_MAX - sizeof(u64));
9420 if (IS_ERR(name)) {
9421 name = "//toolong";
9422 goto cpy_name;
9423 }
9424 inode = file_inode(vma->vm_file);
9425 dev = inode->i_sb->s_dev;
9426 ino = inode->i_ino;
9427 gen = inode->i_generation;
9428 maj = MAJOR(dev);
9429 min = MINOR(dev);
9430
9431 goto got_name;
9432 } else {
9433 if (vma->vm_ops && vma->vm_ops->name)
9434 name = (char *) vma->vm_ops->name(vma);
9435 if (!name)
9436 name = (char *)arch_vma_name(vma);
9437 if (!name) {
9438 if (vma_is_initial_heap(vma))
9439 name = "[heap]";
9440 else if (vma_is_initial_stack(vma))
9441 name = "[stack]";
9442 else
9443 name = "//anon";
9444 }
9445 }
9446
9447 cpy_name:
9448 strscpy(tmp, name);
9449 name = tmp;
9450 got_name:
9451 /*
9452 * Since our buffer works in 8 byte units we need to align our string
9453 * size to a multiple of 8. However, we must guarantee the tail end is
9454 * zero'd out to avoid leaking random bits to userspace.
9455 */
9456 size = strlen(name)+1;
9457 while (!IS_ALIGNED(size, sizeof(u64)))
9458 name[size++] = '\0';
9459
9460 mmap_event->file_name = name;
9461 mmap_event->file_size = size;
9462 mmap_event->maj = maj;
9463 mmap_event->min = min;
9464 mmap_event->ino = ino;
9465 mmap_event->ino_generation = gen;
9466 mmap_event->prot = prot;
9467 mmap_event->flags = flags;
9468
9469 if (!(vma->vm_flags & VM_EXEC))
9470 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
9471
9472 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
9473
9474 if (atomic_read(&nr_build_id_events))
9475 build_id_parse_nofault(vma, mmap_event->build_id, &mmap_event->build_id_size);
9476
9477 perf_iterate_sb(perf_event_mmap_output,
9478 mmap_event,
9479 NULL);
9480
9481 kfree(buf);
9482 }
9483
9484 /*
9485 * Check whether inode and address range match filter criteria.
9486 */
9487 static bool perf_addr_filter_match(struct perf_addr_filter *filter,
9488 struct file *file, unsigned long offset,
9489 unsigned long size)
9490 {
9491 /* d_inode(NULL) won't be equal to any mapped user-space file */
9492 if (!filter->path.dentry)
9493 return false;
9494
9495 if (d_inode(filter->path.dentry) != file_inode(file))
9496 return false;
9497
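	/* Finally, reject filters whose file range does not overlap the mapping. */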
9498 if (filter->offset > offset + size)
9499 return false;
9500
9501 if (filter->offset + filter->size < offset)
9502 return false;
9503
9504 return true;
9505 }
9506
9507 static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
9508 struct vm_area_struct *vma,
9509 struct perf_addr_filter_range *fr)
9510 {
9511 unsigned long vma_size = vma->vm_end - vma->vm_start;
9512 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
9513 struct file *file = vma->vm_file;
9514
9515 if (!perf_addr_filter_match(filter, file, off, vma_size))
9516 return false;
9517
9518 if (filter->offset < off) {
9519 fr->start = vma->vm_start;
9520 fr->size = min(vma_size, filter->size - (off - filter->offset));
9521 } else {
9522 fr->start = vma->vm_start + filter->offset - off;
9523 fr->size = min(vma->vm_end - fr->start, filter->size);
9524 }
9525
9526 return true;
9527 }
9528
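/*
 * Re-evaluate this event's address filters against the newly mapped vma
 * and, if any filter range changed, bump addr_filters_gen and restart the
 * event so the PMU picks up the new ranges.
 */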
9529 static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
9530 {
9531 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
9532 struct vm_area_struct *vma = data;
9533 struct perf_addr_filter *filter;
9534 unsigned int restart = 0, count = 0;
9535 unsigned long flags;
9536
9537 if (!has_addr_filter(event))
9538 return;
9539
9540 if (!vma->vm_file)
9541 return;
9542
9543 raw_spin_lock_irqsave(&ifh->lock, flags);
9544 list_for_each_entry(filter, &ifh->list, entry) {
9545 if (perf_addr_filter_vma_adjust(filter, vma,
9546 &event->addr_filter_ranges[count]))
9547 restart++;
9548
9549 count++;
9550 }
9551
9552 if (restart)
9553 event->addr_filters_gen++;
9554 raw_spin_unlock_irqrestore(&ifh->lock, flags);
9555
9556 if (restart)
9557 perf_event_stop(event, 1);
9558 }
9559
9560 /*
9561 * Adjust all task's events' filters to the new vma
9562 */
9563 static void perf_addr_filters_adjust(struct vm_area_struct *vma)
9564 {
9565 struct perf_event_context *ctx;
9566
9567 /*
9568 * Data tracing isn't supported yet and as such there is no need
9569 * to keep track of anything that isn't related to executable code:
9570 */
9571 if (!(vma->vm_flags & VM_EXEC))
9572 return;
9573
9574 rcu_read_lock();
9575 ctx = rcu_dereference(current->perf_event_ctxp);
9576 if (ctx)
9577 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
9578 rcu_read_unlock();
9579 }
9580
9581 void perf_event_mmap(struct vm_area_struct *vma)
9582 {
9583 struct perf_mmap_event mmap_event;
9584
9585 if (!atomic_read(&nr_mmap_events))
9586 return;
9587
9588 mmap_event = (struct perf_mmap_event){
9589 .vma = vma,
9590 /* .file_name */
9591 /* .file_size */
9592 .event_id = {
9593 .header = {
9594 .type = PERF_RECORD_MMAP,
9595 .misc = PERF_RECORD_MISC_USER,
9596 /* .size */
9597 },
9598 /* .pid */
9599 /* .tid */
9600 .start = vma->vm_start,
9601 .len = vma->vm_end - vma->vm_start,
9602 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
9603 },
9604 /* .maj (attr_mmap2 only) */
9605 /* .min (attr_mmap2 only) */
9606 /* .ino (attr_mmap2 only) */
9607 /* .ino_generation (attr_mmap2 only) */
9608 /* .prot (attr_mmap2 only) */
9609 /* .flags (attr_mmap2 only) */
9610 };
9611
9612 perf_addr_filters_adjust(vma);
9613 perf_event_mmap_event(&mmap_event);
9614 }
9615
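/*
 * Emit a PERF_RECORD_AUX record announcing that @size bytes of new data are
 * available in the AUX area at offset @head, with @flags carrying AUX
 * status bits.
 */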
9616 void perf_event_aux_event(struct perf_event *event, unsigned long head,
9617 unsigned long size, u64 flags)
9618 {
9619 struct perf_output_handle handle;
9620 struct perf_sample_data sample;
9621 struct perf_aux_event {
9622 struct perf_event_header header;
9623 u64 offset;
9624 u64 size;
9625 u64 flags;
9626 } rec = {
9627 .header = {
9628 .type = PERF_RECORD_AUX,
9629 .misc = 0,
9630 .size = sizeof(rec),
9631 },
9632 .offset = head,
9633 .size = size,
9634 .flags = flags,
9635 };
9636 int ret;
9637
9638 perf_event_header__init_id(&rec.header, &sample, event);
9639 ret = perf_output_begin(&handle, &sample, event, rec.header.size);
9640
9641 if (ret)
9642 return;
9643
9644 perf_output_put(&handle, rec);
9645 perf_event__output_id_sample(event, &handle, &sample);
9646
9647 perf_output_end(&handle);
9648 }
9649
9650 /*
9651 * Lost/dropped samples logging
9652 */
9653 void perf_log_lost_samples(struct perf_event *event, u64 lost)
9654 {
9655 struct perf_output_handle handle;
9656 struct perf_sample_data sample;
9657 int ret;
9658
9659 struct {
9660 struct perf_event_header header;
9661 u64 lost;
9662 } lost_samples_event = {
9663 .header = {
9664 .type = PERF_RECORD_LOST_SAMPLES,
9665 .misc = 0,
9666 .size = sizeof(lost_samples_event),
9667 },
9668 .lost = lost,
9669 };
9670
9671 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
9672
9673 ret = perf_output_begin(&handle, &sample, event,
9674 lost_samples_event.header.size);
9675 if (ret)
9676 return;
9677
9678 perf_output_put(&handle, lost_samples_event);
9679 perf_event__output_id_sample(event, &handle, &sample);
9680 perf_output_end(&handle);
9681 }
9682
9683 /*
9684 * context_switch tracking
9685 */
9686
9687 struct perf_switch_event {
9688 struct task_struct *task;
9689 struct task_struct *next_prev;
9690
9691 struct {
9692 struct perf_event_header header;
9693 u32 next_prev_pid;
9694 u32 next_prev_tid;
9695 } event_id;
9696 };
9697
9698 static int perf_event_switch_match(struct perf_event *event)
9699 {
9700 return event->attr.context_switch;
9701 }
9702
9703 static void perf_event_switch_output(struct perf_event *event, void *data)
9704 {
9705 struct perf_switch_event *se = data;
9706 struct perf_output_handle handle;
9707 struct perf_sample_data sample;
9708 int ret;
9709
9710 if (!perf_event_switch_match(event))
9711 return;
9712
9713 /* Only CPU-wide events are allowed to see next/prev pid/tid */
9714 if (event->ctx->task) {
9715 se->event_id.header.type = PERF_RECORD_SWITCH;
9716 se->event_id.header.size = sizeof(se->event_id.header);
9717 } else {
9718 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
9719 se->event_id.header.size = sizeof(se->event_id);
9720 se->event_id.next_prev_pid =
9721 perf_event_pid(event, se->next_prev);
9722 se->event_id.next_prev_tid =
9723 perf_event_tid(event, se->next_prev);
9724 }
9725
9726 perf_event_header__init_id(&se->event_id.header, &sample, event);
9727
9728 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size);
9729 if (ret)
9730 return;
9731
9732 if (event->ctx->task)
9733 perf_output_put(&handle, se->event_id.header);
9734 else
9735 perf_output_put(&handle, se->event_id);
9736
9737 perf_event__output_id_sample(event, &handle, &sample);
9738
9739 perf_output_end(&handle);
9740 }
9741
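/*
 * Emit a context-switch side-band record; sched-out of a still-runnable
 * task is flagged as a preemption via PERF_RECORD_MISC_SWITCH_OUT_PREEMPT.
 */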
9742 static void perf_event_switch(struct task_struct *task,
9743 struct task_struct *next_prev, bool sched_in)
9744 {
9745 struct perf_switch_event switch_event;
9746
9747 /* N.B. caller checks nr_switch_events != 0 */
9748
9749 switch_event = (struct perf_switch_event){
9750 .task = task,
9751 .next_prev = next_prev,
9752 .event_id = {
9753 .header = {
9754 /* .type */
9755 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
9756 /* .size */
9757 },
9758 /* .next_prev_pid */
9759 /* .next_prev_tid */
9760 },
9761 };
9762
9763 if (!sched_in && task_is_runnable(task)) {
9764 switch_event.event_id.header.misc |=
9765 PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
9766 }
9767
9768 perf_iterate_sb(perf_event_switch_output, &switch_event, NULL);
9769 }
9770
9771 /*
9772 * IRQ throttle logging
9773 */
9774
9775 static void perf_log_throttle(struct perf_event *event, int enable)
9776 {
9777 struct perf_output_handle handle;
9778 struct perf_sample_data sample;
9779 int ret;
9780
9781 struct {
9782 struct perf_event_header header;
9783 u64 time;
9784 u64 id;
9785 u64 stream_id;
9786 } throttle_event = {
9787 .header = {
9788 .type = PERF_RECORD_THROTTLE,
9789 .misc = 0,
9790 .size = sizeof(throttle_event),
9791 },
9792 .time = perf_event_clock(event),
9793 .id = primary_event_id(event),
9794 .stream_id = event->id,
9795 };
9796
9797 if (enable)
9798 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
9799
9800 perf_event_header__init_id(&throttle_event.header, &sample, event);
9801
9802 ret = perf_output_begin(&handle, &sample, event,
9803 throttle_event.header.size);
9804 if (ret)
9805 return;
9806
9807 perf_output_put(&handle, throttle_event);
9808 perf_event__output_id_sample(event, &handle, &sample);
9809 perf_output_end(&handle);
9810 }
9811
9812 /*
9813 * ksymbol register/unregister tracking
9814 */
9815
9816 struct perf_ksymbol_event {
9817 const char *name;
9818 int name_len;
9819 struct {
9820 struct perf_event_header header;
9821 u64 addr;
9822 u32 len;
9823 u16 ksym_type;
9824 u16 flags;
9825 } event_id;
9826 };
9827
9828 static int perf_event_ksymbol_match(struct perf_event *event)
9829 {
9830 return event->attr.ksymbol;
9831 }
9832
9833 static void perf_event_ksymbol_output(struct perf_event *event, void *data)
9834 {
9835 struct perf_ksymbol_event *ksymbol_event = data;
9836 struct perf_output_handle handle;
9837 struct perf_sample_data sample;
9838 int ret;
9839
9840 if (!perf_event_ksymbol_match(event))
9841 return;
9842
9843 perf_event_header__init_id(&ksymbol_event->event_id.header,
9844 &sample, event);
9845 ret = perf_output_begin(&handle, &sample, event,
9846 ksymbol_event->event_id.header.size);
9847 if (ret)
9848 return;
9849
9850 perf_output_put(&handle, ksymbol_event->event_id);
9851 __output_copy(&handle, ksymbol_event->name, ksymbol_event->name_len);
9852 perf_event__output_id_sample(event, &handle, &sample);
9853
9854 perf_output_end(&handle);
9855 }
9856
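/*
 * Emit a PERF_RECORD_KSYMBOL record for a kernel symbol (e.g. a JITed BPF
 * image) being registered or unregistered at [@addr, @addr + @len). The
 * symbol name is NUL-padded to a multiple of 8 bytes, like other
 * variable-length strings in the ring buffer.
 */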
9857 void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
9858 const char *sym)
9859 {
9860 struct perf_ksymbol_event ksymbol_event;
9861 char name[KSYM_NAME_LEN];
9862 u16 flags = 0;
9863 int name_len;
9864
9865 if (!atomic_read(&nr_ksymbol_events))
9866 return;
9867
9868 if (ksym_type >= PERF_RECORD_KSYMBOL_TYPE_MAX ||
9869 ksym_type == PERF_RECORD_KSYMBOL_TYPE_UNKNOWN)
9870 goto err;
9871
9872 strscpy(name, sym);
9873 name_len = strlen(name) + 1;
9874 while (!IS_ALIGNED(name_len, sizeof(u64)))
9875 name[name_len++] = '\0';
9876 BUILD_BUG_ON(KSYM_NAME_LEN % sizeof(u64));
9877
9878 if (unregister)
9879 flags |= PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER;
9880
9881 ksymbol_event = (struct perf_ksymbol_event){
9882 .name = name,
9883 .name_len = name_len,
9884 .event_id = {
9885 .header = {
9886 .type = PERF_RECORD_KSYMBOL,
9887 .size = sizeof(ksymbol_event.event_id) +
9888 name_len,
9889 },
9890 .addr = addr,
9891 .len = len,
9892 .ksym_type = ksym_type,
9893 .flags = flags,
9894 },
9895 };
9896
9897 perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL);
9898 return;
9899 err:
9900 WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type);
9901 }
9902
9903 /*
9904 * bpf program load/unload tracking
9905 */
9906
9907 struct perf_bpf_event {
9908 struct bpf_prog *prog;
9909 struct {
9910 struct perf_event_header header;
9911 u16 type;
9912 u16 flags;
9913 u32 id;
9914 u8 tag[BPF_TAG_SIZE];
9915 } event_id;
9916 };
9917
9918 static int perf_event_bpf_match(struct perf_event *event)
9919 {
9920 return event->attr.bpf_event;
9921 }
9922
9923 static void perf_event_bpf_output(struct perf_event *event, void *data)
9924 {
9925 struct perf_bpf_event *bpf_event = data;
9926 struct perf_output_handle handle;
9927 struct perf_sample_data sample;
9928 int ret;
9929
9930 if (!perf_event_bpf_match(event))
9931 return;
9932
9933 perf_event_header__init_id(&bpf_event->event_id.header,
9934 &sample, event);
9935 ret = perf_output_begin(&handle, &sample, event,
9936 bpf_event->event_id.header.size);
9937 if (ret)
9938 return;
9939
9940 perf_output_put(&handle, bpf_event->event_id);
9941 perf_event__output_id_sample(event, &handle, &sample);
9942
9943 perf_output_end(&handle);
9944 }
9945
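/*
 * Emit ksymbol records for the main JITed image and, for multi-function
 * programs, each sub-program (func[1..func_cnt-1]).
 */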
9946 static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
9947 enum perf_bpf_event_type type)
9948 {
9949 bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
9950 int i;
9951
9952 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
9953 (u64)(unsigned long)prog->bpf_func,
9954 prog->jited_len, unregister,
9955 prog->aux->ksym.name);
9956
9957 for (i = 1; i < prog->aux->func_cnt; i++) {
9958 struct bpf_prog *subprog = prog->aux->func[i];
9959
9960 perf_event_ksymbol(
9961 PERF_RECORD_KSYMBOL_TYPE_BPF,
9962 (u64)(unsigned long)subprog->bpf_func,
9963 subprog->jited_len, unregister,
9964 subprog->aux->ksym.name);
9965 }
9966 }
9967
9968 void perf_event_bpf_event(struct bpf_prog *prog,
9969 enum perf_bpf_event_type type,
9970 u16 flags)
9971 {
9972 struct perf_bpf_event bpf_event;
9973
9974 switch (type) {
9975 case PERF_BPF_EVENT_PROG_LOAD:
9976 case PERF_BPF_EVENT_PROG_UNLOAD:
9977 if (atomic_read(&nr_ksymbol_events))
9978 perf_event_bpf_emit_ksymbols(prog, type);
9979 break;
9980 default:
9981 return;
9982 }
9983
9984 if (!atomic_read(&nr_bpf_events))
9985 return;
9986
9987 bpf_event = (struct perf_bpf_event){
9988 .prog = prog,
9989 .event_id = {
9990 .header = {
9991 .type = PERF_RECORD_BPF_EVENT,
9992 .size = sizeof(bpf_event.event_id),
9993 },
9994 .type = type,
9995 .flags = flags,
9996 .id = prog->aux->id,
9997 },
9998 };
9999
10000 BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64));
10001
10002 memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE);
10003 perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL);
10004 }
10005
10006 struct perf_text_poke_event {
10007 const void *old_bytes;
10008 const void *new_bytes;
10009 size_t pad;
10010 u16 old_len;
10011 u16 new_len;
10012
10013 struct {
10014 struct perf_event_header header;
10015
10016 u64 addr;
10017 } event_id;
10018 };
10019
10020 static int perf_event_text_poke_match(struct perf_event *event)
10021 {
10022 return event->attr.text_poke;
10023 }
10024
10025 static void perf_event_text_poke_output(struct perf_event *event, void *data)
10026 {
10027 struct perf_text_poke_event *text_poke_event = data;
10028 struct perf_output_handle handle;
10029 struct perf_sample_data sample;
10030 u64 padding = 0;
10031 int ret;
10032
10033 if (!perf_event_text_poke_match(event))
10034 return;
10035
10036 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event);
10037
10038 ret = perf_output_begin(&handle, &sample, event,
10039 text_poke_event->event_id.header.size);
10040 if (ret)
10041 return;
10042
10043 perf_output_put(&handle, text_poke_event->event_id);
10044 perf_output_put(&handle, text_poke_event->old_len);
10045 perf_output_put(&handle, text_poke_event->new_len);
10046
10047 __output_copy(&handle, text_poke_event->old_bytes, text_poke_event->old_len);
10048 __output_copy(&handle, text_poke_event->new_bytes, text_poke_event->new_len);
10049
10050 if (text_poke_event->pad)
10051 __output_copy(&handle, &padding, text_poke_event->pad);
10052
10053 perf_event__output_id_sample(event, &handle, &sample);
10054
10055 perf_output_end(&handle);
10056 }
10057
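/*
 * Emit a PERF_RECORD_TEXT_POKE record for a kernel text modification at
 * @addr. The record carries old_len, new_len, the old and new bytes, and
 * enough zero padding to keep the total payload a multiple of 8 bytes.
 */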
10058 void perf_event_text_poke(const void *addr, const void *old_bytes,
10059 size_t old_len, const void *new_bytes, size_t new_len)
10060 {
10061 struct perf_text_poke_event text_poke_event;
10062 size_t tot, pad;
10063
10064 if (!atomic_read(&nr_text_poke_events))
10065 return;
10066
10067 tot = sizeof(text_poke_event.old_len) + old_len;
10068 tot += sizeof(text_poke_event.new_len) + new_len;
10069 pad = ALIGN(tot, sizeof(u64)) - tot;
10070
10071 text_poke_event = (struct perf_text_poke_event){
10072 .old_bytes = old_bytes,
10073 .new_bytes = new_bytes,
10074 .pad = pad,
10075 .old_len = old_len,
10076 .new_len = new_len,
10077 .event_id = {
10078 .header = {
10079 .type = PERF_RECORD_TEXT_POKE,
10080 .misc = PERF_RECORD_MISC_KERNEL,
10081 .size = sizeof(text_poke_event.event_id) + tot + pad,
10082 },
10083 .addr = (unsigned long)addr,
10084 },
10085 };
10086
10087 perf_iterate_sb(perf_event_text_poke_output, &text_poke_event, NULL);
10088 }
10089
10090 void perf_event_itrace_started(struct perf_event *event)
10091 {
10092 WRITE_ONCE(event->attach_state, event->attach_state | PERF_ATTACH_ITRACE);
10093 }
10094
10095 static void perf_log_itrace_start(struct perf_event *event)
10096 {
10097 struct perf_output_handle handle;
10098 struct perf_sample_data sample;
10099 struct perf_aux_event {
10100 struct perf_event_header header;
10101 u32 pid;
10102 u32 tid;
10103 } rec;
10104 int ret;
10105
10106 if (event->parent)
10107 event = event->parent;
10108
10109 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
10110 event->attach_state & PERF_ATTACH_ITRACE)
10111 return;
10112
10113 rec.header.type = PERF_RECORD_ITRACE_START;
10114 rec.header.misc = 0;
10115 rec.header.size = sizeof(rec);
10116 rec.pid = perf_event_pid(event, current);
10117 rec.tid = perf_event_tid(event, current);
10118
10119 perf_event_header__init_id(&rec.header, &sample, event);
10120 ret = perf_output_begin(&handle, &sample, event, rec.header.size);
10121
10122 if (ret)
10123 return;
10124
10125 perf_output_put(&handle, rec);
10126 perf_event__output_id_sample(event, &handle, &sample);
10127
10128 perf_output_end(&handle);
10129 }
10130
10131 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id)
10132 {
10133 struct perf_output_handle handle;
10134 struct perf_sample_data sample;
10135 struct perf_aux_event {
10136 struct perf_event_header header;
10137 u64 hw_id;
10138 } rec;
10139 int ret;
10140
10141 if (event->parent)
10142 event = event->parent;
10143
10144 rec.header.type = PERF_RECORD_AUX_OUTPUT_HW_ID;
10145 rec.header.misc = 0;
10146 rec.header.size = sizeof(rec);
10147 rec.hw_id = hw_id;
10148
10149 perf_event_header__init_id(&rec.header, &sample, event);
10150 ret = perf_output_begin(&handle, &sample, event, rec.header.size);
10151
10152 if (ret)
10153 return;
10154
10155 perf_output_put(&handle, rec);
10156 perf_event__output_id_sample(event, &handle, &sample);
10157
10158 perf_output_end(&handle);
10159 }
10160 EXPORT_SYMBOL_GPL(perf_report_aux_output_id);
10161
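/*
 * Account one PMU interrupt against the event. Interrupts are counted per
 * perf_throttled_seq period; once they exceed max_samples_per_tick the
 * event group is throttled and 1 is returned. For freq events the sampling
 * period is also re-adjusted based on the time since the last interrupt.
 */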
10162 static int
10163 __perf_event_account_interrupt(struct perf_event *event, int throttle)
10164 {
10165 struct hw_perf_event *hwc = &event->hw;
10166 int ret = 0;
10167 u64 seq;
10168
10169 seq = __this_cpu_read(perf_throttled_seq);
10170 if (seq != hwc->interrupts_seq) {
10171 hwc->interrupts_seq = seq;
10172 hwc->interrupts = 1;
10173 } else {
10174 hwc->interrupts++;
10175 }
10176
10177 if (unlikely(throttle && hwc->interrupts >= max_samples_per_tick)) {
10178 __this_cpu_inc(perf_throttled_count);
10179 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
10180 perf_event_throttle_group(event);
10181 ret = 1;
10182 }
10183
10184 if (event->attr.freq) {
10185 u64 now = perf_clock();
10186 s64 delta = now - hwc->freq_time_stamp;
10187
10188 hwc->freq_time_stamp = now;
10189
10190 if (delta > 0 && delta < 2*TICK_NSEC)
10191 perf_adjust_period(event, delta, hwc->last_period, true);
10192 }
10193
10194 return ret;
10195 }
10196
10197 int perf_event_account_interrupt(struct perf_event *event)
10198 {
10199 return __perf_event_account_interrupt(event, 1);
10200 }
10201
10202 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
10203 {
10204 /*
10205 * Due to interrupt latency (AKA "skid"), we may enter the
10206 * kernel before taking an overflow, even if the PMU is only
10207 * counting user events.
10208 */
10209 if (event->attr.exclude_kernel && !user_mode(regs))
10210 return false;
10211
10212 return true;
10213 }
10214
10215 #ifdef CONFIG_BPF_SYSCALL
10216 static int bpf_overflow_handler(struct perf_event *event,
10217 struct perf_sample_data *data,
10218 struct pt_regs *regs)
10219 {
10220 struct bpf_perf_event_data_kern ctx = {
10221 .data = data,
10222 .event = event,
10223 };
10224 struct bpf_prog *prog;
10225 int ret = 0;
10226
10227 ctx.regs = perf_arch_bpf_user_pt_regs(regs);
10228 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
10229 goto out;
10230 rcu_read_lock();
10231 prog = READ_ONCE(event->prog);
10232 if (prog) {
10233 perf_prepare_sample(data, event, regs);
10234 ret = bpf_prog_run(prog, &ctx);
10235 }
10236 rcu_read_unlock();
10237 out:
10238 __this_cpu_dec(bpf_prog_active);
10239
10240 return ret;
10241 }
10242
10243 static inline int perf_event_set_bpf_handler(struct perf_event *event,
10244 struct bpf_prog *prog,
10245 u64 bpf_cookie)
10246 {
10247 if (event->overflow_handler_context)
10248 /* hw breakpoint or kernel counter */
10249 return -EINVAL;
10250
10251 if (event->prog)
10252 return -EEXIST;
10253
10254 if (prog->type != BPF_PROG_TYPE_PERF_EVENT)
10255 return -EINVAL;
10256
10257 if (event->attr.precise_ip &&
10258 prog->call_get_stack &&
10259 (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
10260 event->attr.exclude_callchain_kernel ||
10261 event->attr.exclude_callchain_user)) {
10262 /*
10263 * On perf_event with precise_ip, calling bpf_get_stack()
10264 * may trigger unwinder warnings and occasional crashes.
10265 * bpf_get_[stack|stackid] works around this issue by using the
10266 * callchain attached to perf_sample_data. If the
10267 * perf_event does not have a full (kernel and user) callchain
10268 * attached to perf_sample_data, do not allow attaching a BPF
10269 * program that calls bpf_get_[stack|stackid].
10270 */
10271 return -EPROTO;
10272 }
10273
10274 event->prog = prog;
10275 event->bpf_cookie = bpf_cookie;
10276 return 0;
10277 }
10278
10279 static inline void perf_event_free_bpf_handler(struct perf_event *event)
10280 {
10281 struct bpf_prog *prog = event->prog;
10282
10283 if (!prog)
10284 return;
10285
10286 event->prog = NULL;
10287 bpf_prog_put(prog);
10288 }
10289 #else
10290 static inline int bpf_overflow_handler(struct perf_event *event,
10291 struct perf_sample_data *data,
10292 struct pt_regs *regs)
10293 {
10294 return 1;
10295 }
10296
10297 static inline int perf_event_set_bpf_handler(struct perf_event *event,
10298 struct bpf_prog *prog,
10299 u64 bpf_cookie)
10300 {
10301 return -EOPNOTSUPP;
10302 }
10303
10304 static inline void perf_event_free_bpf_handler(struct perf_event *event)
10305 {
10306 }
10307 #endif
10308
10309 /*
10310 * Generic event overflow handling, sampling.
10311 */
10312
10313 static int __perf_event_overflow(struct perf_event *event,
10314 int throttle, struct perf_sample_data *data,
10315 struct pt_regs *regs)
10316 {
10317 int events = atomic_read(&event->event_limit);
10318 int ret = 0;
10319
10320 /*
10321 * Non-sampling counters might still use the PMI to fold short
10322 * hardware counters, ignore those.
10323 */
10324 if (unlikely(!is_sampling_event(event)))
10325 return 0;
10326
10327 ret = __perf_event_account_interrupt(event, throttle);
10328
10329 if (event->attr.aux_pause)
10330 perf_event_aux_pause(event->aux_event, true);
10331
10332 if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT &&
10333 !bpf_overflow_handler(event, data, regs))
10334 goto out;
10335
10336 /*
10337 * XXX event_limit might not quite work as expected on inherited
10338 * events
10339 */
10340
10341 event->pending_kill = POLL_IN;
10342 if (events && atomic_dec_and_test(&event->event_limit)) {
10343 ret = 1;
10344 event->pending_kill = POLL_HUP;
10345 perf_event_disable_inatomic(event);
10346 event->pmu->stop(event, 0);
10347 }
10348
10349 if (event->attr.sigtrap) {
10350 /*
10351 * The desired behaviour of sigtrap vs invalid samples is a bit
10352 * tricky; on the one hand, one should not lose the SIGTRAP if
10353 * it is the first event, on the other hand, we should also not
10354 * trigger the WARN or override the data address.
10355 */
10356 bool valid_sample = sample_is_allowed(event, regs);
10357 unsigned int pending_id = 1;
10358 enum task_work_notify_mode notify_mode;
10359
10360 if (regs)
10361 pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
10362
10363 notify_mode = in_nmi() ? TWA_NMI_CURRENT : TWA_RESUME;
10364
10365 if (!event->pending_work &&
10366 !task_work_add(current, &event->pending_task, notify_mode)) {
10367 event->pending_work = pending_id;
10368 local_inc(&event->ctx->nr_no_switch_fast);
10369 WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
10370
10371 event->pending_addr = 0;
10372 if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
10373 event->pending_addr = data->addr;
10374
10375 } else if (event->attr.exclude_kernel && valid_sample) {
10376 /*
10377 * Should not be able to return to user space without
10378 * consuming pending_work; with exceptions:
10379 *
10380 * 1. Where !exclude_kernel, events can overflow again
10381 * in the kernel without returning to user space.
10382 *
10383 * 2. Events that can overflow again before the IRQ-
10384 * work without user space progress (e.g. hrtimer).
10385 * To approximate progress (with false negatives),
10386 * check 32-bit hash of the current IP.
10387 */
10388 WARN_ON_ONCE(event->pending_work != pending_id);
10389 }
10390 }
10391
10392 READ_ONCE(event->overflow_handler)(event, data, regs);
10393
10394 if (*perf_event_fasync(event) && event->pending_kill) {
10395 event->pending_wakeup = 1;
10396 irq_work_queue(&event->pending_irq);
10397 }
10398 out:
10399 if (event->attr.aux_resume)
10400 perf_event_aux_pause(event->aux_event, false);
10401
10402 return ret;
10403 }
10404
10405 int perf_event_overflow(struct perf_event *event,
10406 struct perf_sample_data *data,
10407 struct pt_regs *regs)
10408 {
10409 return __perf_event_overflow(event, 1, data, regs);
10410 }
10411
10412 /*
10413 * Generic software event infrastructure
10414 */
10415
10416 struct swevent_htable {
10417 struct swevent_hlist *swevent_hlist;
10418 struct mutex hlist_mutex;
10419 int hlist_refcount;
10420 };
10421 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
10422
10423 /*
10424 * We directly increment event->count and keep a second value in
10425 * event->hw.period_left to count intervals. This period event
10426 * is kept in the range [-sample_period, 0] so that we can use the
10427 * sign as trigger.
10428 */
10429
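/*
 * Example: with a sample_period of 100 and period_left having climbed to
 * 250 after new events, perf_swevent_set_period() reports nr == 3 elapsed
 * periods and rewinds period_left to -50 (50 events until the next overflow).
 */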
10430 u64 perf_swevent_set_period(struct perf_event *event)
10431 {
10432 struct hw_perf_event *hwc = &event->hw;
10433 u64 period = hwc->last_period;
10434 u64 nr, offset;
10435 s64 old, val;
10436
10437 hwc->last_period = hwc->sample_period;
10438
10439 old = local64_read(&hwc->period_left);
10440 do {
10441 val = old;
10442 if (val < 0)
10443 return 0;
10444
10445 nr = div64_u64(period + val, period);
10446 offset = nr * period;
10447 val -= offset;
10448 } while (!local64_try_cmpxchg(&hwc->period_left, &old, val));
10449
10450 return nr;
10451 }
10452
10453 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
10454 struct perf_sample_data *data,
10455 struct pt_regs *regs)
10456 {
10457 struct hw_perf_event *hwc = &event->hw;
10458 int throttle = 0;
10459
10460 if (!overflow)
10461 overflow = perf_swevent_set_period(event);
10462
10463 if (hwc->interrupts == MAX_INTERRUPTS)
10464 return;
10465
10466 for (; overflow; overflow--) {
10467 if (__perf_event_overflow(event, throttle,
10468 data, regs)) {
10469 /*
10470 * We inhibit the overflow from happening when
10471 * hwc->interrupts == MAX_INTERRUPTS.
10472 */
10473 break;
10474 }
10475 throttle = 1;
10476 }
10477 }
10478
10479 static void perf_swevent_event(struct perf_event *event, u64 nr,
10480 struct perf_sample_data *data,
10481 struct pt_regs *regs)
10482 {
10483 struct hw_perf_event *hwc = &event->hw;
10484
10485 local64_add(nr, &event->count);
10486
10487 if (!regs)
10488 return;
10489
10490 if (!is_sampling_event(event))
10491 return;
10492
10493 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
10494 data->period = nr;
10495 return perf_swevent_overflow(event, 1, data, regs);
10496 } else
10497 data->period = event->hw.last_period;
10498
10499 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
10500 return perf_swevent_overflow(event, 1, data, regs);
10501
10502 if (local64_add_negative(nr, &hwc->period_left))
10503 return;
10504
10505 perf_swevent_overflow(event, 0, data, regs);
10506 }
10507
10508 int perf_exclude_event(struct perf_event *event, struct pt_regs *regs)
10509 {
10510 if (event->hw.state & PERF_HES_STOPPED)
10511 return 1;
10512
10513 if (regs) {
10514 if (event->attr.exclude_user && user_mode(regs))
10515 return 1;
10516
10517 if (event->attr.exclude_kernel && !user_mode(regs))
10518 return 1;
10519 }
10520
10521 return 0;
10522 }
10523
10524 static int perf_swevent_match(struct perf_event *event,
10525 enum perf_type_id type,
10526 u32 event_id,
10527 struct perf_sample_data *data,
10528 struct pt_regs *regs)
10529 {
10530 if (event->attr.type != type)
10531 return 0;
10532
10533 if (event->attr.config != event_id)
10534 return 0;
10535
10536 if (perf_exclude_event(event, regs))
10537 return 0;
10538
10539 return 1;
10540 }
10541
10542 static inline u64 swevent_hash(u64 type, u32 event_id)
10543 {
10544 u64 val = event_id | (type << 32);
10545
10546 return hash_64(val, SWEVENT_HLIST_BITS);
10547 }
10548
10549 static inline struct hlist_head *
10550 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
10551 {
10552 u64 hash = swevent_hash(type, event_id);
10553
10554 return &hlist->heads[hash];
10555 }
10556
10557 /* For the read side: events when they trigger */
10558 static inline struct hlist_head *
10559 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
10560 {
10561 struct swevent_hlist *hlist;
10562
10563 hlist = rcu_dereference(swhash->swevent_hlist);
10564 if (!hlist)
10565 return NULL;
10566
10567 return __find_swevent_head(hlist, type, event_id);
10568 }
10569
10570 /* For the event head insertion and removal in the hlist */
10571 static inline struct hlist_head *
10572 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
10573 {
10574 struct swevent_hlist *hlist;
10575 u32 event_id = event->attr.config;
10576 u64 type = event->attr.type;
10577
10578 /*
10579 * Event scheduling is always serialized against hlist allocation
10580 * and release, which makes the protected version suitable here;
10581 * the context lock guarantees that.
10582 */
10583 hlist = rcu_dereference_protected(swhash->swevent_hlist,
10584 lockdep_is_held(&event->ctx->lock));
10585 if (!hlist)
10586 return NULL;
10587
10588 return __find_swevent_head(hlist, type, event_id);
10589 }
10590
10591 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
10592 u64 nr,
10593 struct perf_sample_data *data,
10594 struct pt_regs *regs)
10595 {
10596 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
10597 struct perf_event *event;
10598 struct hlist_head *head;
10599
10600 rcu_read_lock();
10601 head = find_swevent_head_rcu(swhash, type, event_id);
10602 if (!head)
10603 goto end;
10604
10605 hlist_for_each_entry_rcu(event, head, hlist_entry) {
10606 if (perf_swevent_match(event, type, event_id, data, regs))
10607 perf_swevent_event(event, nr, data, regs);
10608 }
10609 end:
10610 rcu_read_unlock();
10611 }
10612
10613 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
10614
10615 int perf_swevent_get_recursion_context(void)
10616 {
10617 return get_recursion_context(current->perf_recursion);
10618 }
10619 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
10620
10621 void perf_swevent_put_recursion_context(int rctx)
10622 {
10623 put_recursion_context(current->perf_recursion, rctx);
10624 }
10625
10626 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
10627 {
10628 struct perf_sample_data data;
10629
10630 if (WARN_ON_ONCE(!regs))
10631 return;
10632
10633 perf_sample_data_init(&data, addr, 0);
10634 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
10635 }
10636
10637 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
10638 {
10639 int rctx;
10640
10641 preempt_disable_notrace();
10642 rctx = perf_swevent_get_recursion_context();
10643 if (unlikely(rctx < 0))
10644 goto fail;
10645
10646 ___perf_sw_event(event_id, nr, regs, addr);
10647
10648 perf_swevent_put_recursion_context(rctx);
10649 fail:
10650 preempt_enable_notrace();
10651 }
10652
10653 static void perf_swevent_read(struct perf_event *event)
10654 {
10655 }
10656
10657 static int perf_swevent_add(struct perf_event *event, int flags)
10658 {
10659 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
10660 struct hw_perf_event *hwc = &event->hw;
10661 struct hlist_head *head;
10662
10663 if (is_sampling_event(event)) {
10664 hwc->last_period = hwc->sample_period;
10665 perf_swevent_set_period(event);
10666 }
10667
10668 hwc->state = !(flags & PERF_EF_START);
10669
10670 head = find_swevent_head(swhash, event);
10671 if (WARN_ON_ONCE(!head))
10672 return -EINVAL;
10673
10674 hlist_add_head_rcu(&event->hlist_entry, head);
10675 perf_event_update_userpage(event);
10676
10677 return 0;
10678 }
10679
10680 static void perf_swevent_del(struct perf_event *event, int flags)
10681 {
10682 hlist_del_rcu(&event->hlist_entry);
10683 }
10684
10685 static void perf_swevent_start(struct perf_event *event, int flags)
10686 {
10687 event->hw.state = 0;
10688 }
10689
10690 static void perf_swevent_stop(struct perf_event *event, int flags)
10691 {
10692 event->hw.state = PERF_HES_STOPPED;
10693 }
10694
10695 /* Deref the hlist from the update side */
10696 static inline struct swevent_hlist *
10697 swevent_hlist_deref(struct swevent_htable *swhash)
10698 {
10699 return rcu_dereference_protected(swhash->swevent_hlist,
10700 lockdep_is_held(&swhash->hlist_mutex));
10701 }
10702
10703 static void swevent_hlist_release(struct swevent_htable *swhash)
10704 {
10705 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
10706
10707 if (!hlist)
10708 return;
10709
10710 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
10711 kfree_rcu(hlist, rcu_head);
10712 }
10713
10714 static void swevent_hlist_put_cpu(int cpu)
10715 {
10716 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
10717
10718 mutex_lock(&swhash->hlist_mutex);
10719
10720 if (!--swhash->hlist_refcount)
10721 swevent_hlist_release(swhash);
10722
10723 mutex_unlock(&swhash->hlist_mutex);
10724 }
10725
10726 static void swevent_hlist_put(void)
10727 {
10728 int cpu;
10729
10730 for_each_possible_cpu(cpu)
10731 swevent_hlist_put_cpu(cpu);
10732 }
10733
10734 static int swevent_hlist_get_cpu(int cpu)
10735 {
10736 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
10737 int err = 0;
10738
10739 mutex_lock(&swhash->hlist_mutex);
10740 if (!swevent_hlist_deref(swhash) &&
10741 cpumask_test_cpu(cpu, perf_online_mask)) {
10742 struct swevent_hlist *hlist;
10743
10744 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
10745 if (!hlist) {
10746 err = -ENOMEM;
10747 goto exit;
10748 }
10749 rcu_assign_pointer(swhash->swevent_hlist, hlist);
10750 }
10751 swhash->hlist_refcount++;
10752 exit:
10753 mutex_unlock(&swhash->hlist_mutex);
10754
10755 return err;
10756 }
10757
10758 static int swevent_hlist_get(void)
10759 {
10760 int err, cpu, failed_cpu;
10761
10762 mutex_lock(&pmus_lock);
10763 for_each_possible_cpu(cpu) {
10764 err = swevent_hlist_get_cpu(cpu);
10765 if (err) {
10766 failed_cpu = cpu;
10767 goto fail;
10768 }
10769 }
10770 mutex_unlock(&pmus_lock);
10771 return 0;
10772 fail:
10773 for_each_possible_cpu(cpu) {
10774 if (cpu == failed_cpu)
10775 break;
10776 swevent_hlist_put_cpu(cpu);
10777 }
10778 mutex_unlock(&pmus_lock);
10779 return err;
10780 }
10781
10782 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
10783
10784 static void sw_perf_event_destroy(struct perf_event *event)
10785 {
10786 u64 event_id = event->attr.config;
10787
10788 WARN_ON(event->parent);
10789
10790 static_key_slow_dec(&perf_swevent_enabled[event_id]);
10791 swevent_hlist_put();
10792 }
10793
10794 static struct pmu perf_cpu_clock; /* fwd declaration */
10795 static struct pmu perf_task_clock;
10796
10797 static int perf_swevent_init(struct perf_event *event)
10798 {
10799 u64 event_id = event->attr.config;
10800
10801 if (event->attr.type != PERF_TYPE_SOFTWARE)
10802 return -ENOENT;
10803
10804 /*
10805 * no branch sampling for software events
10806 */
10807 if (has_branch_stack(event))
10808 return -EOPNOTSUPP;
10809
10810 switch (event_id) {
10811 case PERF_COUNT_SW_CPU_CLOCK:
10812 event->attr.type = perf_cpu_clock.type;
10813 return -ENOENT;
10814 case PERF_COUNT_SW_TASK_CLOCK:
10815 event->attr.type = perf_task_clock.type;
10816 return -ENOENT;
10817
10818 default:
10819 break;
10820 }
10821
10822 if (event_id >= PERF_COUNT_SW_MAX)
10823 return -ENOENT;
10824
10825 if (!event->parent) {
10826 int err;
10827
10828 err = swevent_hlist_get();
10829 if (err)
10830 return err;
10831
10832 static_key_slow_inc(&perf_swevent_enabled[event_id]);
10833 event->destroy = sw_perf_event_destroy;
10834 }
10835
10836 return 0;
10837 }
10838
10839 static struct pmu perf_swevent = {
10840 .task_ctx_nr = perf_sw_context,
10841
10842 .capabilities = PERF_PMU_CAP_NO_NMI,
10843
10844 .event_init = perf_swevent_init,
10845 .add = perf_swevent_add,
10846 .del = perf_swevent_del,
10847 .start = perf_swevent_start,
10848 .stop = perf_swevent_stop,
10849 .read = perf_swevent_read,
10850 };
10851
10852 #ifdef CONFIG_EVENT_TRACING
10853
10854 static void tp_perf_event_destroy(struct perf_event *event)
10855 {
10856 perf_trace_destroy(event);
10857 }
10858
10859 static int perf_tp_event_init(struct perf_event *event)
10860 {
10861 int err;
10862
10863 if (event->attr.type != PERF_TYPE_TRACEPOINT)
10864 return -ENOENT;
10865
10866 /*
10867 * no branch sampling for tracepoint events
10868 */
10869 if (has_branch_stack(event))
10870 return -EOPNOTSUPP;
10871
10872 err = perf_trace_init(event);
10873 if (err)
10874 return err;
10875
10876 event->destroy = tp_perf_event_destroy;
10877
10878 return 0;
10879 }
10880
10881 static struct pmu perf_tracepoint = {
10882 .task_ctx_nr = perf_sw_context,
10883
10884 .event_init = perf_tp_event_init,
10885 .add = perf_trace_add,
10886 .del = perf_trace_del,
10887 .start = perf_swevent_start,
10888 .stop = perf_swevent_stop,
10889 .read = perf_swevent_read,
10890 };
10891
10892 static int perf_tp_filter_match(struct perf_event *event,
10893 struct perf_raw_record *raw)
10894 {
10895 void *record = raw->frag.data;
10896
10897 /* only top level events have filters set */
10898 if (event->parent)
10899 event = event->parent;
10900
10901 if (likely(!event->filter) || filter_match_preds(event->filter, record))
10902 return 1;
10903 return 0;
10904 }
10905
10906 static int perf_tp_event_match(struct perf_event *event,
10907 struct perf_raw_record *raw,
10908 struct pt_regs *regs)
10909 {
10910 if (event->hw.state & PERF_HES_STOPPED)
10911 return 0;
10912 /*
10913 * If exclude_kernel, only trace user-space tracepoints (uprobes)
10914 */
10915 if (event->attr.exclude_kernel && !user_mode(regs))
10916 return 0;
10917
10918 if (!perf_tp_filter_match(event, raw))
10919 return 0;
10920
10921 return 1;
10922 }
10923
10924 void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
10925 struct trace_event_call *call, u64 count,
10926 struct pt_regs *regs, struct hlist_head *head,
10927 struct task_struct *task)
10928 {
10929 if (bpf_prog_array_valid(call)) {
10930 *(struct pt_regs **)raw_data = regs;
10931 if (!trace_call_bpf(call, raw_data) || hlist_empty(head)) {
10932 perf_swevent_put_recursion_context(rctx);
10933 return;
10934 }
10935 }
10936 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
10937 rctx, task);
10938 }
10939 EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
10940
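/*
 * Deliver one tracepoint hit to a single event in the target task's
 * context, provided the event is for this tracepoint (attr.config matches
 * the trace entry type) and is not a sigtrap event, which can only signal
 * the current task.
 */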
10941 static void __perf_tp_event_target_task(u64 count, void *record,
10942 struct pt_regs *regs,
10943 struct perf_sample_data *data,
10944 struct perf_raw_record *raw,
10945 struct perf_event *event)
10946 {
10947 struct trace_entry *entry = record;
10948
10949 if (event->attr.config != entry->type)
10950 return;
10951 /* Cannot deliver synchronous signal to other task. */
10952 if (event->attr.sigtrap)
10953 return;
10954 if (perf_tp_event_match(event, raw, regs)) {
10955 perf_sample_data_init(data, 0, 0);
10956 perf_sample_save_raw_data(data, event, raw);
10957 perf_swevent_event(event, count, data, regs);
10958 }
10959 }
10960
10961 static void perf_tp_event_target_task(u64 count, void *record,
10962 struct pt_regs *regs,
10963 struct perf_sample_data *data,
10964 struct perf_raw_record *raw,
10965 struct perf_event_context *ctx)
10966 {
10967 unsigned int cpu = smp_processor_id();
10968 struct pmu *pmu = &perf_tracepoint;
10969 struct perf_event *event, *sibling;
10970
10971 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) {
10972 __perf_tp_event_target_task(count, record, regs, data, raw, event);
10973 for_each_sibling_event(sibling, event)
10974 __perf_tp_event_target_task(count, record, regs, data, raw, sibling);
10975 }
10976
10977 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) {
10978 __perf_tp_event_target_task(count, record, regs, data, raw, event);
10979 for_each_sibling_event(sibling, event)
10980 __perf_tp_event_target_task(count, record, regs, data, raw, sibling);
10981 }
10982 }
10983
10984 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
10985 struct pt_regs *regs, struct hlist_head *head, int rctx,
10986 struct task_struct *task)
10987 {
10988 struct perf_sample_data data;
10989 struct perf_event *event;
10990
10991 struct perf_raw_record raw = {
10992 .frag = {
10993 .size = entry_size,
10994 .data = record,
10995 },
10996 };
10997
10998 perf_trace_buf_update(record, event_type);
10999
11000 hlist_for_each_entry_rcu(event, head, hlist_entry) {
11001 if (perf_tp_event_match(event, &raw, regs)) {
11002 /*
11003 * The same on-stack perf_sample_data is reused here;
11004 * some members in data are event-specific and
11005 * need to be re-computed for each sw event.
11006 * Re-initialize data->sample_flags to avoid
11007 * the problem that the next event skips preparing data
11008 * because data->sample_flags is already set.
11009 */
11010 perf_sample_data_init(&data, 0, 0);
11011 perf_sample_save_raw_data(&data, event, &raw);
11012 perf_swevent_event(event, count, &data, regs);
11013 }
11014 }
11015
11016 /*
11017 * If we got specified a target task, also iterate its context and
11018 * deliver this event there too.
11019 */
11020 if (task && task != current) {
11021 struct perf_event_context *ctx;
11022
11023 rcu_read_lock();
11024 ctx = rcu_dereference(task->perf_event_ctxp);
11025 if (!ctx)
11026 goto unlock;
11027
11028 raw_spin_lock(&ctx->lock);
11029 perf_tp_event_target_task(count, record, regs, &data, &raw, ctx);
11030 raw_spin_unlock(&ctx->lock);
11031 unlock:
11032 rcu_read_unlock();
11033 }
11034
11035 perf_swevent_put_recursion_context(rctx);
11036 }
11037 EXPORT_SYMBOL_GPL(perf_tp_event);
11038
11039 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
11040 /*
11041 * Flags in config, used by dynamic PMU kprobe and uprobe
11042 * The flags should match following PMU_FORMAT_ATTR().
11043 *
11044 * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe
11045 * if not set, create kprobe/uprobe
11046 *
11047 * The following values specify a reference counter (or semaphore, in the
11048 * terminology of tools like dtrace, systemtap, etc.) used by Userspace
11049 * Statically Defined Tracepoints (USDT). Currently, config bits 32-63 hold this offset.
11050 *
11051 * PERF_UPROBE_REF_CTR_OFFSET_BITS # of bits in config used as the offset
11052 * PERF_UPROBE_REF_CTR_OFFSET_SHIFT # of bits to shift left
11053 */
11054 enum perf_probe_config {
11055 PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0, /* [k,u]retprobe */
11056 PERF_UPROBE_REF_CTR_OFFSET_BITS = 32,
11057 PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 64 - PERF_UPROBE_REF_CTR_OFFSET_BITS,
11058 };
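/*
 * Illustrative userspace sketch (not kernel code; the local variable names
 * are hypothetical): a tool opening a uretprobe with a USDT reference
 * counter would compose the attribute roughly as
 *
 *	struct perf_event_attr attr = { .size = sizeof(attr) };
 *
 *	attr.type	 = uprobe_pmu_type;	// /sys/bus/event_source/devices/uprobe/type
 *	attr.config	|= PERF_PROBE_CONFIG_IS_RETPROBE;
 *	attr.config	|= (__u64)ref_ctr_offset << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
 *	attr.uprobe_path = (__u64)(unsigned long)"/usr/bin/app";
 *	attr.probe_offset = func_offset;
 *
 * which perf_uprobe_event_init() below decodes using the flag and shift
 * values of this enum.
 */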
11059
11060 PMU_FORMAT_ATTR(retprobe, "config:0");
11061 #endif
11062
11063 #ifdef CONFIG_KPROBE_EVENTS
11064 static struct attribute *kprobe_attrs[] = {
11065 &format_attr_retprobe.attr,
11066 NULL,
11067 };
11068
11069 static struct attribute_group kprobe_format_group = {
11070 .name = "format",
11071 .attrs = kprobe_attrs,
11072 };
11073
11074 static const struct attribute_group *kprobe_attr_groups[] = {
11075 &kprobe_format_group,
11076 NULL,
11077 };
11078
11079 static int perf_kprobe_event_init(struct perf_event *event);
11080 static struct pmu perf_kprobe = {
11081 .task_ctx_nr = perf_sw_context,
11082 .event_init = perf_kprobe_event_init,
11083 .add = perf_trace_add,
11084 .del = perf_trace_del,
11085 .start = perf_swevent_start,
11086 .stop = perf_swevent_stop,
11087 .read = perf_swevent_read,
11088 .attr_groups = kprobe_attr_groups,
11089 };
11090
11091 static int perf_kprobe_event_init(struct perf_event *event)
11092 {
11093 int err;
11094 bool is_retprobe;
11095
11096 if (event->attr.type != perf_kprobe.type)
11097 return -ENOENT;
11098
11099 if (!perfmon_capable())
11100 return -EACCES;
11101
11102 /*
11103 * no branch sampling for probe events
11104 */
11105 if (has_branch_stack(event))
11106 return -EOPNOTSUPP;
11107
11108 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
11109 err = perf_kprobe_init(event, is_retprobe);
11110 if (err)
11111 return err;
11112
11113 event->destroy = perf_kprobe_destroy;
11114
11115 return 0;
11116 }
11117 #endif /* CONFIG_KPROBE_EVENTS */
11118
11119 #ifdef CONFIG_UPROBE_EVENTS
11120 PMU_FORMAT_ATTR(ref_ctr_offset, "config:32-63");
11121
11122 static struct attribute *uprobe_attrs[] = {
11123 &format_attr_retprobe.attr,
11124 &format_attr_ref_ctr_offset.attr,
11125 NULL,
11126 };
11127
11128 static struct attribute_group uprobe_format_group = {
11129 .name = "format",
11130 .attrs = uprobe_attrs,
11131 };
11132
11133 static const struct attribute_group *uprobe_attr_groups[] = {
11134 &uprobe_format_group,
11135 NULL,
11136 };
11137
11138 static int perf_uprobe_event_init(struct perf_event *event);
11139 static struct pmu perf_uprobe = {
11140 .task_ctx_nr = perf_sw_context,
11141 .event_init = perf_uprobe_event_init,
11142 .add = perf_trace_add,
11143 .del = perf_trace_del,
11144 .start = perf_swevent_start,
11145 .stop = perf_swevent_stop,
11146 .read = perf_swevent_read,
11147 .attr_groups = uprobe_attr_groups,
11148 };
11149
11150 static int perf_uprobe_event_init(struct perf_event *event)
11151 {
11152 int err;
11153 unsigned long ref_ctr_offset;
11154 bool is_retprobe;
11155
11156 if (event->attr.type != perf_uprobe.type)
11157 return -ENOENT;
11158
11159 if (!capable(CAP_SYS_ADMIN))
11160 return -EACCES;
11161
11162 /*
11163 * no branch sampling for probe events
11164 */
11165 if (has_branch_stack(event))
11166 return -EOPNOTSUPP;
11167
11168 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
11169 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
11170 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe);
11171 if (err)
11172 return err;
11173
11174 event->destroy = perf_uprobe_destroy;
11175
11176 return 0;
11177 }
11178 #endif /* CONFIG_UPROBE_EVENTS */
11179
11180 static inline void perf_tp_register(void)
11181 {
11182 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
11183 #ifdef CONFIG_KPROBE_EVENTS
11184 perf_pmu_register(&perf_kprobe, "kprobe", -1);
11185 #endif
11186 #ifdef CONFIG_UPROBE_EVENTS
11187 perf_pmu_register(&perf_uprobe, "uprobe", -1);
11188 #endif
11189 }
11190
11191 static void perf_event_free_filter(struct perf_event *event)
11192 {
11193 ftrace_profile_free_filter(event);
11194 }
11195
11196 /*
11197 * Returns true if the event is a tracepoint, or a kprobe/uprobe created
11198 * with perf_event_open().
11199 */
11200 static inline bool perf_event_is_tracing(struct perf_event *event)
11201 {
11202 if (event->pmu == &perf_tracepoint)
11203 return true;
11204 #ifdef CONFIG_KPROBE_EVENTS
11205 if (event->pmu == &perf_kprobe)
11206 return true;
11207 #endif
11208 #ifdef CONFIG_UPROBE_EVENTS
11209 if (event->pmu == &perf_uprobe)
11210 return true;
11211 #endif
11212 return false;
11213 }
11214
11215 static int __perf_event_set_bpf_prog(struct perf_event *event,
11216 struct bpf_prog *prog,
11217 u64 bpf_cookie)
11218 {
11219 bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp;
11220
11221 if (event->state <= PERF_EVENT_STATE_REVOKED)
11222 return -ENODEV;
11223
11224 if (!perf_event_is_tracing(event))
11225 return perf_event_set_bpf_handler(event, prog, bpf_cookie);
11226
11227 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE;
11228 is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE;
11229 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
11230 is_syscall_tp = is_syscall_trace_event(event->tp_event);
11231 if (!is_kprobe && !is_uprobe && !is_tracepoint && !is_syscall_tp)
11232 /* bpf programs can only be attached to u/kprobe or tracepoint */
11233 return -EINVAL;
11234
11235 if (((is_kprobe || is_uprobe) && prog->type != BPF_PROG_TYPE_KPROBE) ||
11236 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) ||
11237 (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT))
11238 return -EINVAL;
11239
11240 if (prog->type == BPF_PROG_TYPE_KPROBE && prog->sleepable && !is_uprobe)
11241 /* only uprobe programs are allowed to be sleepable */
11242 return -EINVAL;
11243
11244 /* Kprobe override only works for kprobes, not uprobes. */
11245 if (prog->kprobe_override && !is_kprobe)
11246 return -EINVAL;
11247
11248 /* Writing to context allowed only for uprobes. */
11249 if (prog->aux->kprobe_write_ctx && !is_uprobe)
11250 return -EINVAL;
11251
11252 if (is_tracepoint || is_syscall_tp) {
11253 int off = trace_event_get_offsets(event->tp_event);
11254
11255 if (prog->aux->max_ctx_offset > off)
11256 return -EACCES;
11257 }
11258
11259 return perf_event_attach_bpf_prog(event, prog, bpf_cookie);
11260 }
11261
11262 int perf_event_set_bpf_prog(struct perf_event *event,
11263 struct bpf_prog *prog,
11264 u64 bpf_cookie)
11265 {
11266 struct perf_event_context *ctx;
11267 int ret;
11268
11269 ctx = perf_event_ctx_lock(event);
11270 ret = __perf_event_set_bpf_prog(event, prog, bpf_cookie);
11271 perf_event_ctx_unlock(event, ctx);
11272
11273 return ret;
11274 }
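/*
 * Illustrative userspace flow (not kernel code): a tracing tool typically
 * reaches the function above by attaching a loaded program to an existing
 * [k,u]probe or tracepoint perf event:
 *
 *	prog_fd  = bpf(BPF_PROG_LOAD, ...);		// e.g. BPF_PROG_TYPE_KPROBE
 *	event_fd = perf_event_open(&attr, ...);		// the probe/tracepoint event
 *	ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 *	ioctl(event_fd, PERF_EVENT_IOC_ENABLE, 0);
 *
 * The ioctl path ends up calling perf_event_set_bpf_prog() with the
 * resolved bpf_prog.
 */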
11275
11276 void perf_event_free_bpf_prog(struct perf_event *event)
11277 {
11278 if (!event->prog)
11279 return;
11280
11281 if (!perf_event_is_tracing(event)) {
11282 perf_event_free_bpf_handler(event);
11283 return;
11284 }
11285 perf_event_detach_bpf_prog(event);
11286 }
11287
11288 #else
11289
11290 static inline void perf_tp_register(void)
11291 {
11292 }
11293
11294 static void perf_event_free_filter(struct perf_event *event)
11295 {
11296 }
11297
11298 static int __perf_event_set_bpf_prog(struct perf_event *event,
11299 struct bpf_prog *prog,
11300 u64 bpf_cookie)
11301 {
11302 return -ENOENT;
11303 }
11304
11305 int perf_event_set_bpf_prog(struct perf_event *event,
11306 struct bpf_prog *prog,
11307 u64 bpf_cookie)
11308 {
11309 return -ENOENT;
11310 }
11311
11312 void perf_event_free_bpf_prog(struct perf_event *event)
11313 {
11314 }
11315 #endif /* CONFIG_EVENT_TRACING */
11316
11317 #ifdef CONFIG_HAVE_HW_BREAKPOINT
11318 void perf_bp_event(struct perf_event *bp, void *data)
11319 {
11320 struct perf_sample_data sample;
11321 struct pt_regs *regs = data;
11322
11323 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
11324
11325 if (!bp->hw.state && !perf_exclude_event(bp, regs))
11326 perf_swevent_event(bp, 1, &sample, regs);
11327 }
11328 #endif
11329
11330 /*
11331 * Allocate a new address filter
11332 */
11333 static struct perf_addr_filter *
11334 perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
11335 {
11336 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
11337 struct perf_addr_filter *filter;
11338
11339 filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
11340 if (!filter)
11341 return NULL;
11342
11343 INIT_LIST_HEAD(&filter->entry);
11344 list_add_tail(&filter->entry, filters);
11345
11346 return filter;
11347 }
11348
11349 static void free_filters_list(struct list_head *filters)
11350 {
11351 struct perf_addr_filter *filter, *iter;
11352
11353 list_for_each_entry_safe(filter, iter, filters, entry) {
11354 path_put(&filter->path);
11355 list_del(&filter->entry);
11356 kfree(filter);
11357 }
11358 }
11359
11360 /*
11361 * Free existing address filters and optionally install new ones
11362 */
11363 static void perf_addr_filters_splice(struct perf_event *event,
11364 struct list_head *head)
11365 {
11366 unsigned long flags;
11367 LIST_HEAD(list);
11368
11369 if (!has_addr_filter(event))
11370 return;
11371
11372 /* don't bother with children, they don't have their own filters */
11373 if (event->parent)
11374 return;
11375
11376 raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
11377
11378 list_splice_init(&event->addr_filters.list, &list);
11379 if (head)
11380 list_splice(head, &event->addr_filters.list);
11381
11382 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
11383
11384 free_filters_list(&list);
11385 }
11386
11387 static void perf_free_addr_filters(struct perf_event *event)
11388 {
11389 /*
11390 * Used during free paths, there is no concurrency.
11391 */
11392 if (list_empty(&event->addr_filters.list))
11393 return;
11394
11395 perf_addr_filters_splice(event, NULL);
11396 }
11397
11398 /*
11399 * Scan through mm's vmas and see if one of them matches the
11400 * @filter; if so, adjust filter's address range.
11401 * Called with mm::mmap_lock down for reading.
11402 */
11403 static void perf_addr_filter_apply(struct perf_addr_filter *filter,
11404 struct mm_struct *mm,
11405 struct perf_addr_filter_range *fr)
11406 {
11407 struct vm_area_struct *vma;
11408 VMA_ITERATOR(vmi, mm, 0);
11409
11410 for_each_vma(vmi, vma) {
11411 if (!vma->vm_file)
11412 continue;
11413
11414 if (perf_addr_filter_vma_adjust(filter, vma, fr))
11415 return;
11416 }
11417 }
11418
11419 /*
11420 * Update event's address range filters based on the
11421 * task's existing mappings, if any.
11422 */
11423 static void perf_event_addr_filters_apply(struct perf_event *event)
11424 {
11425 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
11426 struct task_struct *task = READ_ONCE(event->ctx->task);
11427 struct perf_addr_filter *filter;
11428 struct mm_struct *mm = NULL;
11429 unsigned int count = 0;
11430 unsigned long flags;
11431
11432 /*
11433 * We may observe TASK_TOMBSTONE, which means that the event tear-down
11434 * will stop on the parent's child_mutex that our caller is also holding.
11435 */
11436 if (task == TASK_TOMBSTONE)
11437 return;
11438
11439 if (ifh->nr_file_filters) {
11440 mm = get_task_mm(task);
11441 if (!mm)
11442 goto restart;
11443
11444 mmap_read_lock(mm);
11445 }
11446
11447 raw_spin_lock_irqsave(&ifh->lock, flags);
11448 list_for_each_entry(filter, &ifh->list, entry) {
11449 if (filter->path.dentry) {
11450 /*
11451 * Adjust base offset if the filter is associated with a
11452 * binary that needs to be mapped:
11453 */
11454 event->addr_filter_ranges[count].start = 0;
11455 event->addr_filter_ranges[count].size = 0;
11456
11457 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
11458 } else {
11459 event->addr_filter_ranges[count].start = filter->offset;
11460 event->addr_filter_ranges[count].size = filter->size;
11461 }
11462
11463 count++;
11464 }
11465
11466 event->addr_filters_gen++;
11467 raw_spin_unlock_irqrestore(&ifh->lock, flags);
11468
11469 if (ifh->nr_file_filters) {
11470 mmap_read_unlock(mm);
11471
11472 mmput(mm);
11473 }
11474
11475 restart:
11476 perf_event_stop(event, 1);
11477 }
11478
11479 /*
11480 * Address range filtering: limiting the data to certain
11481 * instruction address ranges. Filters are ioctl()ed to us from
11482 * userspace as ascii strings.
11483 *
11484 * Filter string format:
11485 *
11486 * ACTION RANGE_SPEC
11487 * where ACTION is one of the
11488 * * "filter": limit the trace to this region
11489 * * "start": start tracing from this address
11490 * * "stop": stop tracing at this address/region;
11491 * RANGE_SPEC is
11492 * * for kernel addresses: <start address>[/<size>]
11493 * * for object files: <start address>[/<size>]@</path/to/object/file>
11494 *
11495 * if <size> is not specified or is zero, the range is treated as a single
11496 * address; not valid for ACTION=="filter".
11497 */
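/*
 * Example filter strings (illustrative; the object path is hypothetical):
 *
 *	"filter 0x1000/0x2000@/usr/lib/libfoo.so"	- trace only this range of libfoo.so
 *	"start 0xffffffff81000000/0x100000"		- start tracing at this kernel range
 *	"stop 0x401000@/usr/bin/app"			- stop tracing at this address in app
 *
 * Several filters may be combined in one string, separated by spaces,
 * commas or newlines, as parsed by perf_event_parse_addr_filter() below.
 */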
11498 enum {
11499 IF_ACT_NONE = -1,
11500 IF_ACT_FILTER,
11501 IF_ACT_START,
11502 IF_ACT_STOP,
11503 IF_SRC_FILE,
11504 IF_SRC_KERNEL,
11505 IF_SRC_FILEADDR,
11506 IF_SRC_KERNELADDR,
11507 };
11508
11509 enum {
11510 IF_STATE_ACTION = 0,
11511 IF_STATE_SOURCE,
11512 IF_STATE_END,
11513 };
11514
11515 static const match_table_t if_tokens = {
11516 { IF_ACT_FILTER, "filter" },
11517 { IF_ACT_START, "start" },
11518 { IF_ACT_STOP, "stop" },
11519 { IF_SRC_FILE, "%u/%u@%s" },
11520 { IF_SRC_KERNEL, "%u/%u" },
11521 { IF_SRC_FILEADDR, "%u@%s" },
11522 { IF_SRC_KERNELADDR, "%u" },
11523 { IF_ACT_NONE, NULL },
11524 };
11525
11526 /*
11527 * Address filter string parser
11528 */
11529 static int
11530 perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
11531 struct list_head *filters)
11532 {
11533 struct perf_addr_filter *filter = NULL;
11534 char *start, *orig, *filename = NULL;
11535 substring_t args[MAX_OPT_ARGS];
11536 int state = IF_STATE_ACTION, token;
11537 unsigned int kernel = 0;
11538 int ret = -EINVAL;
11539
11540 orig = fstr = kstrdup(fstr, GFP_KERNEL);
11541 if (!fstr)
11542 return -ENOMEM;
11543
11544 while ((start = strsep(&fstr, " ,\n")) != NULL) {
11545 static const enum perf_addr_filter_action_t actions[] = {
11546 [IF_ACT_FILTER] = PERF_ADDR_FILTER_ACTION_FILTER,
11547 [IF_ACT_START] = PERF_ADDR_FILTER_ACTION_START,
11548 [IF_ACT_STOP] = PERF_ADDR_FILTER_ACTION_STOP,
11549 };
11550 ret = -EINVAL;
11551
11552 if (!*start)
11553 continue;
11554
11555 /* filter definition begins */
11556 if (state == IF_STATE_ACTION) {
11557 filter = perf_addr_filter_new(event, filters);
11558 if (!filter)
11559 goto fail;
11560 }
11561
11562 token = match_token(start, if_tokens, args);
11563 switch (token) {
11564 case IF_ACT_FILTER:
11565 case IF_ACT_START:
11566 case IF_ACT_STOP:
11567 if (state != IF_STATE_ACTION)
11568 goto fail;
11569
11570 filter->action = actions[token];
11571 state = IF_STATE_SOURCE;
11572 break;
11573
11574 case IF_SRC_KERNELADDR:
11575 case IF_SRC_KERNEL:
11576 kernel = 1;
11577 fallthrough;
11578
11579 case IF_SRC_FILEADDR:
11580 case IF_SRC_FILE:
11581 if (state != IF_STATE_SOURCE)
11582 goto fail;
11583
11584 *args[0].to = 0;
11585 ret = kstrtoul(args[0].from, 0, &filter->offset);
11586 if (ret)
11587 goto fail;
11588
11589 if (token == IF_SRC_KERNEL || token == IF_SRC_FILE) {
11590 *args[1].to = 0;
11591 ret = kstrtoul(args[1].from, 0, &filter->size);
11592 if (ret)
11593 goto fail;
11594 }
11595
11596 if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
11597 int fpos = token == IF_SRC_FILE ? 2 : 1;
11598
11599 kfree(filename);
11600 filename = match_strdup(&args[fpos]);
11601 if (!filename) {
11602 ret = -ENOMEM;
11603 goto fail;
11604 }
11605 }
11606
11607 state = IF_STATE_END;
11608 break;
11609
11610 default:
11611 goto fail;
11612 }
11613
11614 /*
11615 * Filter definition is fully parsed, validate and install it.
11616 * Make sure that it doesn't contradict itself or the event's
11617 * attribute.
11618 */
11619 if (state == IF_STATE_END) {
11620 ret = -EINVAL;
11621
11622 /*
11623 * ACTION "filter" must have a non-zero length region
11624 * specified.
11625 */
11626 if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER &&
11627 !filter->size)
11628 goto fail;
11629
11630 if (!kernel) {
11631 if (!filename)
11632 goto fail;
11633
11634 /*
11635 * For now, we only support file-based filters
11636 * in per-task events; doing so for CPU-wide
11637 * events requires additional context switching
11638 * trickery, since the same object code will be
11639 * mapped at different virtual addresses in
11640 * different processes.
11641 */
11642 ret = -EOPNOTSUPP;
11643 if (!event->ctx->task)
11644 goto fail;
11645
11646 /* look up the path and grab its inode */
11647 ret = kern_path(filename, LOOKUP_FOLLOW,
11648 &filter->path);
11649 if (ret)
11650 goto fail;
11651
11652 ret = -EINVAL;
11653 if (!filter->path.dentry ||
11654 !S_ISREG(d_inode(filter->path.dentry)
11655 ->i_mode))
11656 goto fail;
11657
11658 event->addr_filters.nr_file_filters++;
11659 }
11660
11661 /* ready to consume more filters */
11662 kfree(filename);
11663 filename = NULL;
11664 state = IF_STATE_ACTION;
11665 filter = NULL;
11666 kernel = 0;
11667 }
11668 }
11669
11670 if (state != IF_STATE_ACTION)
11671 goto fail;
11672
11673 kfree(filename);
11674 kfree(orig);
11675
11676 return 0;
11677
11678 fail:
11679 kfree(filename);
11680 free_filters_list(filters);
11681 kfree(orig);
11682
11683 return ret;
11684 }
11685
11686 static int
11687 perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
11688 {
11689 LIST_HEAD(filters);
11690 int ret;
11691
11692 /*
11693 * Since this is called from the perf_ioctl() path, we're already holding
11694 * ctx::mutex.
11695 */
11696 lockdep_assert_held(&event->ctx->mutex);
11697
11698 if (WARN_ON_ONCE(event->parent))
11699 return -EINVAL;
11700
11701 ret = perf_event_parse_addr_filter(event, filter_str, &filters);
11702 if (ret)
11703 goto fail_clear_files;
11704
11705 ret = event->pmu->addr_filters_validate(&filters);
11706 if (ret)
11707 goto fail_free_filters;
11708
11709 /* remove existing filters, if any */
11710 perf_addr_filters_splice(event, &filters);
11711
11712 /* install new filters */
11713 perf_event_for_each_child(event, perf_event_addr_filters_apply);
11714
11715 return ret;
11716
11717 fail_free_filters:
11718 free_filters_list(&filters);
11719
11720 fail_clear_files:
11721 event->addr_filters.nr_file_filters = 0;
11722
11723 return ret;
11724 }
11725
11726 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
11727 {
11728 int ret = -EINVAL;
11729 char *filter_str;
11730
11731 filter_str = strndup_user(arg, PAGE_SIZE);
11732 if (IS_ERR(filter_str))
11733 return PTR_ERR(filter_str);
11734
11735 #ifdef CONFIG_EVENT_TRACING
11736 if (perf_event_is_tracing(event)) {
11737 struct perf_event_context *ctx = event->ctx;
11738
11739 /*
11740 * Beware, here be dragons!!
11741 *
11742 * The tracepoint muck will deadlock against ctx->mutex, but
11743 * the tracepoint stuff does not actually need it. So
11744 * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we
11745 * already have a reference on ctx.
11746 *
11747 * This can result in the event getting moved to a different ctx,
11748 * but that does not affect the tracepoint state.
11749 */
11750 mutex_unlock(&ctx->mutex);
11751 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
11752 mutex_lock(&ctx->mutex);
11753 } else
11754 #endif
11755 if (has_addr_filter(event))
11756 ret = perf_event_set_addr_filter(event, filter_str);
11757
11758 kfree(filter_str);
11759 return ret;
11760 }
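/*
 * Usage note (illustrative): userspace reaches perf_event_set_filter()
 * through the PERF_EVENT_IOC_SET_FILTER ioctl, e.g.:
 *
 *	ioctl(event_fd, PERF_EVENT_IOC_SET_FILTER,
 *	      "filter 0x1000/0x2000@/usr/lib/libfoo.so");
 *
 * For tracing events the string is handed to ftrace_profile_set_filter();
 * for address-filter capable PMUs it is parsed as described above.
 */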
11761
11762 /*
11763 * hrtimer based swevent callback
11764 */
11765
11766 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
11767 {
11768 enum hrtimer_restart ret = HRTIMER_RESTART;
11769 struct perf_sample_data data;
11770 struct pt_regs *regs;
11771 struct perf_event *event;
11772 u64 period;
11773
11774 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
11775
11776 if (event->state != PERF_EVENT_STATE_ACTIVE)
11777 return HRTIMER_NORESTART;
11778
11779 event->pmu->read(event);
11780
11781 perf_sample_data_init(&data, 0, event->hw.last_period);
11782 regs = get_irq_regs();
11783
11784 if (regs && !perf_exclude_event(event, regs)) {
11785 if (!(event->attr.exclude_idle && is_idle_task(current)))
11786 if (__perf_event_overflow(event, 1, &data, regs))
11787 ret = HRTIMER_NORESTART;
11788 }
11789
11790 period = max_t(u64, 10000, event->hw.sample_period);
11791 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
11792
11793 return ret;
11794 }
11795
11796 static void perf_swevent_start_hrtimer(struct perf_event *event)
11797 {
11798 struct hw_perf_event *hwc = &event->hw;
11799 s64 period;
11800
11801 if (!is_sampling_event(event))
11802 return;
11803
11804 period = local64_read(&hwc->period_left);
11805 if (period) {
11806 if (period < 0)
11807 period = 10000;
11808
11809 local64_set(&hwc->period_left, 0);
11810 } else {
11811 period = max_t(u64, 10000, hwc->sample_period);
11812 }
11813 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
11814 HRTIMER_MODE_REL_PINNED_HARD);
11815 }
11816
11817 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
11818 {
11819 struct hw_perf_event *hwc = &event->hw;
11820
11821 /*
11822 * The throttle can be triggered in the hrtimer handler.
11823 * HRTIMER_NORESTART should be used to stop the timer
11824 * rather than hrtimer_cancel(). See perf_swevent_hrtimer().
11825 */
11826 if (is_sampling_event(event) && (hwc->interrupts != MAX_INTERRUPTS)) {
11827 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
11828 local64_set(&hwc->period_left, ktime_to_ns(remaining));
11829
11830 hrtimer_cancel(&hwc->hrtimer);
11831 }
11832 }
11833
11834 static void perf_swevent_init_hrtimer(struct perf_event *event)
11835 {
11836 struct hw_perf_event *hwc = &event->hw;
11837
11838 if (!is_sampling_event(event))
11839 return;
11840
11841 hrtimer_setup(&hwc->hrtimer, perf_swevent_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
11842
11843 /*
11844 * Since hrtimers have a fixed rate, we can do a static freq->period
11845 * mapping and avoid the whole period adjust feedback stuff.
11846 */
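/*
 * E.g. (illustrative): attr.sample_freq == 4000 maps to a fixed
 * sample_period of NSEC_PER_SEC / 4000 = 250000 ns.
 */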
11847 if (event->attr.freq) {
11848 long freq = event->attr.sample_freq;
11849
11850 event->attr.sample_period = NSEC_PER_SEC / freq;
11851 hwc->sample_period = event->attr.sample_period;
11852 local64_set(&hwc->period_left, hwc->sample_period);
11853 hwc->last_period = hwc->sample_period;
11854 event->attr.freq = 0;
11855 }
11856 }
11857
11858 /*
11859 * Software event: cpu wall time clock
11860 */
11861
11862 static void cpu_clock_event_update(struct perf_event *event)
11863 {
11864 s64 prev;
11865 u64 now;
11866
11867 now = local_clock();
11868 prev = local64_xchg(&event->hw.prev_count, now);
11869 local64_add(now - prev, &event->count);
11870 }
11871
11872 static void cpu_clock_event_start(struct perf_event *event, int flags)
11873 {
11874 local64_set(&event->hw.prev_count, local_clock());
11875 perf_swevent_start_hrtimer(event);
11876 }
11877
11878 static void cpu_clock_event_stop(struct perf_event *event, int flags)
11879 {
11880 perf_swevent_cancel_hrtimer(event);
11881 if (flags & PERF_EF_UPDATE)
11882 cpu_clock_event_update(event);
11883 }
11884
11885 static int cpu_clock_event_add(struct perf_event *event, int flags)
11886 {
11887 if (flags & PERF_EF_START)
11888 cpu_clock_event_start(event, flags);
11889 perf_event_update_userpage(event);
11890
11891 return 0;
11892 }
11893
11894 static void cpu_clock_event_del(struct perf_event *event, int flags)
11895 {
11896 cpu_clock_event_stop(event, flags);
11897 }
11898
11899 static void cpu_clock_event_read(struct perf_event *event)
11900 {
11901 cpu_clock_event_update(event);
11902 }
11903
11904 static int cpu_clock_event_init(struct perf_event *event)
11905 {
11906 if (event->attr.type != perf_cpu_clock.type)
11907 return -ENOENT;
11908
11909 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
11910 return -ENOENT;
11911
11912 /*
11913 * no branch sampling for software events
11914 */
11915 if (has_branch_stack(event))
11916 return -EOPNOTSUPP;
11917
11918 perf_swevent_init_hrtimer(event);
11919
11920 return 0;
11921 }
11922
11923 static struct pmu perf_cpu_clock = {
11924 .task_ctx_nr = perf_sw_context,
11925
11926 .capabilities = PERF_PMU_CAP_NO_NMI,
11927 .dev = PMU_NULL_DEV,
11928
11929 .event_init = cpu_clock_event_init,
11930 .add = cpu_clock_event_add,
11931 .del = cpu_clock_event_del,
11932 .start = cpu_clock_event_start,
11933 .stop = cpu_clock_event_stop,
11934 .read = cpu_clock_event_read,
11935 };
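/*
 * Illustrative userspace usage (not kernel code): a cpu-clock software
 * event is typically opened as
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.config		= PERF_COUNT_SW_CPU_CLOCK,
 *		.sample_freq	= 1000,
 *		.freq		= 1,
 *	};
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * Such events are typically forwarded to perf_cpu_clock by rewriting
 * attr.type (see the comment in perf_init_event()).
 */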
11936
11937 /*
11938 * Software event: task time clock
11939 */
11940
11941 static void task_clock_event_update(struct perf_event *event, u64 now)
11942 {
11943 u64 prev;
11944 s64 delta;
11945
11946 prev = local64_xchg(&event->hw.prev_count, now);
11947 delta = now - prev;
11948 local64_add(delta, &event->count);
11949 }
11950
11951 static void task_clock_event_start(struct perf_event *event, int flags)
11952 {
11953 local64_set(&event->hw.prev_count, event->ctx->time);
11954 perf_swevent_start_hrtimer(event);
11955 }
11956
11957 static void task_clock_event_stop(struct perf_event *event, int flags)
11958 {
11959 perf_swevent_cancel_hrtimer(event);
11960 if (flags & PERF_EF_UPDATE)
11961 task_clock_event_update(event, event->ctx->time);
11962 }
11963
11964 static int task_clock_event_add(struct perf_event *event, int flags)
11965 {
11966 if (flags & PERF_EF_START)
11967 task_clock_event_start(event, flags);
11968 perf_event_update_userpage(event);
11969
11970 return 0;
11971 }
11972
11973 static void task_clock_event_del(struct perf_event *event, int flags)
11974 {
11975 task_clock_event_stop(event, PERF_EF_UPDATE);
11976 }
11977
11978 static void task_clock_event_read(struct perf_event *event)
11979 {
11980 u64 now = perf_clock();
11981 u64 delta = now - event->ctx->timestamp;
11982 u64 time = event->ctx->time + delta;
11983
11984 task_clock_event_update(event, time);
11985 }
11986
11987 static int task_clock_event_init(struct perf_event *event)
11988 {
11989 if (event->attr.type != perf_task_clock.type)
11990 return -ENOENT;
11991
11992 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
11993 return -ENOENT;
11994
11995 /*
11996 * no branch sampling for software events
11997 */
11998 if (has_branch_stack(event))
11999 return -EOPNOTSUPP;
12000
12001 perf_swevent_init_hrtimer(event);
12002
12003 return 0;
12004 }
12005
12006 static struct pmu perf_task_clock = {
12007 .task_ctx_nr = perf_sw_context,
12008
12009 .capabilities = PERF_PMU_CAP_NO_NMI,
12010 .dev = PMU_NULL_DEV,
12011
12012 .event_init = task_clock_event_init,
12013 .add = task_clock_event_add,
12014 .del = task_clock_event_del,
12015 .start = task_clock_event_start,
12016 .stop = task_clock_event_stop,
12017 .read = task_clock_event_read,
12018 };
12019
12020 static void perf_pmu_nop_void(struct pmu *pmu)
12021 {
12022 }
12023
12024 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
12025 {
12026 }
12027
12028 static int perf_pmu_nop_int(struct pmu *pmu)
12029 {
12030 return 0;
12031 }
12032
12033 static int perf_event_nop_int(struct perf_event *event, u64 value)
12034 {
12035 return 0;
12036 }
12037
12038 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
12039
12040 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
12041 {
12042 __this_cpu_write(nop_txn_flags, flags);
12043
12044 if (flags & ~PERF_PMU_TXN_ADD)
12045 return;
12046
12047 perf_pmu_disable(pmu);
12048 }
12049
12050 static int perf_pmu_commit_txn(struct pmu *pmu)
12051 {
12052 unsigned int flags = __this_cpu_read(nop_txn_flags);
12053
12054 __this_cpu_write(nop_txn_flags, 0);
12055
12056 if (flags & ~PERF_PMU_TXN_ADD)
12057 return 0;
12058
12059 perf_pmu_enable(pmu);
12060 return 0;
12061 }
12062
12063 static void perf_pmu_cancel_txn(struct pmu *pmu)
12064 {
12065 unsigned int flags = __this_cpu_read(nop_txn_flags);
12066
12067 __this_cpu_write(nop_txn_flags, 0);
12068
12069 if (flags & ~PERF_PMU_TXN_ADD)
12070 return;
12071
12072 perf_pmu_enable(pmu);
12073 }
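/*
 * Illustrative call sequence (see group_sched_in()): a group of events is
 * scheduled in roughly as
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *	pmu->add(leader, PERF_EF_START);
 *	pmu->add(sibling, PERF_EF_START);
 *	...
 *	if (pmu->commit_txn(pmu))
 *		// roll back: pmu->del() the events added so far
 *
 * The default stubs above (perf_pmu_start_txn() etc.) simply bracket this
 * with perf_pmu_disable()/perf_pmu_enable() to batch hardware accesses.
 */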
12074
12075 static int perf_event_idx_default(struct perf_event *event)
12076 {
12077 return 0;
12078 }
12079
12080 /*
12081 * Let userspace know that this PMU supports address range filtering:
12082 */
12083 static ssize_t nr_addr_filters_show(struct device *dev,
12084 struct device_attribute *attr,
12085 char *page)
12086 {
12087 struct pmu *pmu = dev_get_drvdata(dev);
12088
12089 return sysfs_emit(page, "%d\n", pmu->nr_addr_filters);
12090 }
12091 DEVICE_ATTR_RO(nr_addr_filters);
12092
12093 static struct idr pmu_idr;
12094
12095 static ssize_t
12096 type_show(struct device *dev, struct device_attribute *attr, char *page)
12097 {
12098 struct pmu *pmu = dev_get_drvdata(dev);
12099
12100 return sysfs_emit(page, "%d\n", pmu->type);
12101 }
12102 static DEVICE_ATTR_RO(type);
12103
12104 static ssize_t
12105 perf_event_mux_interval_ms_show(struct device *dev,
12106 struct device_attribute *attr,
12107 char *page)
12108 {
12109 struct pmu *pmu = dev_get_drvdata(dev);
12110
12111 return sysfs_emit(page, "%d\n", pmu->hrtimer_interval_ms);
12112 }
12113
12114 static DEFINE_MUTEX(mux_interval_mutex);
12115
12116 static ssize_t
12117 perf_event_mux_interval_ms_store(struct device *dev,
12118 struct device_attribute *attr,
12119 const char *buf, size_t count)
12120 {
12121 struct pmu *pmu = dev_get_drvdata(dev);
12122 int timer, cpu, ret;
12123
12124 ret = kstrtoint(buf, 0, &timer);
12125 if (ret)
12126 return ret;
12127
12128 if (timer < 1)
12129 return -EINVAL;
12130
12131 /* same value, nothing to do */
12132 if (timer == pmu->hrtimer_interval_ms)
12133 return count;
12134
12135 mutex_lock(&mux_interval_mutex);
12136 pmu->hrtimer_interval_ms = timer;
12137
12138 /* update all cpuctx for this PMU */
12139 cpus_read_lock();
12140 for_each_online_cpu(cpu) {
12141 struct perf_cpu_pmu_context *cpc;
12142 cpc = *per_cpu_ptr(pmu->cpu_pmu_context, cpu);
12143 cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
12144
12145 cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpc);
12146 }
12147 cpus_read_unlock();
12148 mutex_unlock(&mux_interval_mutex);
12149
12150 return count;
12151 }
12152 static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
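/*
 * Usage note (illustrative): the multiplexing interval of a PMU can be
 * tuned from userspace, e.g.:
 *
 *	echo 4 > /sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms
 *
 * which ends up in perf_event_mux_interval_ms_store() above and restarts
 * the rotation hrtimer on every online CPU with the new period.
 */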
12153
12154 static inline const struct cpumask *perf_scope_cpu_topology_cpumask(unsigned int scope, int cpu)
12155 {
12156 switch (scope) {
12157 case PERF_PMU_SCOPE_CORE:
12158 return topology_sibling_cpumask(cpu);
12159 case PERF_PMU_SCOPE_DIE:
12160 return topology_die_cpumask(cpu);
12161 case PERF_PMU_SCOPE_CLUSTER:
12162 return topology_cluster_cpumask(cpu);
12163 case PERF_PMU_SCOPE_PKG:
12164 return topology_core_cpumask(cpu);
12165 case PERF_PMU_SCOPE_SYS_WIDE:
12166 return cpu_online_mask;
12167 }
12168
12169 return NULL;
12170 }
12171
12172 static inline struct cpumask *perf_scope_cpumask(unsigned int scope)
12173 {
12174 switch (scope) {
12175 case PERF_PMU_SCOPE_CORE:
12176 return perf_online_core_mask;
12177 case PERF_PMU_SCOPE_DIE:
12178 return perf_online_die_mask;
12179 case PERF_PMU_SCOPE_CLUSTER:
12180 return perf_online_cluster_mask;
12181 case PERF_PMU_SCOPE_PKG:
12182 return perf_online_pkg_mask;
12183 case PERF_PMU_SCOPE_SYS_WIDE:
12184 return perf_online_sys_mask;
12185 }
12186
12187 return NULL;
12188 }
12189
12190 static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
12191 char *buf)
12192 {
12193 struct pmu *pmu = dev_get_drvdata(dev);
12194 struct cpumask *mask = perf_scope_cpumask(pmu->scope);
12195
12196 if (mask)
12197 return cpumap_print_to_pagebuf(true, buf, mask);
12198 return 0;
12199 }
12200
12201 static DEVICE_ATTR_RO(cpumask);
12202
12203 static struct attribute *pmu_dev_attrs[] = {
12204 &dev_attr_type.attr,
12205 &dev_attr_perf_event_mux_interval_ms.attr,
12206 &dev_attr_nr_addr_filters.attr,
12207 &dev_attr_cpumask.attr,
12208 NULL,
12209 };
12210
12211 static umode_t pmu_dev_is_visible(struct kobject *kobj, struct attribute *a, int n)
12212 {
12213 struct device *dev = kobj_to_dev(kobj);
12214 struct pmu *pmu = dev_get_drvdata(dev);
12215
12216 if (n == 2 && !pmu->nr_addr_filters)
12217 return 0;
12218
12219 /* cpumask */
12220 if (n == 3 && pmu->scope == PERF_PMU_SCOPE_NONE)
12221 return 0;
12222
12223 return a->mode;
12224 }
12225
12226 static struct attribute_group pmu_dev_attr_group = {
12227 .is_visible = pmu_dev_is_visible,
12228 .attrs = pmu_dev_attrs,
12229 };
12230
12231 static const struct attribute_group *pmu_dev_groups[] = {
12232 &pmu_dev_attr_group,
12233 NULL,
12234 };
12235
12236 static int pmu_bus_running;
12237 static const struct bus_type pmu_bus = {
12238 .name = "event_source",
12239 .dev_groups = pmu_dev_groups,
12240 };
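/*
 * Illustrative sysfs layout resulting from the above (attribute visibility
 * depends on the PMU's capabilities):
 *
 *	/sys/bus/event_source/devices/<pmu>/type
 *	/sys/bus/event_source/devices/<pmu>/perf_event_mux_interval_ms
 *	/sys/bus/event_source/devices/<pmu>/nr_addr_filters
 *	/sys/bus/event_source/devices/<pmu>/cpumask
 *
 * plus any PMU-specific groups (format/, events/, ...) supplied via
 * pmu->attr_groups.
 */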
12241
12242 static void pmu_dev_release(struct device *dev)
12243 {
12244 kfree(dev);
12245 }
12246
12247 static int pmu_dev_alloc(struct pmu *pmu)
12248 {
12249 int ret = -ENOMEM;
12250
12251 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
12252 if (!pmu->dev)
12253 goto out;
12254
12255 pmu->dev->groups = pmu->attr_groups;
12256 device_initialize(pmu->dev);
12257
12258 dev_set_drvdata(pmu->dev, pmu);
12259 pmu->dev->bus = &pmu_bus;
12260 pmu->dev->parent = pmu->parent;
12261 pmu->dev->release = pmu_dev_release;
12262
12263 ret = dev_set_name(pmu->dev, "%s", pmu->name);
12264 if (ret)
12265 goto free_dev;
12266
12267 ret = device_add(pmu->dev);
12268 if (ret)
12269 goto free_dev;
12270
12271 if (pmu->attr_update) {
12272 ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
12273 if (ret)
12274 goto del_dev;
12275 }
12276
12277 out:
12278 return ret;
12279
12280 del_dev:
12281 device_del(pmu->dev);
12282
12283 free_dev:
12284 put_device(pmu->dev);
12285 pmu->dev = NULL;
12286 goto out;
12287 }
12288
12289 static struct lock_class_key cpuctx_mutex;
12290 static struct lock_class_key cpuctx_lock;
12291
12292 static bool idr_cmpxchg(struct idr *idr, unsigned long id, void *old, void *new)
12293 {
12294 void *tmp, *val = idr_find(idr, id);
12295
12296 if (val != old)
12297 return false;
12298
12299 tmp = idr_replace(idr, new, id);
12300 if (IS_ERR(tmp))
12301 return false;
12302
12303 WARN_ON_ONCE(tmp != val);
12304 return true;
12305 }
12306
12307 static void perf_pmu_free(struct pmu *pmu)
12308 {
12309 if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) {
12310 if (pmu->nr_addr_filters)
12311 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
12312 device_del(pmu->dev);
12313 put_device(pmu->dev);
12314 }
12315
12316 if (pmu->cpu_pmu_context) {
12317 int cpu;
12318
12319 for_each_possible_cpu(cpu) {
12320 struct perf_cpu_pmu_context *cpc;
12321
12322 cpc = *per_cpu_ptr(pmu->cpu_pmu_context, cpu);
12323 if (!cpc)
12324 continue;
12325 if (cpc->epc.embedded) {
12326 /* refcount managed */
12327 put_pmu_ctx(&cpc->epc);
12328 continue;
12329 }
12330 kfree(cpc);
12331 }
12332 free_percpu(pmu->cpu_pmu_context);
12333 }
12334 }
12335
12336 DEFINE_FREE(pmu_unregister, struct pmu *, if (_T) perf_pmu_free(_T))
12337
12338 int perf_pmu_register(struct pmu *_pmu, const char *name, int type)
12339 {
12340 int cpu, max = PERF_TYPE_MAX;
12341
12342 struct pmu *pmu __free(pmu_unregister) = _pmu;
12343 guard(mutex)(&pmus_lock);
12344
12345 if (WARN_ONCE(!name, "Can not register anonymous pmu.\n"))
12346 return -EINVAL;
12347
12348 if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE,
12349 "Can not register a pmu with an invalid scope.\n"))
12350 return -EINVAL;
12351
12352 pmu->name = name;
12353
12354 if (type >= 0)
12355 max = type;
12356
12357 CLASS(idr_alloc, pmu_type)(&pmu_idr, NULL, max, 0, GFP_KERNEL);
12358 if (pmu_type.id < 0)
12359 return pmu_type.id;
12360
12361 WARN_ON(type >= 0 && pmu_type.id != type);
12362
12363 pmu->type = pmu_type.id;
12364 atomic_set(&pmu->exclusive_cnt, 0);
12365
12366 if (pmu_bus_running && !pmu->dev) {
12367 int ret = pmu_dev_alloc(pmu);
12368 if (ret)
12369 return ret;
12370 }
12371
12372 pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context *);
12373 if (!pmu->cpu_pmu_context)
12374 return -ENOMEM;
12375
12376 for_each_possible_cpu(cpu) {
12377 struct perf_cpu_pmu_context *cpc =
12378 kmalloc_node(sizeof(struct perf_cpu_pmu_context),
12379 GFP_KERNEL | __GFP_ZERO,
12380 cpu_to_node(cpu));
12381
12382 if (!cpc)
12383 return -ENOMEM;
12384
12385 *per_cpu_ptr(pmu->cpu_pmu_context, cpu) = cpc;
12386 __perf_init_event_pmu_context(&cpc->epc, pmu);
12387 __perf_mux_hrtimer_init(cpc, cpu);
12388 }
12389
12390 if (!pmu->start_txn) {
12391 if (pmu->pmu_enable) {
12392 /*
12393 * If we have pmu_enable/pmu_disable calls, install
12394 * transaction stubs that use them to try and batch
12395 * hardware accesses.
12396 */
12397 pmu->start_txn = perf_pmu_start_txn;
12398 pmu->commit_txn = perf_pmu_commit_txn;
12399 pmu->cancel_txn = perf_pmu_cancel_txn;
12400 } else {
12401 pmu->start_txn = perf_pmu_nop_txn;
12402 pmu->commit_txn = perf_pmu_nop_int;
12403 pmu->cancel_txn = perf_pmu_nop_void;
12404 }
12405 }
12406
12407 if (!pmu->pmu_enable) {
12408 pmu->pmu_enable = perf_pmu_nop_void;
12409 pmu->pmu_disable = perf_pmu_nop_void;
12410 }
12411
12412 if (!pmu->check_period)
12413 pmu->check_period = perf_event_nop_int;
12414
12415 if (!pmu->event_idx)
12416 pmu->event_idx = perf_event_idx_default;
12417
12418 INIT_LIST_HEAD(&pmu->events);
12419 spin_lock_init(&pmu->events_lock);
12420
12421 /*
12422 * Now that the PMU is complete, make it visible to perf_try_init_event().
12423 */
12424 if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu))
12425 return -EINVAL;
12426 list_add_rcu(&pmu->entry, &pmus);
12427
12428 take_idr_id(pmu_type);
12429 _pmu = no_free_ptr(pmu); // let it rip
12430 return 0;
12431 }
12432 EXPORT_SYMBOL_GPL(perf_pmu_register);
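/*
 * Minimal usage sketch (illustrative only; my_pmu and the my_* callbacks
 * are hypothetical): a driver typically registers its PMU from module init
 * and unregisters it on exit:
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *	...
 *	perf_pmu_unregister(&my_pmu);
 *
 * Passing type == -1 makes the core allocate a dynamic type id, which
 * userspace can discover via /sys/bus/event_source/devices/my_pmu/type.
 */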
12433
12434 static void __pmu_detach_event(struct pmu *pmu, struct perf_event *event,
12435 struct perf_event_context *ctx)
12436 {
12437 /*
12438 * De-schedule the event and mark it REVOKED.
12439 */
12440 perf_event_exit_event(event, ctx, true);
12441
12442 /*
12443 * All _free_event() bits that rely on event->pmu:
12444 *
12445 * Notably, perf_mmap() relies on the ordering here.
12446 */
12447 scoped_guard (mutex, &event->mmap_mutex) {
12448 WARN_ON_ONCE(pmu->event_unmapped);
12449 /*
12450 * Mostly an empty lock sequence, such that perf_mmap(), which
12451 * relies on mmap_mutex, is sure to observe the state change.
12452 */
12453 }
12454
12455 perf_event_free_bpf_prog(event);
12456 perf_free_addr_filters(event);
12457
12458 if (event->destroy) {
12459 event->destroy(event);
12460 event->destroy = NULL;
12461 }
12462
12463 if (event->pmu_ctx) {
12464 put_pmu_ctx(event->pmu_ctx);
12465 event->pmu_ctx = NULL;
12466 }
12467
12468 exclusive_event_destroy(event);
12469 module_put(pmu->module);
12470
12471 event->pmu = NULL; /* force fault instead of UAF */
12472 }
12473
12474 static void pmu_detach_event(struct pmu *pmu, struct perf_event *event)
12475 {
12476 struct perf_event_context *ctx;
12477
12478 ctx = perf_event_ctx_lock(event);
12479 __pmu_detach_event(pmu, event, ctx);
12480 perf_event_ctx_unlock(event, ctx);
12481
12482 scoped_guard (spinlock, &pmu->events_lock)
12483 list_del(&event->pmu_list);
12484 }
12485
12486 static struct perf_event *pmu_get_event(struct pmu *pmu)
12487 {
12488 struct perf_event *event;
12489
12490 guard(spinlock)(&pmu->events_lock);
12491 list_for_each_entry(event, &pmu->events, pmu_list) {
12492 if (atomic_long_inc_not_zero(&event->refcount))
12493 return event;
12494 }
12495
12496 return NULL;
12497 }
12498
12499 static bool pmu_empty(struct pmu *pmu)
12500 {
12501 guard(spinlock)(&pmu->events_lock);
12502 return list_empty(&pmu->events);
12503 }
12504
12505 static void pmu_detach_events(struct pmu *pmu)
12506 {
12507 struct perf_event *event;
12508
12509 for (;;) {
12510 event = pmu_get_event(pmu);
12511 if (!event)
12512 break;
12513
12514 pmu_detach_event(pmu, event);
12515 put_event(event);
12516 }
12517
12518 /*
12519 * wait for pending _free_event()s
12520 */
12521 wait_var_event(pmu, pmu_empty(pmu));
12522 }
12523
12524 int perf_pmu_unregister(struct pmu *pmu)
12525 {
12526 scoped_guard (mutex, &pmus_lock) {
12527 if (!idr_cmpxchg(&pmu_idr, pmu->type, pmu, NULL))
12528 return -EINVAL;
12529
12530 list_del_rcu(&pmu->entry);
12531 }
12532
12533 /*
12534 * We dereference the pmu list under both SRCU and regular RCU, so
12535 * synchronize against both of those.
12536 *
12537 * Notably, the entirety of event creation, from perf_init_event()
12538 * (which will now fail, because of the above) until
12539 * perf_install_in_context() should be under SRCU such that
12540 * this synchronizes against event creation. This avoids trying to
12541 * detach events that are not fully formed.
12542 */
12543 synchronize_srcu(&pmus_srcu);
12544 synchronize_rcu();
12545
12546 if (pmu->event_unmapped && !pmu_empty(pmu)) {
12547 /*
12548 * Can't force remove events when pmu::event_unmapped()
12549 * is used in perf_mmap_close().
12550 */
12551 guard(mutex)(&pmus_lock);
12552 idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu);
12553 list_add_rcu(&pmu->entry, &pmus);
12554 return -EBUSY;
12555 }
12556
12557 scoped_guard (mutex, &pmus_lock)
12558 idr_remove(&pmu_idr, pmu->type);
12559
12560 /*
12561 * The PMU is removed from the pmus list, so no new events will
12562 * be created; now take care of the existing ones.
12563 */
12564 pmu_detach_events(pmu);
12565
12566 /*
12567 * PMU is unused, make it go away.
12568 */
12569 perf_pmu_free(pmu);
12570 return 0;
12571 }
12572 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
12573
12574 static inline bool has_extended_regs(struct perf_event *event)
12575 {
12576 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) ||
12577 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK);
12578 }
12579
12580 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
12581 {
12582 struct perf_event_context *ctx = NULL;
12583 int ret;
12584
12585 if (!try_module_get(pmu->module))
12586 return -ENODEV;
12587
12588 /*
12589 * A number of pmu->event_init() methods iterate the sibling_list to,
12590 * for example, validate if the group fits on the PMU. Therefore,
12591 * if this is a sibling event, acquire the ctx->mutex to protect
12592 * the sibling_list.
12593 */
12594 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
12595 /*
12596 * This ctx->mutex can nest when we're called through
12597 * inheritance. See the perf_event_ctx_lock_nested() comment.
12598 */
12599 ctx = perf_event_ctx_lock_nested(event->group_leader,
12600 SINGLE_DEPTH_NESTING);
12601 BUG_ON(!ctx);
12602 }
12603
12604 event->pmu = pmu;
12605 ret = pmu->event_init(event);
12606
12607 if (ctx)
12608 perf_event_ctx_unlock(event->group_leader, ctx);
12609
12610 if (ret)
12611 goto err_pmu;
12612
12613 if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
12614 has_extended_regs(event)) {
12615 ret = -EOPNOTSUPP;
12616 goto err_destroy;
12617 }
12618
12619 if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
12620 event_has_any_exclude_flag(event)) {
12621 ret = -EINVAL;
12622 goto err_destroy;
12623 }
12624
12625 if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) {
12626 const struct cpumask *cpumask;
12627 struct cpumask *pmu_cpumask;
12628 int cpu;
12629
12630 cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu);
12631 pmu_cpumask = perf_scope_cpumask(pmu->scope);
12632
12633 ret = -ENODEV;
12634 if (!pmu_cpumask || !cpumask)
12635 goto err_destroy;
12636
12637 cpu = cpumask_any_and(pmu_cpumask, cpumask);
12638 if (cpu >= nr_cpu_ids)
12639 goto err_destroy;
12640
12641 event->event_caps |= PERF_EV_CAP_READ_SCOPE;
12642 }
12643
12644 return 0;
12645
12646 err_destroy:
12647 if (event->destroy) {
12648 event->destroy(event);
12649 event->destroy = NULL;
12650 }
12651
12652 err_pmu:
12653 event->pmu = NULL;
12654 module_put(pmu->module);
12655 return ret;
12656 }
12657
12658 static struct pmu *perf_init_event(struct perf_event *event)
12659 {
12660 bool extended_type = false;
12661 struct pmu *pmu;
12662 int type, ret;
12663
12664 guard(srcu)(&pmus_srcu); /* pmu idr/list access */
12665
12666 /*
12667 * Save the original type before calling pmu->event_init(), since certain
12668 * pmus overwrite event->attr.type to forward the event to another pmu.
12669 */
12670 event->orig_type = event->attr.type;
12671
12672 /* Try parent's PMU first: */
12673 if (event->parent && event->parent->pmu) {
12674 pmu = event->parent->pmu;
12675 ret = perf_try_init_event(pmu, event);
12676 if (!ret)
12677 return pmu;
12678 }
12679
12680 /*
12681 * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
12682 * are often aliases for PERF_TYPE_RAW.
12683 */
12684 type = event->attr.type;
12685 if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) {
12686 type = event->attr.config >> PERF_PMU_TYPE_SHIFT;
12687 if (!type) {
12688 type = PERF_TYPE_RAW;
12689 } else {
12690 extended_type = true;
12691 event->attr.config &= PERF_HW_EVENT_MASK;
12692 }
12693 }
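/*
 * E.g. (illustrative, hybrid system): attr.type == PERF_TYPE_HARDWARE with
 * attr.config == ((u64)core_pmu_type << PERF_PMU_TYPE_SHIFT) |
 * PERF_COUNT_HW_CPU_CYCLES selects cycles on that specific core PMU;
 * core_pmu_type is the dynamic type id read from sysfs.
 */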
12694
12695 again:
12696 scoped_guard (rcu)
12697 pmu = idr_find(&pmu_idr, type);
12698 if (pmu) {
12699 if (event->attr.type != type && type != PERF_TYPE_RAW &&
12700 !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE))
12701 return ERR_PTR(-ENOENT);
12702
12703 ret = perf_try_init_event(pmu, event);
12704 if (ret == -ENOENT && event->attr.type != type && !extended_type) {
12705 type = event->attr.type;
12706 goto again;
12707 }
12708
12709 if (ret)
12710 return ERR_PTR(ret);
12711
12712 return pmu;
12713 }
12714
12715 list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) {
12716 ret = perf_try_init_event(pmu, event);
12717 if (!ret)
12718 return pmu;
12719
12720 if (ret != -ENOENT)
12721 return ERR_PTR(ret);
12722 }
12723
12724 return ERR_PTR(-ENOENT);
12725 }
12726
12727 static void attach_sb_event(struct perf_event *event)
12728 {
12729 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
12730
12731 raw_spin_lock(&pel->lock);
12732 list_add_rcu(&event->sb_list, &pel->list);
12733 raw_spin_unlock(&pel->lock);
12734 }
12735
12736 /*
12737 * We keep a list of all !task (and therefore per-cpu) events
12738 * that need to receive side-band records.
12739 *
12740 * This avoids having to scan all the various PMU per-cpu contexts
12741 * looking for them.
12742 */
12743 static void account_pmu_sb_event(struct perf_event *event)
12744 {
12745 if (is_sb_event(event))
12746 attach_sb_event(event);
12747 }
12748
12749 /* Freq events need the tick to stay alive (see perf_event_task_tick). */
12750 static void account_freq_event_nohz(void)
12751 {
12752 #ifdef CONFIG_NO_HZ_FULL
12753 /* Lock so we don't race with concurrent unaccount */
12754 spin_lock(&nr_freq_lock);
12755 if (atomic_inc_return(&nr_freq_events) == 1)
12756 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
12757 spin_unlock(&nr_freq_lock);
12758 #endif
12759 }
12760
12761 static void account_freq_event(void)
12762 {
12763 if (tick_nohz_full_enabled())
12764 account_freq_event_nohz();
12765 else
12766 atomic_inc(&nr_freq_events);
12767 }
12768
12769
12770 static void account_event(struct perf_event *event)
12771 {
12772 bool inc = false;
12773
12774 if (event->parent)
12775 return;
12776
12777 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
12778 inc = true;
12779 if (event->attr.mmap || event->attr.mmap_data)
12780 atomic_inc(&nr_mmap_events);
12781 if (event->attr.build_id)
12782 atomic_inc(&nr_build_id_events);
12783 if (event->attr.comm)
12784 atomic_inc(&nr_comm_events);
12785 if (event->attr.namespaces)
12786 atomic_inc(&nr_namespaces_events);
12787 if (event->attr.cgroup)
12788 atomic_inc(&nr_cgroup_events);
12789 if (event->attr.task)
12790 atomic_inc(&nr_task_events);
12791 if (event->attr.freq)
12792 account_freq_event();
12793 if (event->attr.context_switch) {
12794 atomic_inc(&nr_switch_events);
12795 inc = true;
12796 }
12797 if (has_branch_stack(event))
12798 inc = true;
12799 if (is_cgroup_event(event))
12800 inc = true;
12801 if (event->attr.ksymbol)
12802 atomic_inc(&nr_ksymbol_events);
12803 if (event->attr.bpf_event)
12804 atomic_inc(&nr_bpf_events);
12805 if (event->attr.text_poke)
12806 atomic_inc(&nr_text_poke_events);
12807
12808 if (inc) {
12809 /*
12810 * We need the mutex here because static_branch_enable()
12811 * must complete *before* the perf_sched_count increment
12812 * becomes visible.
12813 */
12814 if (atomic_inc_not_zero(&perf_sched_count))
12815 goto enabled;
12816
12817 mutex_lock(&perf_sched_mutex);
12818 if (!atomic_read(&perf_sched_count)) {
12819 static_branch_enable(&perf_sched_events);
12820 /*
12821 * Guarantee that all CPUs observe the key change and
12822 * call the perf scheduling hooks before proceeding to
12823 * install events that need them.
12824 */
12825 synchronize_rcu();
12826 }
12827 /*
12828 * Now that we have waited for the synchronize_rcu(), allow further
12829 * increments to bypass the mutex.
12830 */
12831 atomic_inc(&perf_sched_count);
12832 mutex_unlock(&perf_sched_mutex);
12833 }
12834 enabled:
12835
12836 account_pmu_sb_event(event);
12837 }
12838
12839 /*
12840 * Allocate and initialize an event structure
12841 */
12842 static struct perf_event *
12843 perf_event_alloc(struct perf_event_attr *attr, int cpu,
12844 struct task_struct *task,
12845 struct perf_event *group_leader,
12846 struct perf_event *parent_event,
12847 perf_overflow_handler_t overflow_handler,
12848 void *context, int cgroup_fd)
12849 {
12850 struct pmu *pmu;
12851 struct hw_perf_event *hwc;
12852 long err = -EINVAL;
12853 int node;
12854
12855 if ((unsigned)cpu >= nr_cpu_ids) {
12856 if (!task || cpu != -1)
12857 return ERR_PTR(-EINVAL);
12858 }
12859 if (attr->sigtrap && !task) {
12860 /* Requires a task: avoid signalling random tasks. */
12861 return ERR_PTR(-EINVAL);
12862 }
12863
12864 node = (cpu >= 0) ? cpu_to_node(cpu) : -1;
12865 struct perf_event *event __free(__free_event) =
12866 kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, node);
12867 if (!event)
12868 return ERR_PTR(-ENOMEM);
12869
12870 /*
12871 * Single events are their own group leaders, with an
12872 * empty sibling list:
12873 */
12874 if (!group_leader)
12875 group_leader = event;
12876
12877 mutex_init(&event->child_mutex);
12878 INIT_LIST_HEAD(&event->child_list);
12879
12880 INIT_LIST_HEAD(&event->event_entry);
12881 INIT_LIST_HEAD(&event->sibling_list);
12882 INIT_LIST_HEAD(&event->active_list);
12883 init_event_group(event);
12884 INIT_LIST_HEAD(&event->rb_entry);
12885 INIT_LIST_HEAD(&event->active_entry);
12886 INIT_LIST_HEAD(&event->addr_filters.list);
12887 INIT_HLIST_NODE(&event->hlist_entry);
12888 INIT_LIST_HEAD(&event->pmu_list);
12889
12890
12891 init_waitqueue_head(&event->waitq);
12892 init_irq_work(&event->pending_irq, perf_pending_irq);
12893 event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable);
12894 init_task_work(&event->pending_task, perf_pending_task);
12895
12896 mutex_init(&event->mmap_mutex);
12897 raw_spin_lock_init(&event->addr_filters.lock);
12898
12899 atomic_long_set(&event->refcount, 1);
12900 event->cpu = cpu;
12901 event->attr = *attr;
12902 event->group_leader = group_leader;
12903 event->pmu = NULL;
12904 event->oncpu = -1;
12905
12906 event->parent = parent_event;
12907
12908 event->ns = get_pid_ns(task_active_pid_ns(current));
12909 event->id = atomic64_inc_return(&perf_event_id);
12910
12911 event->state = PERF_EVENT_STATE_INACTIVE;
12912
12913 if (parent_event)
12914 event->event_caps = parent_event->event_caps;
12915
12916 if (task) {
12917 event->attach_state = PERF_ATTACH_TASK;
12918 /*
12919 * XXX pmu::event_init needs to know what task to account to
12920 * and we cannot use the ctx information because we need the
12921 * pmu before we get a ctx.
12922 */
12923 event->hw.target = get_task_struct(task);
12924 }
12925
12926 event->clock = &local_clock;
12927 if (parent_event)
12928 event->clock = parent_event->clock;
12929
12930 if (!overflow_handler && parent_event) {
12931 overflow_handler = parent_event->overflow_handler;
12932 context = parent_event->overflow_handler_context;
12933 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
12934 if (parent_event->prog) {
12935 struct bpf_prog *prog = parent_event->prog;
12936
12937 bpf_prog_inc(prog);
12938 event->prog = prog;
12939 }
12940 #endif
12941 }
12942
12943 if (overflow_handler) {
12944 event->overflow_handler = overflow_handler;
12945 event->overflow_handler_context = context;
12946 } else if (is_write_backward(event)) {
12947 event->overflow_handler = perf_event_output_backward;
12948 event->overflow_handler_context = NULL;
12949 } else {
12950 event->overflow_handler = perf_event_output_forward;
12951 event->overflow_handler_context = NULL;
12952 }
12953
12954 perf_event__state_init(event);
12955
12956 pmu = NULL;
12957
12958 hwc = &event->hw;
12959 hwc->sample_period = attr->sample_period;
12960 if (is_event_in_freq_mode(event))
12961 hwc->sample_period = 1;
12962 hwc->last_period = hwc->sample_period;
12963
12964 local64_set(&hwc->period_left, hwc->sample_period);
12965
12966 /*
12967 * We do not support PERF_SAMPLE_READ on inherited events unless
12968 * PERF_SAMPLE_TID is also selected, which allows inherited events to
12969 * collect per-thread samples.
12970 * See perf_output_read().
12971 */
12972 if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
12973 return ERR_PTR(-EINVAL);
12974
12975 if (!has_branch_stack(event))
12976 event->attr.branch_sample_type = 0;
12977
12978 pmu = perf_init_event(event);
12979 if (IS_ERR(pmu))
12980 return (void*)pmu;
12981
12982 /*
12983 * The PERF_ATTACH_TASK_DATA is set in the event_init()->hw_config().
12984 * The attach should be right after the perf_init_event().
12985 * Otherwise, __free_event() would mistakenly detach a nonexistent
12986 * perf_ctx_data if any of the intervening error paths were taken.
12987 */
12988 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
12989 err = attach_perf_ctx_data(event);
12990 if (err)
12991 return ERR_PTR(err);
12992 }
12993
12994 /*
12995 * Disallow uncore-task events. Similarly, disallow uncore-cgroup
12996 * events (they don't make sense as the cgroup will be different
12997 * on other CPUs in the uncore mask).
12998 */
12999 if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1))
13000 return ERR_PTR(-EINVAL);
13001
13002 if (event->attr.aux_output &&
13003 (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) ||
13004 event->attr.aux_pause || event->attr.aux_resume))
13005 return ERR_PTR(-EOPNOTSUPP);
13006
13007 if (event->attr.aux_pause && event->attr.aux_resume)
13008 return ERR_PTR(-EINVAL);
13009
13010 if (event->attr.aux_start_paused) {
13011 if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE))
13012 return ERR_PTR(-EOPNOTSUPP);
13013 event->hw.aux_paused = 1;
13014 }
13015
13016 if (cgroup_fd != -1) {
13017 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
13018 if (err)
13019 return ERR_PTR(err);
13020 }
13021
13022 err = exclusive_event_init(event);
13023 if (err)
13024 return ERR_PTR(err);
13025
13026 if (has_addr_filter(event)) {
13027 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
13028 sizeof(struct perf_addr_filter_range),
13029 GFP_KERNEL);
13030 if (!event->addr_filter_ranges)
13031 return ERR_PTR(-ENOMEM);
13032
13033 /*
13034 * Clone the parent's vma offsets: they are valid until exec()
13035 * even if the mm is not shared with the parent.
13036 */
13037 if (event->parent) {
13038 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
13039
13040 raw_spin_lock_irq(&ifh->lock);
13041 memcpy(event->addr_filter_ranges,
13042 event->parent->addr_filter_ranges,
13043 pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
13044 raw_spin_unlock_irq(&ifh->lock);
13045 }
13046
13047 /* force hw sync on the address filters */
13048 event->addr_filters_gen = 1;
13049 }
13050
13051 if (!event->parent) {
13052 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
13053 err = get_callchain_buffers(attr->sample_max_stack);
13054 if (err)
13055 return ERR_PTR(err);
13056 event->attach_state |= PERF_ATTACH_CALLCHAIN;
13057 }
13058 }
13059
13060 err = security_perf_event_alloc(event);
13061 if (err)
13062 return ERR_PTR(err);
13063
13064 /* symmetric to unaccount_event() in _free_event() */
13065 account_event(event);
13066
13067 /*
13068 * Event creation should be under SRCU, see perf_pmu_unregister().
13069 */
13070 lockdep_assert_held(&pmus_srcu);
13071 scoped_guard (spinlock, &pmu->events_lock)
13072 list_add(&event->pmu_list, &pmu->events);
13073
13074 return_ptr(event);
13075 }
13076
13077 static int perf_copy_attr(struct perf_event_attr __user *uattr,
13078 struct perf_event_attr *attr)
13079 {
13080 u32 size;
13081 int ret;
13082
13083 /* Zero the full structure, so that a short copy will be nice. */
13084 memset(attr, 0, sizeof(*attr));
13085
13086 ret = get_user(size, &uattr->size);
13087 if (ret)
13088 return ret;
13089
13090 /* ABI compatibility quirk: */
13091 if (!size)
13092 size = PERF_ATTR_SIZE_VER0;
13093 if (size < PERF_ATTR_SIZE_VER0 || size > PAGE_SIZE)
13094 goto err_size;
13095
13096 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
13097 if (ret) {
13098 if (ret == -E2BIG)
13099 goto err_size;
13100 return ret;
13101 }
13102
13103 attr->size = size;
13104
13105 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
13106 return -EINVAL;
13107
13108 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
13109 return -EINVAL;
13110
13111 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
13112 return -EINVAL;
13113
13114 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
13115 u64 mask = attr->branch_sample_type;
13116
13117 /* only using defined bits */
13118 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
13119 return -EINVAL;
13120
13121 /* at least one branch bit must be set */
13122 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
13123 return -EINVAL;
13124
13125 /* propagate priv level, when not set for branch */
13126 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
13127
13128 /* exclude_kernel checked on syscall entry */
13129 if (!attr->exclude_kernel)
13130 mask |= PERF_SAMPLE_BRANCH_KERNEL;
13131
13132 if (!attr->exclude_user)
13133 mask |= PERF_SAMPLE_BRANCH_USER;
13134
13135 if (!attr->exclude_hv)
13136 mask |= PERF_SAMPLE_BRANCH_HV;
13137 /*
13138 * adjust user setting (for HW filter setup)
13139 */
13140 attr->branch_sample_type = mask;
13141 }
13142 /* privileged levels capture (kernel, hv): check permissions */
13143 if (mask & PERF_SAMPLE_BRANCH_PERM_PLM) {
13144 ret = perf_allow_kernel();
13145 if (ret)
13146 return ret;
13147 }
13148 }
13149
13150 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
13151 ret = perf_reg_validate(attr->sample_regs_user);
13152 if (ret)
13153 return ret;
13154 }
13155
13156 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
13157 if (!arch_perf_have_user_stack_dump())
13158 return -ENOSYS;
13159
13160 /*
13161 * We have __u32 type for the size, but so far
13162 * we can only use __u16 as maximum due to the
13163 * __u16 sample size limit.
13164 */
13165 if (attr->sample_stack_user >= USHRT_MAX)
13166 return -EINVAL;
13167 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
13168 return -EINVAL;
13169 }
13170
13171 if (!attr->sample_max_stack)
13172 attr->sample_max_stack = sysctl_perf_event_max_stack;
13173
13174 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
13175 ret = perf_reg_validate(attr->sample_regs_intr);
13176
13177 #ifndef CONFIG_CGROUP_PERF
13178 if (attr->sample_type & PERF_SAMPLE_CGROUP)
13179 return -EINVAL;
13180 #endif
13181 if ((attr->sample_type & PERF_SAMPLE_WEIGHT) &&
13182 (attr->sample_type & PERF_SAMPLE_WEIGHT_STRUCT))
13183 return -EINVAL;
13184
13185 if (!attr->inherit && attr->inherit_thread)
13186 return -EINVAL;
13187
13188 if (attr->remove_on_exec && attr->enable_on_exec)
13189 return -EINVAL;
13190
13191 if (attr->sigtrap && !attr->remove_on_exec)
13192 return -EINVAL;
13193
13194 out:
13195 return ret;
13196
13197 err_size:
13198 put_user(sizeof(*attr), &uattr->size);
13199 ret = -E2BIG;
13200 goto out;
13201 }
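/*
 * Userspace side of the attr->size handshake above (an illustrative sketch,
 * not kernel code): callers zero the structure, fill in what they know and
 * set size to sizeof(attr) as compiled against their headers.  A kernel that
 * is handed a larger attr than it understands accepts it as long as the
 * unknown trailing bytes are zero; otherwise it returns -E2BIG and writes its
 * own size back through uattr->size so the caller can retry:
 *
 *	struct perf_event_attr attr = { 0 };
 *
 *	attr.type = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.size = sizeof(attr);
 *	attr.disabled = 1;
 */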
13202
13203 static void mutex_lock_double(struct mutex *a, struct mutex *b)
13204 {
13205 if (b < a)
13206 swap(a, b);
13207
13208 mutex_lock(a);
13209 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
13210 }
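/*
 * Taking the two mutexes in a fixed (address) order avoids ABBA deadlocks
 * when two callers operate on the same pair of events from opposite ends;
 * the _nested annotation tells lockdep that acquiring two locks of the same
 * class here is intentional.
 */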
13211
13212 static int
13213 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
13214 {
13215 struct perf_buffer *rb = NULL;
13216 int ret = -EINVAL;
13217
13218 if (!output_event) {
13219 mutex_lock(&event->mmap_mutex);
13220 goto set;
13221 }
13222
13223 /* don't allow circular references */
13224 if (event == output_event)
13225 goto out;
13226
13227 /*
13228 * Don't allow cross-cpu buffers
13229 */
13230 if (output_event->cpu != event->cpu)
13231 goto out;
13232
13233 /*
13234 * If it's not a per-cpu rb, it must be the same task.
13235 */
13236 if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
13237 goto out;
13238
13239 /*
13240 * Mixing clocks in the same buffer is trouble you don't need.
13241 */
13242 if (output_event->clock != event->clock)
13243 goto out;
13244
13245 /*
13246 * Either writing ring buffer from beginning or from end.
13247 * Mixing is not allowed.
13248 */
13249 if (is_write_backward(output_event) != is_write_backward(event))
13250 goto out;
13251
13252 /*
13253 * If both events generate aux data, they must be on the same PMU
13254 */
13255 if (has_aux(event) && has_aux(output_event) &&
13256 event->pmu != output_event->pmu)
13257 goto out;
13258
13259 /*
13260 * Hold both mmap_mutex to serialize against perf_mmap_close(). Since
13261 * output_event is already on rb->event_list, and the list iteration
13262 * restarts after every removal, it is guaranteed this new event is
13263 * observed *OR* if output_event is already removed, it's guaranteed we
13264 * observe !rb->mmap_count.
13265 */
13266 mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
13267 set:
13268 /* Can't redirect output if we've got an active mmap() */
13269 if (refcount_read(&event->mmap_count))
13270 goto unlock;
13271
13272 if (output_event) {
13273 if (output_event->state <= PERF_EVENT_STATE_REVOKED)
13274 goto unlock;
13275
13276 /* get the rb we want to redirect to */
13277 rb = ring_buffer_get(output_event);
13278 if (!rb)
13279 goto unlock;
13280
13281 /* did we race against perf_mmap_close() */
13282 if (!refcount_read(&rb->mmap_count)) {
13283 ring_buffer_put(rb);
13284 goto unlock;
13285 }
13286 }
13287
13288 ring_buffer_attach(event, rb);
13289
13290 ret = 0;
13291 unlock:
13292 mutex_unlock(&event->mmap_mutex);
13293 if (output_event)
13294 mutex_unlock(&output_event->mmap_mutex);
13295
13296 out:
13297 return ret;
13298 }
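/*
 * Both ways into the redirection above originate in userspace: either
 * PERF_FLAG_FD_OUTPUT at perf_event_open() time or, more commonly, the
 * PERF_EVENT_IOC_SET_OUTPUT ioctl on an already-open event.  An illustrative
 * sketch (error handling omitted):
 *
 *	ioctl(other_fd, PERF_EVENT_IOC_SET_OUTPUT, leader_fd);
 *
 * after which samples from other_fd land in the ring buffer mmap()ed on
 * leader_fd; passing -1 instead of leader_fd undoes the redirection.
 */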
13299
13300 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
13301 {
13302 bool nmi_safe = false;
13303
13304 switch (clk_id) {
13305 case CLOCK_MONOTONIC:
13306 event->clock = &ktime_get_mono_fast_ns;
13307 nmi_safe = true;
13308 break;
13309
13310 case CLOCK_MONOTONIC_RAW:
13311 event->clock = &ktime_get_raw_fast_ns;
13312 nmi_safe = true;
13313 break;
13314
13315 case CLOCK_REALTIME:
13316 event->clock = &ktime_get_real_ns;
13317 break;
13318
13319 case CLOCK_BOOTTIME:
13320 event->clock = &ktime_get_boottime_ns;
13321 break;
13322
13323 case CLOCK_TAI:
13324 event->clock = &ktime_get_clocktai_ns;
13325 break;
13326
13327 default:
13328 return -EINVAL;
13329 }
13330
13331 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
13332 return -EINVAL;
13333
13334 return 0;
13335 }
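/*
 * The clock is selected from userspace via attr.use_clockid / attr.clockid,
 * e.g. (a minimal sketch, assuming the standard uapi headers):
 *
 *	attr.use_clockid = 1;
 *	attr.clockid = CLOCK_MONOTONIC_RAW;
 *
 * Only the NMI-safe fast clocks (MONOTONIC, MONOTONIC_RAW) are usable with
 * PMUs that may deliver samples from NMI context; the remaining clocks are
 * limited to PMUs advertising PERF_PMU_CAP_NO_NMI, such as software events.
 */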
13336
13337 static bool
13338 perf_check_permission(struct perf_event_attr *attr, struct task_struct *task)
13339 {
13340 unsigned int ptrace_mode = PTRACE_MODE_READ_REALCREDS;
13341 bool is_capable = perfmon_capable();
13342
13343 if (attr->sigtrap) {
13344 /*
13345 * perf_event_attr::sigtrap sends signals to the other task.
13346 * Require the current task to also have CAP_KILL.
13347 */
13348 rcu_read_lock();
13349 is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL);
13350 rcu_read_unlock();
13351
13352 /*
13353 * If the required capabilities aren't available, checks for
13354 * ptrace permissions: upgrade to ATTACH, since sending signals
13355 * can effectively change the target task.
13356 */
13357 ptrace_mode = PTRACE_MODE_ATTACH_REALCREDS;
13358 }
13359
13360 /*
13361 * Preserve ptrace permission check for backwards compatibility. The
13362 * ptrace check also includes checks that the current task and other
13363 * task have matching uids, and is therefore not done here explicitly.
13364 */
13365 return is_capable || ptrace_may_access(task, ptrace_mode);
13366 }
13367
13368 /**
13369 * sys_perf_event_open - open a performance event, associate it to a task/cpu
13370 *
13371 * @attr_uptr: event_id type attributes for monitoring/sampling
13372 * @pid: target pid
13373 * @cpu: target cpu
13374 * @group_fd: group leader event fd
13375 * @flags: perf event open flags
13376 */
13377 SYSCALL_DEFINE5(perf_event_open,
13378 struct perf_event_attr __user *, attr_uptr,
13379 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
13380 {
13381 struct perf_event *group_leader = NULL, *output_event = NULL;
13382 struct perf_event_pmu_context *pmu_ctx;
13383 struct perf_event *event, *sibling;
13384 struct perf_event_attr attr;
13385 struct perf_event_context *ctx;
13386 struct file *event_file = NULL;
13387 struct task_struct *task = NULL;
13388 struct pmu *pmu;
13389 int event_fd;
13390 int move_group = 0;
13391 int err;
13392 int f_flags = O_RDWR;
13393 int cgroup_fd = -1;
13394
13395 /* for future expandability... */
13396 if (flags & ~PERF_FLAG_ALL)
13397 return -EINVAL;
13398
13399 err = perf_copy_attr(attr_uptr, &attr);
13400 if (err)
13401 return err;
13402
13403 /* Do we allow access to perf_event_open(2) ? */
13404 err = security_perf_event_open(PERF_SECURITY_OPEN);
13405 if (err)
13406 return err;
13407
13408 if (!attr.exclude_kernel) {
13409 err = perf_allow_kernel();
13410 if (err)
13411 return err;
13412 }
13413
13414 if (attr.namespaces) {
13415 if (!perfmon_capable())
13416 return -EACCES;
13417 }
13418
13419 if (attr.freq) {
13420 if (attr.sample_freq > sysctl_perf_event_sample_rate)
13421 return -EINVAL;
13422 } else {
13423 if (attr.sample_period & (1ULL << 63))
13424 return -EINVAL;
13425 }
13426
13427 /* Only privileged users can get physical addresses */
13428 if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR)) {
13429 err = perf_allow_kernel();
13430 if (err)
13431 return err;
13432 }
13433
13434 /* REGS_INTR can leak data, lockdown must prevent this */
13435 if (attr.sample_type & PERF_SAMPLE_REGS_INTR) {
13436 err = security_locked_down(LOCKDOWN_PERF);
13437 if (err)
13438 return err;
13439 }
13440
13441 /*
13442 * In cgroup mode, the pid argument is used to pass the fd
13443 * opened to the cgroup directory in cgroupfs. The cpu argument
13444 * designates the cpu on which to monitor threads from that
13445 * cgroup.
13446 */
13447 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
13448 return -EINVAL;
13449
13450 if (flags & PERF_FLAG_FD_CLOEXEC)
13451 f_flags |= O_CLOEXEC;
13452
13453 event_fd = get_unused_fd_flags(f_flags);
13454 if (event_fd < 0)
13455 return event_fd;
13456
13457 /*
13458 * Event creation should be under SRCU, see perf_pmu_unregister().
13459 */
13460 guard(srcu)(&pmus_srcu);
13461
13462 CLASS(fd, group)(group_fd); // group_fd == -1 => empty
13463 if (group_fd != -1) {
13464 if (!is_perf_file(group)) {
13465 err = -EBADF;
13466 goto err_fd;
13467 }
13468 group_leader = fd_file(group)->private_data;
13469 if (group_leader->state <= PERF_EVENT_STATE_REVOKED) {
13470 err = -ENODEV;
13471 goto err_fd;
13472 }
13473 if (flags & PERF_FLAG_FD_OUTPUT)
13474 output_event = group_leader;
13475 if (flags & PERF_FLAG_FD_NO_GROUP)
13476 group_leader = NULL;
13477 }
13478
13479 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
13480 task = find_lively_task_by_vpid(pid);
13481 if (IS_ERR(task)) {
13482 err = PTR_ERR(task);
13483 goto err_fd;
13484 }
13485 }
13486
13487 if (task && group_leader &&
13488 group_leader->attr.inherit != attr.inherit) {
13489 err = -EINVAL;
13490 goto err_task;
13491 }
13492
13493 if (flags & PERF_FLAG_PID_CGROUP)
13494 cgroup_fd = pid;
13495
13496 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
13497 NULL, NULL, cgroup_fd);
13498 if (IS_ERR(event)) {
13499 err = PTR_ERR(event);
13500 goto err_task;
13501 }
13502
13503 if (is_sampling_event(event)) {
13504 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
13505 err = -EOPNOTSUPP;
13506 goto err_alloc;
13507 }
13508 }
13509
13510 /*
13511 * Special case software events and allow them to be part of
13512 * any hardware group.
13513 */
13514 pmu = event->pmu;
13515
13516 if (attr.use_clockid) {
13517 err = perf_event_set_clock(event, attr.clockid);
13518 if (err)
13519 goto err_alloc;
13520 }
13521
13522 if (pmu->task_ctx_nr == perf_sw_context)
13523 event->event_caps |= PERF_EV_CAP_SOFTWARE;
13524
13525 if (task) {
13526 err = down_read_interruptible(&task->signal->exec_update_lock);
13527 if (err)
13528 goto err_alloc;
13529
13530 /*
13531 * We must hold exec_update_lock across this and any potential
13532 * perf_install_in_context() call for this new event to
13533 * serialize against exec() altering our credentials (and the
13534 * perf_event_exit_task() that could imply).
13535 */
13536 err = -EACCES;
13537 if (!perf_check_permission(&attr, task))
13538 goto err_cred;
13539 }
13540
13541 /*
13542 * Get the target context (task or percpu):
13543 */
13544 ctx = find_get_context(task, event);
13545 if (IS_ERR(ctx)) {
13546 err = PTR_ERR(ctx);
13547 goto err_cred;
13548 }
13549
13550 mutex_lock(&ctx->mutex);
13551
13552 if (ctx->task == TASK_TOMBSTONE) {
13553 err = -ESRCH;
13554 goto err_locked;
13555 }
13556
13557 if (!task) {
13558 /*
13559 * Check if the @cpu we're creating an event for is online.
13560 *
13561 * We use the perf_cpu_context::ctx::mutex to serialize against
13562 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
13563 */
13564 struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
13565
13566 if (!cpuctx->online) {
13567 err = -ENODEV;
13568 goto err_locked;
13569 }
13570 }
13571
13572 if (group_leader) {
13573 err = -EINVAL;
13574
13575 /*
13576 * Do not allow a recursive hierarchy (this new sibling
13577 * becoming part of another group-sibling):
13578 */
13579 if (group_leader->group_leader != group_leader)
13580 goto err_locked;
13581
13582 /* All events in a group should have the same clock */
13583 if (group_leader->clock != event->clock)
13584 goto err_locked;
13585
13586 /*
13587 * Make sure we're both events for the same CPU;
13588 * grouping events for different CPUs is broken, since
13589 * you can never schedule them concurrently anyhow.
13590 */
13591 if (group_leader->cpu != event->cpu)
13592 goto err_locked;
13593
13594 /*
13595 * Make sure we're both on the same context; either task or cpu.
13596 */
13597 if (group_leader->ctx != ctx)
13598 goto err_locked;
13599
13600 /*
13601 * Only a group leader can be exclusive or pinned
13602 */
13603 if (attr.exclusive || attr.pinned)
13604 goto err_locked;
13605
13606 if (is_software_event(event) &&
13607 !in_software_context(group_leader)) {
13608 /*
13609 * If the event is a sw event, but the group_leader
13610 * is on hw context.
13611 *
13612 * Allow the addition of software events to hw
13613 * groups, this is safe because software events
13614 * never fail to schedule.
13615 *
13616 * Note the comment that goes with struct
13617 * perf_event_pmu_context.
13618 */
13619 pmu = group_leader->pmu_ctx->pmu;
13620 } else if (!is_software_event(event)) {
13621 if (is_software_event(group_leader) &&
13622 (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
13623 /*
13624 * In case the group is a pure software group, and we
13625 * try to add a hardware event, move the whole group to
13626 * the hardware context.
13627 */
13628 move_group = 1;
13629 }
13630
13631 /* Don't allow group of multiple hw events from different pmus */
13632 if (!in_software_context(group_leader) &&
13633 group_leader->pmu_ctx->pmu != pmu)
13634 goto err_locked;
13635 }
13636 }
13637
13638 /*
13639 * Now that we're certain of the pmu; find the pmu_ctx.
13640 */
13641 pmu_ctx = find_get_pmu_context(pmu, ctx, event);
13642 if (IS_ERR(pmu_ctx)) {
13643 err = PTR_ERR(pmu_ctx);
13644 goto err_locked;
13645 }
13646 event->pmu_ctx = pmu_ctx;
13647
13648 if (output_event) {
13649 err = perf_event_set_output(event, output_event);
13650 if (err)
13651 goto err_context;
13652 }
13653
13654 if (!perf_event_validate_size(event)) {
13655 err = -E2BIG;
13656 goto err_context;
13657 }
13658
13659 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) {
13660 err = -EINVAL;
13661 goto err_context;
13662 }
13663
13664 /*
13665 * Must be under the same ctx::mutex as perf_install_in_context(),
13666 * because we need to serialize with concurrent event creation.
13667 */
13668 if (!exclusive_event_installable(event, ctx)) {
13669 err = -EBUSY;
13670 goto err_context;
13671 }
13672
13673 WARN_ON_ONCE(ctx->parent_ctx);
13674
13675 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags);
13676 if (IS_ERR(event_file)) {
13677 err = PTR_ERR(event_file);
13678 event_file = NULL;
13679 goto err_context;
13680 }
13681
13682 /*
13683 * This is the point of no return; we cannot fail hereafter. This is
13684 * where we start modifying current state.
13685 */
13686
13687 if (move_group) {
13688 perf_remove_from_context(group_leader, 0);
13689 put_pmu_ctx(group_leader->pmu_ctx);
13690
13691 for_each_sibling_event(sibling, group_leader) {
13692 perf_remove_from_context(sibling, 0);
13693 put_pmu_ctx(sibling->pmu_ctx);
13694 }
13695
13696 /*
13697 * Install the group siblings before the group leader.
13698 *
13699 * Because a group leader will try and install the entire group
13700 * (through the sibling list, which is still intact), we can
13701 * end up with siblings installed in the wrong context.
13702 *
13703 * By installing siblings first we NO-OP because they're not
13704 * reachable through the group lists.
13705 */
13706 for_each_sibling_event(sibling, group_leader) {
13707 sibling->pmu_ctx = pmu_ctx;
13708 get_pmu_ctx(pmu_ctx);
13709 perf_event__state_init(sibling);
13710 perf_install_in_context(ctx, sibling, sibling->cpu);
13711 }
13712
13713 /*
13714 * Removing an event from its context leaves it disabled.
13715 * What we want here is the event in its initial startup
13716 * state, ready to be added into the new context.
13717 */
13718 group_leader->pmu_ctx = pmu_ctx;
13719 get_pmu_ctx(pmu_ctx);
13720 perf_event__state_init(group_leader);
13721 perf_install_in_context(ctx, group_leader, group_leader->cpu);
13722 }
13723
13724 /*
13725 * Precalculate sample_data sizes; do while holding ctx::mutex such
13726 * that we're serialized against further additions and before
13727 * perf_install_in_context() which is the point the event is active and
13728 * can use these values.
13729 */
13730 perf_event__header_size(event);
13731 perf_event__id_header_size(event);
13732
13733 event->owner = current;
13734
13735 perf_install_in_context(ctx, event, event->cpu);
13736 perf_unpin_context(ctx);
13737
13738 mutex_unlock(&ctx->mutex);
13739
13740 if (task) {
13741 up_read(&task->signal->exec_update_lock);
13742 put_task_struct(task);
13743 }
13744
13745 mutex_lock(¤t->perf_event_mutex);
13746 list_add_tail(&event->owner_entry, ¤t->perf_event_list);
13747 mutex_unlock(¤t->perf_event_mutex);
13748
13749 /*
13750 * File reference in group guarantees that group_leader has been
13751 * kept alive until we place the new event on the sibling_list.
13752 * This ensures destruction of the group leader will find
13753 * the pointer to itself in perf_group_detach().
13754 */
13755 fd_install(event_fd, event_file);
13756 return event_fd;
13757
13758 err_context:
13759 put_pmu_ctx(event->pmu_ctx);
13760 event->pmu_ctx = NULL; /* _free_event() */
13761 err_locked:
13762 mutex_unlock(&ctx->mutex);
13763 perf_unpin_context(ctx);
13764 put_ctx(ctx);
13765 err_cred:
13766 if (task)
13767 up_read(&task->signal->exec_update_lock);
13768 err_alloc:
13769 put_event(event);
13770 err_task:
13771 if (task)
13772 put_task_struct(task);
13773 err_fd:
13774 put_unused_fd(event_fd);
13775 return err;
13776 }
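/*
 * There is no glibc wrapper for this syscall; userspace usually invokes it
 * directly.  A minimal counting example (an illustrative sketch only, error
 * handling omitted):
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_HARDWARE,
 *		.size = sizeof(attr),
 *		.config = PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled = 1,
 *		.exclude_kernel = 1,
 *	};
 *	long long count;
 *	int fd;
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1,
 *		     PERF_FLAG_FD_CLOEXEC);
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	... code under measurement ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *
 * pid == 0 with cpu == -1 means "this task, on any CPU"; see the pid/cpu/
 * cgroup rules in the function above for the other combinations.
 */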
13777
13778 /**
13779 * perf_event_create_kernel_counter
13780 *
13781 * @attr: attributes of the counter to create
13782 * @cpu: cpu to which the counter is bound
13783 * @task: task to profile (NULL for percpu)
13784 * @overflow_handler: callback to trigger when we hit the event
13785 * @context: context data passed to the overflow_handler callback
13786 */
13787 struct perf_event *
13788 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
13789 struct task_struct *task,
13790 perf_overflow_handler_t overflow_handler,
13791 void *context)
13792 {
13793 struct perf_event_pmu_context *pmu_ctx;
13794 struct perf_event_context *ctx;
13795 struct perf_event *event;
13796 struct pmu *pmu;
13797 int err;
13798
13799 /*
13800 * Grouping is not supported for kernel events, and neither is 'AUX';
13801 * make sure the caller's intentions are adjusted.
13802 */
13803 if (attr->aux_output || attr->aux_action)
13804 return ERR_PTR(-EINVAL);
13805
13806 /*
13807 * Event creation should be under SRCU, see perf_pmu_unregister().
13808 */
13809 guard(srcu)(&pmus_srcu);
13810
13811 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
13812 overflow_handler, context, -1);
13813 if (IS_ERR(event)) {
13814 err = PTR_ERR(event);
13815 goto err;
13816 }
13817
13818 /* Mark owner so we could distinguish it from user events. */
13819 event->owner = TASK_TOMBSTONE;
13820 pmu = event->pmu;
13821
13822 if (pmu->task_ctx_nr == perf_sw_context)
13823 event->event_caps |= PERF_EV_CAP_SOFTWARE;
13824
13825 /*
13826 * Get the target context (task or percpu):
13827 */
13828 ctx = find_get_context(task, event);
13829 if (IS_ERR(ctx)) {
13830 err = PTR_ERR(ctx);
13831 goto err_alloc;
13832 }
13833
13834 WARN_ON_ONCE(ctx->parent_ctx);
13835 mutex_lock(&ctx->mutex);
13836 if (ctx->task == TASK_TOMBSTONE) {
13837 err = -ESRCH;
13838 goto err_unlock;
13839 }
13840
13841 pmu_ctx = find_get_pmu_context(pmu, ctx, event);
13842 if (IS_ERR(pmu_ctx)) {
13843 err = PTR_ERR(pmu_ctx);
13844 goto err_unlock;
13845 }
13846 event->pmu_ctx = pmu_ctx;
13847
13848 if (!task) {
13849 /*
13850 * Check if the @cpu we're creating an event for is online.
13851 *
13852 * We use the perf_cpu_context::ctx::mutex to serialize against
13853 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
13854 */
13855 struct perf_cpu_context *cpuctx =
13856 container_of(ctx, struct perf_cpu_context, ctx);
13857 if (!cpuctx->online) {
13858 err = -ENODEV;
13859 goto err_pmu_ctx;
13860 }
13861 }
13862
13863 if (!exclusive_event_installable(event, ctx)) {
13864 err = -EBUSY;
13865 goto err_pmu_ctx;
13866 }
13867
13868 perf_install_in_context(ctx, event, event->cpu);
13869 perf_unpin_context(ctx);
13870 mutex_unlock(&ctx->mutex);
13871
13872 return event;
13873
13874 err_pmu_ctx:
13875 put_pmu_ctx(pmu_ctx);
13876 event->pmu_ctx = NULL; /* _free_event() */
13877 err_unlock:
13878 mutex_unlock(&ctx->mutex);
13879 perf_unpin_context(ctx);
13880 put_ctx(ctx);
13881 err_alloc:
13882 put_event(event);
13883 err:
13884 return ERR_PTR(err);
13885 }
13886 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
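/*
 * In-kernel users (hw_breakpoint, the hardlockup watchdog, KVM, ...) use this
 * instead of the syscall.  A rough sketch of a per-CPU cycles counter with an
 * overflow callback ("my_overflow" is a caller-provided handler, shown here
 * purely for illustration):
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_HARDWARE,
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *		.size = sizeof(attr),
 *		.sample_period = 1000000,
 *	};
 *	struct perf_event *ev;
 *
 *	ev = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *					      my_overflow, NULL);
 *	if (IS_ERR(ev))
 *		return PTR_ERR(ev);
 *
 * and the counter is torn down again with perf_event_release_kernel(ev).
 */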
13887
13888 static void __perf_pmu_remove(struct perf_event_context *ctx,
13889 int cpu, struct pmu *pmu,
13890 struct perf_event_groups *groups,
13891 struct list_head *events)
13892 {
13893 struct perf_event *event, *sibling;
13894
13895 perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
13896 perf_remove_from_context(event, 0);
13897 put_pmu_ctx(event->pmu_ctx);
13898 list_add(&event->migrate_entry, events);
13899
13900 for_each_sibling_event(sibling, event) {
13901 perf_remove_from_context(sibling, 0);
13902 put_pmu_ctx(sibling->pmu_ctx);
13903 list_add(&sibling->migrate_entry, events);
13904 }
13905 }
13906 }
13907
13908 static void __perf_pmu_install_event(struct pmu *pmu,
13909 struct perf_event_context *ctx,
13910 int cpu, struct perf_event *event)
13911 {
13912 struct perf_event_pmu_context *epc;
13913 struct perf_event_context *old_ctx = event->ctx;
13914
13915 get_ctx(ctx); /* normally find_get_context() */
13916
13917 event->cpu = cpu;
13918 epc = find_get_pmu_context(pmu, ctx, event);
13919 event->pmu_ctx = epc;
13920
13921 if (event->state >= PERF_EVENT_STATE_OFF)
13922 event->state = PERF_EVENT_STATE_INACTIVE;
13923 perf_install_in_context(ctx, event, cpu);
13924
13925 /*
13926 * Now that event->ctx is updated and visible, put the old ctx.
13927 */
13928 put_ctx(old_ctx);
13929 }
13930
13931 static void __perf_pmu_install(struct perf_event_context *ctx,
13932 int cpu, struct pmu *pmu, struct list_head *events)
13933 {
13934 struct perf_event *event, *tmp;
13935
13936 /*
13937 * Re-instate events in 2 passes.
13938 *
13939 * Skip over group leaders and only install siblings on this first
13940 * pass, siblings will not get enabled without a leader, however a
13941 * leader will enable its siblings, even if those are still on the old
13942 * context.
13943 */
13944 list_for_each_entry_safe(event, tmp, events, migrate_entry) {
13945 if (event->group_leader == event)
13946 continue;
13947
13948 list_del(&event->migrate_entry);
13949 __perf_pmu_install_event(pmu, ctx, cpu, event);
13950 }
13951
13952 /*
13953 * Once all the siblings are setup properly, install the group leaders
13954 * to make it go.
13955 */
13956 list_for_each_entry_safe(event, tmp, events, migrate_entry) {
13957 list_del(&event->migrate_entry);
13958 __perf_pmu_install_event(pmu, ctx, cpu, event);
13959 }
13960 }
13961
13962 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
13963 {
13964 struct perf_event_context *src_ctx, *dst_ctx;
13965 LIST_HEAD(events);
13966
13967 /*
13968 * Since per-cpu context is persistent, no need to grab an extra
13969 * reference.
13970 */
13971 src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx;
13972 dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx;
13973
13974 /*
13975 * See perf_event_ctx_lock() for comments on the details
13976 * of swizzling perf_event::ctx.
13977 */
13978 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
13979
13980 __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->pinned_groups, &events);
13981 __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->flexible_groups, &events);
13982
13983 if (!list_empty(&events)) {
13984 /*
13985 * Wait for the events to quiesce before re-instating them.
13986 */
13987 synchronize_rcu();
13988
13989 __perf_pmu_install(dst_ctx, dst_cpu, pmu, &events);
13990 }
13991
13992 mutex_unlock(&dst_ctx->mutex);
13993 mutex_unlock(&src_ctx->mutex);
13994 }
13995 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
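/*
 * Typical caller: an uncore/system PMU driver's CPU-hotplug offline callback,
 * which picks a new designated reader CPU for its package/die and moves all
 * of its events there, roughly perf_pmu_migrate_context(pmu, dying_cpu,
 * target_cpu).  perf_event_clear_cpumask() below does the equivalent
 * automatically for PMUs that declare a topology scope instead of handling
 * hotplug themselves.
 */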
13996
13997 static void sync_child_event(struct perf_event *child_event)
13998 {
13999 struct perf_event *parent_event = child_event->parent;
14000 u64 child_val;
14001
14002 if (child_event->attr.inherit_stat) {
14003 struct task_struct *task = child_event->ctx->task;
14004
14005 if (task && task != TASK_TOMBSTONE)
14006 perf_event_read_event(child_event, task);
14007 }
14008
14009 child_val = perf_event_count(child_event, false);
14010
14011 /*
14012 * Add back the child's count to the parent's count:
14013 */
14014 atomic64_add(child_val, &parent_event->child_count);
14015 atomic64_add(child_event->total_time_enabled,
14016 &parent_event->child_total_time_enabled);
14017 atomic64_add(child_event->total_time_running,
14018 &parent_event->child_total_time_running);
14019 }
14020
14021 static void
14022 perf_event_exit_event(struct perf_event *event,
14023 struct perf_event_context *ctx, bool revoke)
14024 {
14025 struct perf_event *parent_event = event->parent;
14026 unsigned long detach_flags = DETACH_EXIT;
14027 unsigned int attach_state;
14028
14029 if (parent_event) {
14030 /*
14031 * Do not destroy the 'original' grouping; because of the
14032 * context switch optimization the original events could've
14033 * ended up in a random child task.
14034 *
14035 * If we were to destroy the original group, all group related
14036 * operations would cease to function properly after this
14037 * random child dies.
14038 *
14039 * Do destroy all inherited groups; we don't care about those,
14040 * and being thorough is better.
14041 */
14042 detach_flags |= DETACH_GROUP | DETACH_CHILD;
14043 mutex_lock(&parent_event->child_mutex);
14044 /* PERF_ATTACH_ITRACE might be set concurrently */
14045 attach_state = READ_ONCE(event->attach_state);
14046 }
14047
14048 if (revoke)
14049 detach_flags |= DETACH_GROUP | DETACH_REVOKE;
14050
14051 perf_remove_from_context(event, detach_flags);
14052 /*
14053 * Child events can be freed.
14054 */
14055 if (parent_event) {
14056 mutex_unlock(&parent_event->child_mutex);
14057
14058 /*
14059 * Match the refcount initialization. Make sure it doesn't happen
14060 * twice if pmu_detach_event() calls it on an already exited task.
14061 */
14062 if (attach_state & PERF_ATTACH_CHILD) {
14063 /*
14064 * Kick perf_poll() for is_event_hup();
14065 */
14066 perf_event_wakeup(parent_event);
14067 /*
14068 * pmu_detach_event() will have an extra refcount.
14069 * perf_pending_task() might have one too.
14070 */
14071 put_event(event);
14072 }
14073
14074 return;
14075 }
14076
14077 /*
14078 * Parent events are governed by their filedesc, retain them.
14079 */
14080 perf_event_wakeup(event);
14081 }
14082
14083 static void perf_event_exit_task_context(struct task_struct *task, bool exit)
14084 {
14085 struct perf_event_context *ctx, *clone_ctx = NULL;
14086 struct perf_event *child_event, *next;
14087
14088 ctx = perf_pin_task_context(task);
14089 if (!ctx)
14090 return;
14091
14092 /*
14093 * In order to reduce the amount of trickery in ctx tear-down, we hold
14094 * ctx::mutex over the entire thing. This serializes against almost
14095 * everything that wants to access the ctx.
14096 *
14097 * The exception is sys_perf_event_open() /
14098 * perf_event_create_kernel_counter() which does find_get_context()
14099 * without ctx::mutex (it cannot because of the move_group double mutex
14100 * lock thing). See the comments in perf_install_in_context().
14101 */
14102 mutex_lock(&ctx->mutex);
14103
14104 /*
14105 * In a single ctx::lock section, de-schedule the events and detach the
14106 * context from the task such that we cannot ever get it scheduled back
14107 * in.
14108 */
14109 raw_spin_lock_irq(&ctx->lock);
14110 if (exit)
14111 task_ctx_sched_out(ctx, NULL, EVENT_ALL);
14112
14113 /*
14114 * Now that the context is inactive, destroy the task <-> ctx relation
14115 * and mark the context dead.
14116 */
14117 RCU_INIT_POINTER(task->perf_event_ctxp, NULL);
14118 put_ctx(ctx); /* cannot be last */
14119 WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
14120 put_task_struct(task); /* cannot be last */
14121
14122 clone_ctx = unclone_ctx(ctx);
14123 raw_spin_unlock_irq(&ctx->lock);
14124
14125 if (clone_ctx)
14126 put_ctx(clone_ctx);
14127
14128 /*
14129 * Report the task dead after unscheduling the events so that we
14130 * won't get any samples after PERF_RECORD_EXIT. We can however still
14131 * get a few PERF_RECORD_READ events.
14132 */
14133 if (exit)
14134 perf_event_task(task, ctx, 0);
14135
14136 list_for_each_entry_safe(child_event, next, &ctx->event_list, event_entry)
14137 perf_event_exit_event(child_event, ctx, false);
14138
14139 mutex_unlock(&ctx->mutex);
14140
14141 if (!exit) {
14142 /*
14143 * perf_event_release_kernel() could still have a reference on
14144 * this context. In that case we must wait for these events to
14145 * have been freed (in particular all their references to this
14146 * task must've been dropped).
14147 *
14148 * Without this copy_process() will unconditionally free this
14149 * task (irrespective of its reference count) and
14150 * _free_event()'s put_task_struct(event->hw.target) will be a
14151 * use-after-free.
14152 *
14153 * Wait for all events to drop their context reference.
14154 */
14155 wait_var_event(&ctx->refcount,
14156 refcount_read(&ctx->refcount) == 1);
14157 }
14158 put_ctx(ctx);
14159 }
14160
14161 /*
14162 * When a task exits, feed back event values to parent events.
14163 *
14164 * Can be called with exec_update_lock held when called from
14165 * setup_new_exec().
14166 */
14167 void perf_event_exit_task(struct task_struct *task)
14168 {
14169 struct perf_event *event, *tmp;
14170
14171 WARN_ON_ONCE(task != current);
14172
14173 mutex_lock(&task->perf_event_mutex);
14174 list_for_each_entry_safe(event, tmp, &task->perf_event_list,
14175 owner_entry) {
14176 list_del_init(&event->owner_entry);
14177
14178 /*
14179 * Ensure the list deletion is visible before we clear
14180 * the owner, closes a race against perf_release() where
14181 * we need to serialize on the owner->perf_event_mutex.
14182 */
14183 smp_store_release(&event->owner, NULL);
14184 }
14185 mutex_unlock(&task->perf_event_mutex);
14186
14187 perf_event_exit_task_context(task, true);
14188
14189 /*
14190 * The perf_event_exit_task_context calls perf_event_task
14191 * with task's task_ctx, which generates EXIT events for
14192 * task contexts and sets task->perf_event_ctxp[] to NULL.
14193 * At this point we need to send EXIT events to cpu contexts.
14194 */
14195 perf_event_task(task, NULL, 0);
14196
14197 /*
14198 * Detach the perf_ctx_data for the system-wide event.
14199 */
14200 guard(percpu_read)(&global_ctx_data_rwsem);
14201 detach_task_ctx_data(task);
14202 }
14203
14204 /*
14205 * Free a context as created by inheritance by perf_event_init_task() below,
14206 * used by fork() in case of fail.
14207 *
14208 * Even though the task has never lived, the context and events have been
14209 * exposed through the child_list, so we must take care tearing it all down.
14210 */
14211 void perf_event_free_task(struct task_struct *task)
14212 {
14213 perf_event_exit_task_context(task, false);
14214 }
14215
14216 void perf_event_delayed_put(struct task_struct *task)
14217 {
14218 WARN_ON_ONCE(task->perf_event_ctxp);
14219 }
14220
14221 struct file *perf_event_get(unsigned int fd)
14222 {
14223 struct file *file = fget(fd);
14224 if (!file)
14225 return ERR_PTR(-EBADF);
14226
14227 if (file->f_op != &perf_fops) {
14228 fput(file);
14229 return ERR_PTR(-EBADF);
14230 }
14231
14232 return file;
14233 }
14234
14235 const struct perf_event *perf_get_event(struct file *file)
14236 {
14237 if (file->f_op != &perf_fops)
14238 return ERR_PTR(-EINVAL);
14239
14240 return file->private_data;
14241 }
14242
14243 const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
14244 {
14245 if (!event)
14246 return ERR_PTR(-EINVAL);
14247
14248 return &event->attr;
14249 }
14250
14251 int perf_allow_kernel(void)
14252 {
14253 if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
14254 return -EACCES;
14255
14256 return security_perf_event_open(PERF_SECURITY_KERNEL);
14257 }
14258 EXPORT_SYMBOL_GPL(perf_allow_kernel);
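/*
 * For reference, the conventional kernel.perf_event_paranoid levels are:
 *	-1 - not paranoid at all
 *	 0 - disallow raw tracepoint access for unprivileged users
 *	 1 - disallow cpu events for unprivileged users
 *	 2 - disallow kernel profiling for unprivileged users
 * perf_allow_kernel() enforces the "> 1" case; perfmon_capable() (CAP_PERFMON
 * or CAP_SYS_ADMIN) bypasses it, subject to the LSM hook above.
 */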
14259
14260 /*
14261 * Inherit an event from parent task to child task.
14262 *
14263 * Returns:
14264 * - valid pointer on success
14265 * - NULL for orphaned events
14266 * - IS_ERR() on error
14267 */
14268 static struct perf_event *
14269 inherit_event(struct perf_event *parent_event,
14270 struct task_struct *parent,
14271 struct perf_event_context *parent_ctx,
14272 struct task_struct *child,
14273 struct perf_event *group_leader,
14274 struct perf_event_context *child_ctx)
14275 {
14276 enum perf_event_state parent_state = parent_event->state;
14277 struct perf_event_pmu_context *pmu_ctx;
14278 struct perf_event *child_event;
14279 unsigned long flags;
14280
14281 /*
14282 * Instead of creating recursive hierarchies of events,
14283 * we link inherited events back to the original parent,
14284 * which has a filp for sure, which we use as the reference
14285 * count:
14286 */
14287 if (parent_event->parent)
14288 parent_event = parent_event->parent;
14289
14290 if (parent_event->state <= PERF_EVENT_STATE_REVOKED)
14291 return NULL;
14292
14293 /*
14294 * Event creation should be under SRCU, see perf_pmu_unregister().
14295 */
14296 guard(srcu)(&pmus_srcu);
14297
14298 child_event = perf_event_alloc(&parent_event->attr,
14299 parent_event->cpu,
14300 child,
14301 group_leader, parent_event,
14302 NULL, NULL, -1);
14303 if (IS_ERR(child_event))
14304 return child_event;
14305
14306 get_ctx(child_ctx);
14307 child_event->ctx = child_ctx;
14308
14309 pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
14310 if (IS_ERR(pmu_ctx)) {
14311 free_event(child_event);
14312 return ERR_CAST(pmu_ctx);
14313 }
14314 child_event->pmu_ctx = pmu_ctx;
14315
14316 /*
14317 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
14318 * must be under the same lock in order to serialize against
14319 * perf_event_release_kernel(), such that either we must observe
14320 * is_orphaned_event() or they will observe us on the child_list.
14321 */
14322 mutex_lock(&parent_event->child_mutex);
14323 if (is_orphaned_event(parent_event) ||
14324 !atomic_long_inc_not_zero(&parent_event->refcount)) {
14325 mutex_unlock(&parent_event->child_mutex);
14326 free_event(child_event);
14327 return NULL;
14328 }
14329
14330 /*
14331 * Make the child state follow the state of the parent event,
14332 * not its attr.disabled bit. We hold the parent's mutex,
14333 * so we won't race with perf_event_{en, dis}able_family.
14334 */
14335 if (parent_state >= PERF_EVENT_STATE_INACTIVE)
14336 child_event->state = PERF_EVENT_STATE_INACTIVE;
14337 else
14338 child_event->state = PERF_EVENT_STATE_OFF;
14339
14340 if (parent_event->attr.freq) {
14341 u64 sample_period = parent_event->hw.sample_period;
14342 struct hw_perf_event *hwc = &child_event->hw;
14343
14344 hwc->sample_period = sample_period;
14345 hwc->last_period = sample_period;
14346
14347 local64_set(&hwc->period_left, sample_period);
14348 }
14349
14350 child_event->overflow_handler = parent_event->overflow_handler;
14351 child_event->overflow_handler_context
14352 = parent_event->overflow_handler_context;
14353
14354 /*
14355 * Precalculate sample_data sizes
14356 */
14357 perf_event__header_size(child_event);
14358 perf_event__id_header_size(child_event);
14359
14360 /*
14361 * Link it up in the child's context:
14362 */
14363 raw_spin_lock_irqsave(&child_ctx->lock, flags);
14364 add_event_to_ctx(child_event, child_ctx);
14365 child_event->attach_state |= PERF_ATTACH_CHILD;
14366 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
14367
14368 /*
14369 * Link this into the parent event's child list
14370 */
14371 list_add_tail(&child_event->child_list, &parent_event->child_list);
14372 mutex_unlock(&parent_event->child_mutex);
14373
14374 return child_event;
14375 }
14376
14377 /*
14378 * Inherits an event group.
14379 *
14380 * This will quietly suppress orphaned events; !inherit_event() is not an error.
14381 * This matches with perf_event_release_kernel() removing all child events.
14382 *
14383 * Returns:
14384 * - 0 on success
14385 * - <0 on error
14386 */
14387 static int inherit_group(struct perf_event *parent_event,
14388 struct task_struct *parent,
14389 struct perf_event_context *parent_ctx,
14390 struct task_struct *child,
14391 struct perf_event_context *child_ctx)
14392 {
14393 struct perf_event *leader;
14394 struct perf_event *sub;
14395 struct perf_event *child_ctr;
14396
14397 leader = inherit_event(parent_event, parent, parent_ctx,
14398 child, NULL, child_ctx);
14399 if (IS_ERR(leader))
14400 return PTR_ERR(leader);
14401 /*
14402 * @leader can be NULL here because of is_orphaned_event(). In this
14403 * case inherit_event() will create individual events, similar to what
14404 * perf_group_detach() would do anyway.
14405 */
14406 for_each_sibling_event(sub, parent_event) {
14407 child_ctr = inherit_event(sub, parent, parent_ctx,
14408 child, leader, child_ctx);
14409 if (IS_ERR(child_ctr))
14410 return PTR_ERR(child_ctr);
14411
14412 if (sub->aux_event == parent_event && child_ctr &&
14413 !perf_get_aux_event(child_ctr, leader))
14414 return -EINVAL;
14415 }
14416 if (leader)
14417 leader->group_generation = parent_event->group_generation;
14418 return 0;
14419 }
14420
14421 /*
14422 * Creates the child task context and tries to inherit the event-group.
14423 *
14424 * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
14425 * inherited_all set when we 'fail' to inherit an orphaned event; this is
14426 * consistent with perf_event_release_kernel() removing all child events.
14427 *
14428 * Returns:
14429 * - 0 on success
14430 * - <0 on error
14431 */
14432 static int
14433 inherit_task_group(struct perf_event *event, struct task_struct *parent,
14434 struct perf_event_context *parent_ctx,
14435 struct task_struct *child,
14436 u64 clone_flags, int *inherited_all)
14437 {
14438 struct perf_event_context *child_ctx;
14439 int ret;
14440
14441 if (!event->attr.inherit ||
14442 (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) ||
14443 /* Do not inherit if sigtrap and signal handlers were cleared. */
14444 (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) {
14445 *inherited_all = 0;
14446 return 0;
14447 }
14448
14449 child_ctx = child->perf_event_ctxp;
14450 if (!child_ctx) {
14451 /*
14452 * This is executed from the parent task context, so
14453 * inherit events that have been marked for cloning.
14454 * First allocate and initialize a context for the
14455 * child.
14456 */
14457 child_ctx = alloc_perf_context(child);
14458 if (!child_ctx)
14459 return -ENOMEM;
14460
14461 child->perf_event_ctxp = child_ctx;
14462 }
14463
14464 ret = inherit_group(event, parent, parent_ctx, child, child_ctx);
14465 if (ret)
14466 *inherited_all = 0;
14467
14468 return ret;
14469 }
14470
14471 /*
14472 * Initialize the perf_event context in task_struct
14473 */
14474 static int perf_event_init_context(struct task_struct *child, u64 clone_flags)
14475 {
14476 struct perf_event_context *child_ctx, *parent_ctx;
14477 struct perf_event_context *cloned_ctx;
14478 struct perf_event *event;
14479 struct task_struct *parent = current;
14480 int inherited_all = 1;
14481 unsigned long flags;
14482 int ret = 0;
14483
14484 if (likely(!parent->perf_event_ctxp))
14485 return 0;
14486
14487 /*
14488 * If the parent's context is a clone, pin it so it won't get
14489 * swapped under us.
14490 */
14491 parent_ctx = perf_pin_task_context(parent);
14492 if (!parent_ctx)
14493 return 0;
14494
14495 /*
14496 * No need to check if parent_ctx != NULL here; since we saw
14497 * it non-NULL earlier, the only reason for it to become NULL
14498 * is if we exit, and since we're currently in the middle of
14499 * a fork we can't be exiting at the same time.
14500 */
14501
14502 /*
14503 * Lock the parent list. No need to lock the child - not PID
14504 * hashed yet and not running, so nobody can access it.
14505 */
14506 mutex_lock(&parent_ctx->mutex);
14507
14508 /*
14509 * We dont have to disable NMIs - we are only looking at
14510 * the list, not manipulating it:
14511 */
14512 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) {
14513 ret = inherit_task_group(event, parent, parent_ctx,
14514 child, clone_flags, &inherited_all);
14515 if (ret)
14516 goto out_unlock;
14517 }
14518
14519 /*
14520 * We can't hold ctx->lock when iterating the ->flexible_group list due
14521 * to allocations, but we need to prevent rotation because
14522 * rotate_ctx() will change the list from interrupt context.
14523 */
14524 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
14525 parent_ctx->rotate_disable = 1;
14526 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
14527
14528 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) {
14529 ret = inherit_task_group(event, parent, parent_ctx,
14530 child, clone_flags, &inherited_all);
14531 if (ret)
14532 goto out_unlock;
14533 }
14534
14535 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
14536 parent_ctx->rotate_disable = 0;
14537
14538 child_ctx = child->perf_event_ctxp;
14539
14540 if (child_ctx && inherited_all) {
14541 /*
14542 * Mark the child context as a clone of the parent
14543 * context, or of whatever the parent is a clone of.
14544 *
14545 * Note that if the parent is a clone, the holding of
14546 * parent_ctx->lock avoids it from being uncloned.
14547 */
14548 cloned_ctx = parent_ctx->parent_ctx;
14549 if (cloned_ctx) {
14550 child_ctx->parent_ctx = cloned_ctx;
14551 child_ctx->parent_gen = parent_ctx->parent_gen;
14552 } else {
14553 child_ctx->parent_ctx = parent_ctx;
14554 child_ctx->parent_gen = parent_ctx->generation;
14555 }
14556 get_ctx(child_ctx->parent_ctx);
14557 }
14558
14559 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
14560 out_unlock:
14561 mutex_unlock(&parent_ctx->mutex);
14562
14563 perf_unpin_context(parent_ctx);
14564 put_ctx(parent_ctx);
14565
14566 return ret;
14567 }
14568
14569 /*
14570 * Initialize the perf_event context in task_struct
14571 */
14572 int perf_event_init_task(struct task_struct *child, u64 clone_flags)
14573 {
14574 int ret;
14575
14576 memset(child->perf_recursion, 0, sizeof(child->perf_recursion));
14577 child->perf_event_ctxp = NULL;
14578 mutex_init(&child->perf_event_mutex);
14579 INIT_LIST_HEAD(&child->perf_event_list);
14580 child->perf_ctx_data = NULL;
14581
14582 ret = perf_event_init_context(child, clone_flags);
14583 if (ret) {
14584 perf_event_free_task(child);
14585 return ret;
14586 }
14587
14588 return 0;
14589 }
14590
14591 static void __init perf_event_init_all_cpus(void)
14592 {
14593 struct swevent_htable *swhash;
14594 struct perf_cpu_context *cpuctx;
14595 int cpu;
14596
14597 zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);
14598 zalloc_cpumask_var(&perf_online_core_mask, GFP_KERNEL);
14599 zalloc_cpumask_var(&perf_online_die_mask, GFP_KERNEL);
14600 zalloc_cpumask_var(&perf_online_cluster_mask, GFP_KERNEL);
14601 zalloc_cpumask_var(&perf_online_pkg_mask, GFP_KERNEL);
14602 zalloc_cpumask_var(&perf_online_sys_mask, GFP_KERNEL);
14603
14604
14605 for_each_possible_cpu(cpu) {
14606 swhash = &per_cpu(swevent_htable, cpu);
14607 mutex_init(&swhash->hlist_mutex);
14608
14609 INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
14610 raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
14611
14612 INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
14613
14614 cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
14615 __perf_event_init_context(&cpuctx->ctx);
14616 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
14617 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
14618 cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
14619 cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default);
14620 cpuctx->heap = cpuctx->heap_default;
14621 }
14622 }
14623
14624 static void perf_swevent_init_cpu(unsigned int cpu)
14625 {
14626 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
14627
14628 mutex_lock(&swhash->hlist_mutex);
14629 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
14630 struct swevent_hlist *hlist;
14631
14632 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
14633 WARN_ON(!hlist);
14634 rcu_assign_pointer(swhash->swevent_hlist, hlist);
14635 }
14636 mutex_unlock(&swhash->hlist_mutex);
14637 }
14638
14639 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
14640 static void __perf_event_exit_context(void *__info)
14641 {
14642 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
14643 struct perf_event_context *ctx = __info;
14644 struct perf_event *event;
14645
14646 raw_spin_lock(&ctx->lock);
14647 ctx_sched_out(ctx, NULL, EVENT_TIME);
14648 list_for_each_entry(event, &ctx->event_list, event_entry)
14649 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
14650 raw_spin_unlock(&ctx->lock);
14651 }
14652
14653 static void perf_event_clear_cpumask(unsigned int cpu)
14654 {
14655 int target[PERF_PMU_MAX_SCOPE];
14656 unsigned int scope;
14657 struct pmu *pmu;
14658
14659 cpumask_clear_cpu(cpu, perf_online_mask);
14660
14661 for (scope = PERF_PMU_SCOPE_NONE + 1; scope < PERF_PMU_MAX_SCOPE; scope++) {
14662 const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(scope, cpu);
14663 struct cpumask *pmu_cpumask = perf_scope_cpumask(scope);
14664
14665 target[scope] = -1;
14666 if (WARN_ON_ONCE(!pmu_cpumask || !cpumask))
14667 continue;
14668
14669 if (!cpumask_test_and_clear_cpu(cpu, pmu_cpumask))
14670 continue;
14671 target[scope] = cpumask_any_but(cpumask, cpu);
14672 if (target[scope] < nr_cpu_ids)
14673 cpumask_set_cpu(target[scope], pmu_cpumask);
14674 }
14675
14676 /* migrate */
14677 list_for_each_entry(pmu, &pmus, entry) {
14678 if (pmu->scope == PERF_PMU_SCOPE_NONE ||
14679 WARN_ON_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE))
14680 continue;
14681
14682 if (target[pmu->scope] >= 0 && target[pmu->scope] < nr_cpu_ids)
14683 perf_pmu_migrate_context(pmu, cpu, target[pmu->scope]);
14684 }
14685 }
14686
14687 static void perf_event_exit_cpu_context(int cpu)
14688 {
14689 struct perf_cpu_context *cpuctx;
14690 struct perf_event_context *ctx;
14691
14692 // XXX simplify cpuctx->online
14693 mutex_lock(&pmus_lock);
14694 /*
14695 * Clear the cpumasks, and migrate to other CPUs if possible.
14696 * Must be invoked before the __perf_event_exit_context.
14697 */
14698 perf_event_clear_cpumask(cpu);
14699 cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
14700 ctx = &cpuctx->ctx;
14701
14702 mutex_lock(&ctx->mutex);
14703 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
14704 cpuctx->online = 0;
14705 mutex_unlock(&ctx->mutex);
14706 mutex_unlock(&pmus_lock);
14707 }
14708 #else
14709
14710 static void perf_event_exit_cpu_context(int cpu) { }
14711
14712 #endif
14713
14714 static void perf_event_setup_cpumask(unsigned int cpu)
14715 {
14716 struct cpumask *pmu_cpumask;
14717 unsigned int scope;
14718
14719 /*
14720 * Early boot stage, the cpumask hasn't been set yet.
14721 * The perf_online_<domain>_masks includes the first CPU of each domain.
14722 * Always unconditionally set the boot CPU for the perf_online_<domain>_masks.
14723 */
14724 if (cpumask_empty(perf_online_mask)) {
14725 for (scope = PERF_PMU_SCOPE_NONE + 1; scope < PERF_PMU_MAX_SCOPE; scope++) {
14726 pmu_cpumask = perf_scope_cpumask(scope);
14727 if (WARN_ON_ONCE(!pmu_cpumask))
14728 continue;
14729 cpumask_set_cpu(cpu, pmu_cpumask);
14730 }
14731 goto end;
14732 }
14733
14734 for (scope = PERF_PMU_SCOPE_NONE + 1; scope < PERF_PMU_MAX_SCOPE; scope++) {
14735 const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(scope, cpu);
14736
14737 pmu_cpumask = perf_scope_cpumask(scope);
14738
14739 if (WARN_ON_ONCE(!pmu_cpumask || !cpumask))
14740 continue;
14741
14742 if (!cpumask_empty(cpumask) &&
14743 cpumask_any_and(pmu_cpumask, cpumask) >= nr_cpu_ids)
14744 cpumask_set_cpu(cpu, pmu_cpumask);
14745 }
14746 end:
14747 cpumask_set_cpu(cpu, perf_online_mask);
14748 }
14749
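/*
 * CPU hotplug callbacks: perf_event_init_cpu() runs when a CPU comes up,
 * perf_event_exit_cpu() when it goes down.  They are hooked into the CPU
 * hotplug state machine from kernel/cpu.c; a rough, illustrative sketch of
 * such a hookup (mainline lists these callbacks in the static
 * cpuhp_hp_states[] table rather than registering them at runtime):
 *
 *	cpuhp_setup_state(CPUHP_PERF_PREPARE, "perf:prepare",
 *			  perf_event_init_cpu, perf_event_exit_cpu);
 */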
int perf_event_init_cpu(unsigned int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;

	perf_swevent_init_cpu(cpu);

	mutex_lock(&pmus_lock);
	perf_event_setup_cpumask(cpu);
	cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
	ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	cpuctx->online = 1;
	mutex_unlock(&ctx->mutex);
	mutex_unlock(&pmus_lock);

	return 0;
}

int perf_event_exit_cpu(unsigned int cpu)
{
	perf_event_exit_cpu_context(cpu);
	return 0;
}

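/*
 * Reboot path: tear down perf on every online CPU so the PMUs are quiesced
 * (and stop raising interrupts) before the machine reboots or kexecs into a
 * new kernel.
 */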
static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, "cpu_clock", -1);
	perf_pmu_register(&perf_task_clock, "task_clock", -1);
	perf_tp_register();
	perf_event_init_cpu(smp_processor_id());
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	perf_event_cache = KMEM_CACHE(perf_event, SLAB_PANIC);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location. IOW, validation we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}

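/*
 * Common "show" routine for PMU event attributes that carry a fixed event
 * string.  PMU drivers typically create such attributes with the
 * PMU_EVENT_ATTR_STRING() helper from <linux/perf_event.h>, e.g.:
 *
 *	PMU_EVENT_ATTR_STRING(cycles, evattr_cycles, "event=0x3c");
 *
 * (The attribute name and event encoding above are illustrative only.)
 * Reading the resulting sysfs file returns the string set up by the driver;
 * attributes without an event string read back empty.
 */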
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	if (pmu_attr->event_str)
		return sprintf(page, "%s\n", pmu_attr->event_str);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_sysfs_show);

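/*
 * Late (device_initcall) sysfs setup: register the PMU bus and create
 * devices for all PMUs that were registered before sysfs became available.
 * Once pmu_bus_running is set, perf_pmu_register() creates the device for
 * each new PMU directly at registration time.
 */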
static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (pmu->dev)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

	free_percpu(jc->info);
	kfree(jc);
}

static int perf_cgroup_css_online(struct cgroup_subsys_state *css)
{
	perf_event_cgroup(css->cgroup);
	return 0;
}

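/*
 * When tasks are moved between cgroups, run perf_cgroup_switch() on the CPU
 * each task is currently running on (via task_function_call()), so that
 * cgroup-scoped events immediately start or stop counting for the migrated
 * tasks.
 */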
static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;

	preempt_disable();
	perf_cgroup_switch(task);
	preempt_enable();

	return 0;
}

static void perf_cgroup_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	cgroup_taskset_for_each(task, css, tset)
		task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.css_online	= perf_cgroup_css_online,
	.attach		= perf_cgroup_attach,
	/*
	 * Implicitly enable on dfl hierarchy so that perf events can
	 * always be filtered by cgroup2 path as long as perf_event
	 * controller is not mounted on a legacy hierarchy.
	 */
	.implicit_on_dfl = true,
	.threaded	= true,
};
#endif /* CONFIG_CGROUP_PERF */

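/*
 * perf_snapshot_branch_stack defaults to a return-0 stub (no branch entries
 * captured).  A PMU driver that can snapshot the hardware branch stack (e.g.
 * the x86 Intel PMU with LBR) is expected to point the static call at its own
 * routine via static_call_update(); callers such as the BPF branch-snapshot
 * helper then get real entries instead of the stub's zero count.
 */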
DEFINE_STATIC_CALL_RET0(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);