xref: /linux/kernel/events/core.c (revision 507e190946297c34a27d9366b0661d5e506fdd03)
1 /*
2  * Performance events core code:
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  * For licensing details see kernel-base/COPYING
10  */
11 
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/idr.h>
17 #include <linux/file.h>
18 #include <linux/poll.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/tick.h>
22 #include <linux/sysfs.h>
23 #include <linux/dcache.h>
24 #include <linux/percpu.h>
25 #include <linux/ptrace.h>
26 #include <linux/reboot.h>
27 #include <linux/vmstat.h>
28 #include <linux/device.h>
29 #include <linux/export.h>
30 #include <linux/vmalloc.h>
31 #include <linux/hardirq.h>
32 #include <linux/rculist.h>
33 #include <linux/uaccess.h>
34 #include <linux/syscalls.h>
35 #include <linux/anon_inodes.h>
36 #include <linux/kernel_stat.h>
37 #include <linux/cgroup.h>
38 #include <linux/perf_event.h>
39 #include <linux/trace_events.h>
40 #include <linux/hw_breakpoint.h>
41 #include <linux/mm_types.h>
42 #include <linux/module.h>
43 #include <linux/mman.h>
44 #include <linux/compat.h>
45 #include <linux/bpf.h>
46 #include <linux/filter.h>
47 #include <linux/namei.h>
48 #include <linux/parser.h>
49 #include <linux/sched/clock.h>
50 #include <linux/sched/mm.h>
51 #include <linux/proc_ns.h>
52 #include <linux/mount.h>
53 
54 #include "internal.h"
55 
56 #include <asm/irq_regs.h>
57 
58 typedef int (*remote_function_f)(void *);
59 
60 struct remote_function_call {
61 	struct task_struct	*p;
62 	remote_function_f	func;
63 	void			*info;
64 	int			ret;
65 };
66 
67 static void remote_function(void *data)
68 {
69 	struct remote_function_call *tfc = data;
70 	struct task_struct *p = tfc->p;
71 
72 	if (p) {
73 		/* -EAGAIN */
74 		if (task_cpu(p) != smp_processor_id())
75 			return;
76 
77 		/*
78 		 * Now that we're on right CPU with IRQs disabled, we can test
79 		 * if we hit the right task without races.
80 		 */
81 
82 		tfc->ret = -ESRCH; /* No such (running) process */
83 		if (p != current)
84 			return;
85 	}
86 
87 	tfc->ret = tfc->func(tfc->info);
88 }
89 
90 /**
91  * task_function_call - call a function on the cpu on which a task runs
92  * @p:		the task to evaluate
93  * @func:	the function to be called
94  * @info:	the function call argument
95  *
96  * Calls the function @func when the task is currently running. This might
97  * be on the current CPU, which just calls the function directly
98  *
99  * returns: @func return value, or
100  *	    -ESRCH  - when the process isn't running
101  *	    -EAGAIN - when the process moved away
102  */
103 static int
104 task_function_call(struct task_struct *p, remote_function_f func, void *info)
105 {
106 	struct remote_function_call data = {
107 		.p	= p,
108 		.func	= func,
109 		.info	= info,
110 		.ret	= -EAGAIN,
111 	};
112 	int ret;
113 
114 	do {
115 		ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
116 		if (!ret)
117 			ret = data.ret;
118 	} while (ret == -EAGAIN);
119 
120 	return ret;
121 }
122 
123 /**
124  * cpu_function_call - call a function on the cpu
125  * @func:	the function to be called
126  * @info:	the function call argument
127  *
128  * Calls the function @func on the remote cpu.
129  *
130  * returns: @func return value or -ENXIO when the cpu is offline
131  */
132 static int cpu_function_call(int cpu, remote_function_f func, void *info)
133 {
134 	struct remote_function_call data = {
135 		.p	= NULL,
136 		.func	= func,
137 		.info	= info,
138 		.ret	= -ENXIO, /* No such CPU */
139 	};
140 
141 	smp_call_function_single(cpu, remote_function, &data, 1);
142 
143 	return data.ret;
144 }
145 
146 static inline struct perf_cpu_context *
147 __get_cpu_context(struct perf_event_context *ctx)
148 {
149 	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
150 }
151 
152 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
153 			  struct perf_event_context *ctx)
154 {
155 	raw_spin_lock(&cpuctx->ctx.lock);
156 	if (ctx)
157 		raw_spin_lock(&ctx->lock);
158 }
159 
160 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
161 			    struct perf_event_context *ctx)
162 {
163 	if (ctx)
164 		raw_spin_unlock(&ctx->lock);
165 	raw_spin_unlock(&cpuctx->ctx.lock);
166 }
167 
168 #define TASK_TOMBSTONE ((void *)-1L)
169 
170 static bool is_kernel_event(struct perf_event *event)
171 {
172 	return READ_ONCE(event->owner) == TASK_TOMBSTONE;
173 }
174 
175 /*
176  * On task ctx scheduling...
177  *
178  * When !ctx->nr_events a task context will not be scheduled. This means
179  * we can disable the scheduler hooks (for performance) without leaving
180  * pending task ctx state.
181  *
182  * This however results in two special cases:
183  *
184  *  - removing the last event from a task ctx; this is relatively straight
185  *    forward and is done in __perf_remove_from_context.
186  *
187  *  - adding the first event to a task ctx; this is tricky because we cannot
188  *    rely on ctx->is_active and therefore cannot use event_function_call().
189  *    See perf_install_in_context().
190  *
191  * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
192  */
193 
194 typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
195 			struct perf_event_context *, void *);
196 
197 struct event_function_struct {
198 	struct perf_event *event;
199 	event_f func;
200 	void *data;
201 };
202 
203 static int event_function(void *info)
204 {
205 	struct event_function_struct *efs = info;
206 	struct perf_event *event = efs->event;
207 	struct perf_event_context *ctx = event->ctx;
208 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
209 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
210 	int ret = 0;
211 
212 	WARN_ON_ONCE(!irqs_disabled());
213 
214 	perf_ctx_lock(cpuctx, task_ctx);
215 	/*
216 	 * Since we do the IPI call without holding ctx->lock things can have
217 	 * changed, double check we hit the task we set out to hit.
218 	 */
219 	if (ctx->task) {
220 		if (ctx->task != current) {
221 			ret = -ESRCH;
222 			goto unlock;
223 		}
224 
225 		/*
226 		 * We only use event_function_call() on established contexts,
227 		 * and event_function() is only ever called when active (or
228 		 * rather, we'll have bailed in task_function_call() or the
229 		 * above ctx->task != current test), therefore we must have
230 		 * ctx->is_active here.
231 		 */
232 		WARN_ON_ONCE(!ctx->is_active);
233 		/*
234 		 * And since we have ctx->is_active, cpuctx->task_ctx must
235 		 * match.
236 		 */
237 		WARN_ON_ONCE(task_ctx != ctx);
238 	} else {
239 		WARN_ON_ONCE(&cpuctx->ctx != ctx);
240 	}
241 
242 	efs->func(event, cpuctx, ctx, efs->data);
243 unlock:
244 	perf_ctx_unlock(cpuctx, task_ctx);
245 
246 	return ret;
247 }
248 
249 static void event_function_call(struct perf_event *event, event_f func, void *data)
250 {
251 	struct perf_event_context *ctx = event->ctx;
252 	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
253 	struct event_function_struct efs = {
254 		.event = event,
255 		.func = func,
256 		.data = data,
257 	};
258 
259 	if (!event->parent) {
260 		/*
261 		 * If this is a !child event, we must hold ctx::mutex to
262 		 * stabilize the the event->ctx relation. See
263 		 * perf_event_ctx_lock().
264 		 */
265 		lockdep_assert_held(&ctx->mutex);
266 	}
267 
268 	if (!task) {
269 		cpu_function_call(event->cpu, event_function, &efs);
270 		return;
271 	}
272 
273 	if (task == TASK_TOMBSTONE)
274 		return;
275 
276 again:
277 	if (!task_function_call(task, event_function, &efs))
278 		return;
279 
280 	raw_spin_lock_irq(&ctx->lock);
281 	/*
282 	 * Reload the task pointer, it might have been changed by
283 	 * a concurrent perf_event_context_sched_out().
284 	 */
285 	task = ctx->task;
286 	if (task == TASK_TOMBSTONE) {
287 		raw_spin_unlock_irq(&ctx->lock);
288 		return;
289 	}
290 	if (ctx->is_active) {
291 		raw_spin_unlock_irq(&ctx->lock);
292 		goto again;
293 	}
294 	func(event, NULL, ctx, data);
295 	raw_spin_unlock_irq(&ctx->lock);
296 }
297 
298 /*
299  * Similar to event_function_call() + event_function(), but hard assumes IRQs
300  * are already disabled and we're on the right CPU.
301  */
302 static void event_function_local(struct perf_event *event, event_f func, void *data)
303 {
304 	struct perf_event_context *ctx = event->ctx;
305 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
306 	struct task_struct *task = READ_ONCE(ctx->task);
307 	struct perf_event_context *task_ctx = NULL;
308 
309 	WARN_ON_ONCE(!irqs_disabled());
310 
311 	if (task) {
312 		if (task == TASK_TOMBSTONE)
313 			return;
314 
315 		task_ctx = ctx;
316 	}
317 
318 	perf_ctx_lock(cpuctx, task_ctx);
319 
320 	task = ctx->task;
321 	if (task == TASK_TOMBSTONE)
322 		goto unlock;
323 
324 	if (task) {
325 		/*
326 		 * We must be either inactive or active and the right task,
327 		 * otherwise we're screwed, since we cannot IPI to somewhere
328 		 * else.
329 		 */
330 		if (ctx->is_active) {
331 			if (WARN_ON_ONCE(task != current))
332 				goto unlock;
333 
334 			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
335 				goto unlock;
336 		}
337 	} else {
338 		WARN_ON_ONCE(&cpuctx->ctx != ctx);
339 	}
340 
341 	func(event, cpuctx, ctx, data);
342 unlock:
343 	perf_ctx_unlock(cpuctx, task_ctx);
344 }
345 
346 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
347 		       PERF_FLAG_FD_OUTPUT  |\
348 		       PERF_FLAG_PID_CGROUP |\
349 		       PERF_FLAG_FD_CLOEXEC)
350 
351 /*
352  * branch priv levels that need permission checks
353  */
354 #define PERF_SAMPLE_BRANCH_PERM_PLM \
355 	(PERF_SAMPLE_BRANCH_KERNEL |\
356 	 PERF_SAMPLE_BRANCH_HV)
357 
358 enum event_type_t {
359 	EVENT_FLEXIBLE = 0x1,
360 	EVENT_PINNED = 0x2,
361 	EVENT_TIME = 0x4,
362 	/* see ctx_resched() for details */
363 	EVENT_CPU = 0x8,
364 	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
365 };
366 
367 /*
368  * perf_sched_events : >0 events exist
369  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
370  */
371 
372 static void perf_sched_delayed(struct work_struct *work);
373 DEFINE_STATIC_KEY_FALSE(perf_sched_events);
374 static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
375 static DEFINE_MUTEX(perf_sched_mutex);
376 static atomic_t perf_sched_count;
377 
378 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
379 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
380 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
381 
382 static atomic_t nr_mmap_events __read_mostly;
383 static atomic_t nr_comm_events __read_mostly;
384 static atomic_t nr_namespaces_events __read_mostly;
385 static atomic_t nr_task_events __read_mostly;
386 static atomic_t nr_freq_events __read_mostly;
387 static atomic_t nr_switch_events __read_mostly;
388 
389 static LIST_HEAD(pmus);
390 static DEFINE_MUTEX(pmus_lock);
391 static struct srcu_struct pmus_srcu;
392 static cpumask_var_t perf_online_mask;
393 
394 /*
395  * perf event paranoia level:
396  *  -1 - not paranoid at all
397  *   0 - disallow raw tracepoint access for unpriv
398  *   1 - disallow cpu events for unpriv
399  *   2 - disallow kernel profiling for unpriv
400  */
401 int sysctl_perf_event_paranoid __read_mostly = 2;
402 
403 /* Minimum for 512 kiB + 1 user control page */
404 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
405 
406 /*
407  * max perf event sample rate
408  */
409 #define DEFAULT_MAX_SAMPLE_RATE		100000
410 #define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
411 #define DEFAULT_CPU_TIME_MAX_PERCENT	25
412 
413 int sysctl_perf_event_sample_rate __read_mostly	= DEFAULT_MAX_SAMPLE_RATE;
414 
415 static int max_samples_per_tick __read_mostly	= DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
416 static int perf_sample_period_ns __read_mostly	= DEFAULT_SAMPLE_PERIOD_NS;
417 
418 static int perf_sample_allowed_ns __read_mostly =
419 	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
420 
421 static void update_perf_cpu_limits(void)
422 {
423 	u64 tmp = perf_sample_period_ns;
424 
425 	tmp *= sysctl_perf_cpu_time_max_percent;
426 	tmp = div_u64(tmp, 100);
427 	if (!tmp)
428 		tmp = 1;
429 
430 	WRITE_ONCE(perf_sample_allowed_ns, tmp);
431 }
432 
433 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
434 
435 int perf_proc_update_handler(struct ctl_table *table, int write,
436 		void __user *buffer, size_t *lenp,
437 		loff_t *ppos)
438 {
439 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
440 
441 	if (ret || !write)
442 		return ret;
443 
444 	/*
445 	 * If throttling is disabled don't allow the write:
446 	 */
447 	if (sysctl_perf_cpu_time_max_percent == 100 ||
448 	    sysctl_perf_cpu_time_max_percent == 0)
449 		return -EINVAL;
450 
451 	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
452 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
453 	update_perf_cpu_limits();
454 
455 	return 0;
456 }
457 
458 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
459 
460 int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
461 				void __user *buffer, size_t *lenp,
462 				loff_t *ppos)
463 {
464 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
465 
466 	if (ret || !write)
467 		return ret;
468 
469 	if (sysctl_perf_cpu_time_max_percent == 100 ||
470 	    sysctl_perf_cpu_time_max_percent == 0) {
471 		printk(KERN_WARNING
472 		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
473 		WRITE_ONCE(perf_sample_allowed_ns, 0);
474 	} else {
475 		update_perf_cpu_limits();
476 	}
477 
478 	return 0;
479 }
480 
481 /*
482  * perf samples are done in some very critical code paths (NMIs).
483  * If they take too much CPU time, the system can lock up and not
484  * get any real work done.  This will drop the sample rate when
485  * we detect that events are taking too long.
486  */
487 #define NR_ACCUMULATED_SAMPLES 128
488 static DEFINE_PER_CPU(u64, running_sample_length);
489 
490 static u64 __report_avg;
491 static u64 __report_allowed;
492 
493 static void perf_duration_warn(struct irq_work *w)
494 {
495 	printk_ratelimited(KERN_INFO
496 		"perf: interrupt took too long (%lld > %lld), lowering "
497 		"kernel.perf_event_max_sample_rate to %d\n",
498 		__report_avg, __report_allowed,
499 		sysctl_perf_event_sample_rate);
500 }
501 
502 static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
503 
504 void perf_sample_event_took(u64 sample_len_ns)
505 {
506 	u64 max_len = READ_ONCE(perf_sample_allowed_ns);
507 	u64 running_len;
508 	u64 avg_len;
509 	u32 max;
510 
511 	if (max_len == 0)
512 		return;
513 
514 	/* Decay the counter by 1 average sample. */
515 	running_len = __this_cpu_read(running_sample_length);
516 	running_len -= running_len/NR_ACCUMULATED_SAMPLES;
517 	running_len += sample_len_ns;
518 	__this_cpu_write(running_sample_length, running_len);
519 
520 	/*
521 	 * Note: this will be biased artifically low until we have
522 	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
523 	 * from having to maintain a count.
524 	 */
525 	avg_len = running_len/NR_ACCUMULATED_SAMPLES;
526 	if (avg_len <= max_len)
527 		return;
528 
529 	__report_avg = avg_len;
530 	__report_allowed = max_len;
531 
532 	/*
533 	 * Compute a throttle threshold 25% below the current duration.
534 	 */
535 	avg_len += avg_len / 4;
536 	max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
537 	if (avg_len < max)
538 		max /= (u32)avg_len;
539 	else
540 		max = 1;
541 
542 	WRITE_ONCE(perf_sample_allowed_ns, avg_len);
543 	WRITE_ONCE(max_samples_per_tick, max);
544 
545 	sysctl_perf_event_sample_rate = max * HZ;
546 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
547 
548 	if (!irq_work_queue(&perf_duration_work)) {
549 		early_printk("perf: interrupt took too long (%lld > %lld), lowering "
550 			     "kernel.perf_event_max_sample_rate to %d\n",
551 			     __report_avg, __report_allowed,
552 			     sysctl_perf_event_sample_rate);
553 	}
554 }
555 
556 static atomic64_t perf_event_id;
557 
558 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
559 			      enum event_type_t event_type);
560 
561 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
562 			     enum event_type_t event_type,
563 			     struct task_struct *task);
564 
565 static void update_context_time(struct perf_event_context *ctx);
566 static u64 perf_event_time(struct perf_event *event);
567 
568 void __weak perf_event_print_debug(void)	{ }
569 
570 extern __weak const char *perf_pmu_name(void)
571 {
572 	return "pmu";
573 }
574 
575 static inline u64 perf_clock(void)
576 {
577 	return local_clock();
578 }
579 
580 static inline u64 perf_event_clock(struct perf_event *event)
581 {
582 	return event->clock();
583 }
584 
585 #ifdef CONFIG_CGROUP_PERF
586 
587 static inline bool
588 perf_cgroup_match(struct perf_event *event)
589 {
590 	struct perf_event_context *ctx = event->ctx;
591 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
592 
593 	/* @event doesn't care about cgroup */
594 	if (!event->cgrp)
595 		return true;
596 
597 	/* wants specific cgroup scope but @cpuctx isn't associated with any */
598 	if (!cpuctx->cgrp)
599 		return false;
600 
601 	/*
602 	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
603 	 * also enabled for all its descendant cgroups.  If @cpuctx's
604 	 * cgroup is a descendant of @event's (the test covers identity
605 	 * case), it's a match.
606 	 */
607 	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
608 				    event->cgrp->css.cgroup);
609 }
610 
611 static inline void perf_detach_cgroup(struct perf_event *event)
612 {
613 	css_put(&event->cgrp->css);
614 	event->cgrp = NULL;
615 }
616 
617 static inline int is_cgroup_event(struct perf_event *event)
618 {
619 	return event->cgrp != NULL;
620 }
621 
622 static inline u64 perf_cgroup_event_time(struct perf_event *event)
623 {
624 	struct perf_cgroup_info *t;
625 
626 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
627 	return t->time;
628 }
629 
630 static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
631 {
632 	struct perf_cgroup_info *info;
633 	u64 now;
634 
635 	now = perf_clock();
636 
637 	info = this_cpu_ptr(cgrp->info);
638 
639 	info->time += now - info->timestamp;
640 	info->timestamp = now;
641 }
642 
643 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
644 {
645 	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
646 	if (cgrp_out)
647 		__update_cgrp_time(cgrp_out);
648 }
649 
650 static inline void update_cgrp_time_from_event(struct perf_event *event)
651 {
652 	struct perf_cgroup *cgrp;
653 
654 	/*
655 	 * ensure we access cgroup data only when needed and
656 	 * when we know the cgroup is pinned (css_get)
657 	 */
658 	if (!is_cgroup_event(event))
659 		return;
660 
661 	cgrp = perf_cgroup_from_task(current, event->ctx);
662 	/*
663 	 * Do not update time when cgroup is not active
664 	 */
665 	if (cgrp == event->cgrp)
666 		__update_cgrp_time(event->cgrp);
667 }
668 
669 static inline void
670 perf_cgroup_set_timestamp(struct task_struct *task,
671 			  struct perf_event_context *ctx)
672 {
673 	struct perf_cgroup *cgrp;
674 	struct perf_cgroup_info *info;
675 
676 	/*
677 	 * ctx->lock held by caller
678 	 * ensure we do not access cgroup data
679 	 * unless we have the cgroup pinned (css_get)
680 	 */
681 	if (!task || !ctx->nr_cgroups)
682 		return;
683 
684 	cgrp = perf_cgroup_from_task(task, ctx);
685 	info = this_cpu_ptr(cgrp->info);
686 	info->timestamp = ctx->timestamp;
687 }
688 
689 static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
690 
691 #define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
692 #define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */
693 
694 /*
695  * reschedule events based on the cgroup constraint of task.
696  *
697  * mode SWOUT : schedule out everything
698  * mode SWIN : schedule in based on cgroup for next
699  */
700 static void perf_cgroup_switch(struct task_struct *task, int mode)
701 {
702 	struct perf_cpu_context *cpuctx;
703 	struct list_head *list;
704 	unsigned long flags;
705 
706 	/*
707 	 * Disable interrupts and preemption to avoid this CPU's
708 	 * cgrp_cpuctx_entry to change under us.
709 	 */
710 	local_irq_save(flags);
711 
712 	list = this_cpu_ptr(&cgrp_cpuctx_list);
713 	list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
714 		WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
715 
716 		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
717 		perf_pmu_disable(cpuctx->ctx.pmu);
718 
719 		if (mode & PERF_CGROUP_SWOUT) {
720 			cpu_ctx_sched_out(cpuctx, EVENT_ALL);
721 			/*
722 			 * must not be done before ctxswout due
723 			 * to event_filter_match() in event_sched_out()
724 			 */
725 			cpuctx->cgrp = NULL;
726 		}
727 
728 		if (mode & PERF_CGROUP_SWIN) {
729 			WARN_ON_ONCE(cpuctx->cgrp);
730 			/*
731 			 * set cgrp before ctxsw in to allow
732 			 * event_filter_match() to not have to pass
733 			 * task around
734 			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
735 			 * because cgorup events are only per-cpu
736 			 */
737 			cpuctx->cgrp = perf_cgroup_from_task(task,
738 							     &cpuctx->ctx);
739 			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
740 		}
741 		perf_pmu_enable(cpuctx->ctx.pmu);
742 		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
743 	}
744 
745 	local_irq_restore(flags);
746 }
747 
748 static inline void perf_cgroup_sched_out(struct task_struct *task,
749 					 struct task_struct *next)
750 {
751 	struct perf_cgroup *cgrp1;
752 	struct perf_cgroup *cgrp2 = NULL;
753 
754 	rcu_read_lock();
755 	/*
756 	 * we come here when we know perf_cgroup_events > 0
757 	 * we do not need to pass the ctx here because we know
758 	 * we are holding the rcu lock
759 	 */
760 	cgrp1 = perf_cgroup_from_task(task, NULL);
761 	cgrp2 = perf_cgroup_from_task(next, NULL);
762 
763 	/*
764 	 * only schedule out current cgroup events if we know
765 	 * that we are switching to a different cgroup. Otherwise,
766 	 * do no touch the cgroup events.
767 	 */
768 	if (cgrp1 != cgrp2)
769 		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
770 
771 	rcu_read_unlock();
772 }
773 
774 static inline void perf_cgroup_sched_in(struct task_struct *prev,
775 					struct task_struct *task)
776 {
777 	struct perf_cgroup *cgrp1;
778 	struct perf_cgroup *cgrp2 = NULL;
779 
780 	rcu_read_lock();
781 	/*
782 	 * we come here when we know perf_cgroup_events > 0
783 	 * we do not need to pass the ctx here because we know
784 	 * we are holding the rcu lock
785 	 */
786 	cgrp1 = perf_cgroup_from_task(task, NULL);
787 	cgrp2 = perf_cgroup_from_task(prev, NULL);
788 
789 	/*
790 	 * only need to schedule in cgroup events if we are changing
791 	 * cgroup during ctxsw. Cgroup events were not scheduled
792 	 * out of ctxsw out if that was not the case.
793 	 */
794 	if (cgrp1 != cgrp2)
795 		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
796 
797 	rcu_read_unlock();
798 }
799 
800 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
801 				      struct perf_event_attr *attr,
802 				      struct perf_event *group_leader)
803 {
804 	struct perf_cgroup *cgrp;
805 	struct cgroup_subsys_state *css;
806 	struct fd f = fdget(fd);
807 	int ret = 0;
808 
809 	if (!f.file)
810 		return -EBADF;
811 
812 	css = css_tryget_online_from_dir(f.file->f_path.dentry,
813 					 &perf_event_cgrp_subsys);
814 	if (IS_ERR(css)) {
815 		ret = PTR_ERR(css);
816 		goto out;
817 	}
818 
819 	cgrp = container_of(css, struct perf_cgroup, css);
820 	event->cgrp = cgrp;
821 
822 	/*
823 	 * all events in a group must monitor
824 	 * the same cgroup because a task belongs
825 	 * to only one perf cgroup at a time
826 	 */
827 	if (group_leader && group_leader->cgrp != cgrp) {
828 		perf_detach_cgroup(event);
829 		ret = -EINVAL;
830 	}
831 out:
832 	fdput(f);
833 	return ret;
834 }
835 
836 static inline void
837 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
838 {
839 	struct perf_cgroup_info *t;
840 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
841 	event->shadow_ctx_time = now - t->timestamp;
842 }
843 
844 static inline void
845 perf_cgroup_defer_enabled(struct perf_event *event)
846 {
847 	/*
848 	 * when the current task's perf cgroup does not match
849 	 * the event's, we need to remember to call the
850 	 * perf_mark_enable() function the first time a task with
851 	 * a matching perf cgroup is scheduled in.
852 	 */
853 	if (is_cgroup_event(event) && !perf_cgroup_match(event))
854 		event->cgrp_defer_enabled = 1;
855 }
856 
857 static inline void
858 perf_cgroup_mark_enabled(struct perf_event *event,
859 			 struct perf_event_context *ctx)
860 {
861 	struct perf_event *sub;
862 	u64 tstamp = perf_event_time(event);
863 
864 	if (!event->cgrp_defer_enabled)
865 		return;
866 
867 	event->cgrp_defer_enabled = 0;
868 
869 	event->tstamp_enabled = tstamp - event->total_time_enabled;
870 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
871 		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
872 			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
873 			sub->cgrp_defer_enabled = 0;
874 		}
875 	}
876 }
877 
878 /*
879  * Update cpuctx->cgrp so that it is set when first cgroup event is added and
880  * cleared when last cgroup event is removed.
881  */
882 static inline void
883 list_update_cgroup_event(struct perf_event *event,
884 			 struct perf_event_context *ctx, bool add)
885 {
886 	struct perf_cpu_context *cpuctx;
887 	struct list_head *cpuctx_entry;
888 
889 	if (!is_cgroup_event(event))
890 		return;
891 
892 	if (add && ctx->nr_cgroups++)
893 		return;
894 	else if (!add && --ctx->nr_cgroups)
895 		return;
896 	/*
897 	 * Because cgroup events are always per-cpu events,
898 	 * this will always be called from the right CPU.
899 	 */
900 	cpuctx = __get_cpu_context(ctx);
901 	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
902 	/* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
903 	if (add) {
904 		list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
905 		if (perf_cgroup_from_task(current, ctx) == event->cgrp)
906 			cpuctx->cgrp = event->cgrp;
907 	} else {
908 		list_del(cpuctx_entry);
909 		cpuctx->cgrp = NULL;
910 	}
911 }
912 
913 #else /* !CONFIG_CGROUP_PERF */
914 
915 static inline bool
916 perf_cgroup_match(struct perf_event *event)
917 {
918 	return true;
919 }
920 
921 static inline void perf_detach_cgroup(struct perf_event *event)
922 {}
923 
924 static inline int is_cgroup_event(struct perf_event *event)
925 {
926 	return 0;
927 }
928 
929 static inline void update_cgrp_time_from_event(struct perf_event *event)
930 {
931 }
932 
933 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
934 {
935 }
936 
937 static inline void perf_cgroup_sched_out(struct task_struct *task,
938 					 struct task_struct *next)
939 {
940 }
941 
942 static inline void perf_cgroup_sched_in(struct task_struct *prev,
943 					struct task_struct *task)
944 {
945 }
946 
947 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
948 				      struct perf_event_attr *attr,
949 				      struct perf_event *group_leader)
950 {
951 	return -EINVAL;
952 }
953 
954 static inline void
955 perf_cgroup_set_timestamp(struct task_struct *task,
956 			  struct perf_event_context *ctx)
957 {
958 }
959 
960 void
961 perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
962 {
963 }
964 
965 static inline void
966 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
967 {
968 }
969 
970 static inline u64 perf_cgroup_event_time(struct perf_event *event)
971 {
972 	return 0;
973 }
974 
975 static inline void
976 perf_cgroup_defer_enabled(struct perf_event *event)
977 {
978 }
979 
980 static inline void
981 perf_cgroup_mark_enabled(struct perf_event *event,
982 			 struct perf_event_context *ctx)
983 {
984 }
985 
986 static inline void
987 list_update_cgroup_event(struct perf_event *event,
988 			 struct perf_event_context *ctx, bool add)
989 {
990 }
991 
992 #endif
993 
994 /*
995  * set default to be dependent on timer tick just
996  * like original code
997  */
998 #define PERF_CPU_HRTIMER (1000 / HZ)
999 /*
1000  * function must be called with interrupts disabled
1001  */
1002 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
1003 {
1004 	struct perf_cpu_context *cpuctx;
1005 	int rotations = 0;
1006 
1007 	WARN_ON(!irqs_disabled());
1008 
1009 	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
1010 	rotations = perf_rotate_context(cpuctx);
1011 
1012 	raw_spin_lock(&cpuctx->hrtimer_lock);
1013 	if (rotations)
1014 		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
1015 	else
1016 		cpuctx->hrtimer_active = 0;
1017 	raw_spin_unlock(&cpuctx->hrtimer_lock);
1018 
1019 	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
1020 }
1021 
1022 static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
1023 {
1024 	struct hrtimer *timer = &cpuctx->hrtimer;
1025 	struct pmu *pmu = cpuctx->ctx.pmu;
1026 	u64 interval;
1027 
1028 	/* no multiplexing needed for SW PMU */
1029 	if (pmu->task_ctx_nr == perf_sw_context)
1030 		return;
1031 
1032 	/*
1033 	 * check default is sane, if not set then force to
1034 	 * default interval (1/tick)
1035 	 */
1036 	interval = pmu->hrtimer_interval_ms;
1037 	if (interval < 1)
1038 		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
1039 
1040 	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
1041 
1042 	raw_spin_lock_init(&cpuctx->hrtimer_lock);
1043 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
1044 	timer->function = perf_mux_hrtimer_handler;
1045 }
1046 
1047 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
1048 {
1049 	struct hrtimer *timer = &cpuctx->hrtimer;
1050 	struct pmu *pmu = cpuctx->ctx.pmu;
1051 	unsigned long flags;
1052 
1053 	/* not for SW PMU */
1054 	if (pmu->task_ctx_nr == perf_sw_context)
1055 		return 0;
1056 
1057 	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
1058 	if (!cpuctx->hrtimer_active) {
1059 		cpuctx->hrtimer_active = 1;
1060 		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
1061 		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
1062 	}
1063 	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
1064 
1065 	return 0;
1066 }
1067 
1068 void perf_pmu_disable(struct pmu *pmu)
1069 {
1070 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
1071 	if (!(*count)++)
1072 		pmu->pmu_disable(pmu);
1073 }
1074 
1075 void perf_pmu_enable(struct pmu *pmu)
1076 {
1077 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
1078 	if (!--(*count))
1079 		pmu->pmu_enable(pmu);
1080 }
1081 
1082 static DEFINE_PER_CPU(struct list_head, active_ctx_list);
1083 
1084 /*
1085  * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
1086  * perf_event_task_tick() are fully serialized because they're strictly cpu
1087  * affine and perf_event_ctx{activate,deactivate} are called with IRQs
1088  * disabled, while perf_event_task_tick is called from IRQ context.
1089  */
1090 static void perf_event_ctx_activate(struct perf_event_context *ctx)
1091 {
1092 	struct list_head *head = this_cpu_ptr(&active_ctx_list);
1093 
1094 	WARN_ON(!irqs_disabled());
1095 
1096 	WARN_ON(!list_empty(&ctx->active_ctx_list));
1097 
1098 	list_add(&ctx->active_ctx_list, head);
1099 }
1100 
1101 static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
1102 {
1103 	WARN_ON(!irqs_disabled());
1104 
1105 	WARN_ON(list_empty(&ctx->active_ctx_list));
1106 
1107 	list_del_init(&ctx->active_ctx_list);
1108 }
1109 
1110 static void get_ctx(struct perf_event_context *ctx)
1111 {
1112 	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
1113 }
1114 
1115 static void free_ctx(struct rcu_head *head)
1116 {
1117 	struct perf_event_context *ctx;
1118 
1119 	ctx = container_of(head, struct perf_event_context, rcu_head);
1120 	kfree(ctx->task_ctx_data);
1121 	kfree(ctx);
1122 }
1123 
1124 static void put_ctx(struct perf_event_context *ctx)
1125 {
1126 	if (atomic_dec_and_test(&ctx->refcount)) {
1127 		if (ctx->parent_ctx)
1128 			put_ctx(ctx->parent_ctx);
1129 		if (ctx->task && ctx->task != TASK_TOMBSTONE)
1130 			put_task_struct(ctx->task);
1131 		call_rcu(&ctx->rcu_head, free_ctx);
1132 	}
1133 }
1134 
1135 /*
1136  * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
1137  * perf_pmu_migrate_context() we need some magic.
1138  *
1139  * Those places that change perf_event::ctx will hold both
1140  * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
1141  *
1142  * Lock ordering is by mutex address. There are two other sites where
1143  * perf_event_context::mutex nests and those are:
1144  *
1145  *  - perf_event_exit_task_context()	[ child , 0 ]
1146  *      perf_event_exit_event()
1147  *        put_event()			[ parent, 1 ]
1148  *
1149  *  - perf_event_init_context()		[ parent, 0 ]
1150  *      inherit_task_group()
1151  *        inherit_group()
1152  *          inherit_event()
1153  *            perf_event_alloc()
1154  *              perf_init_event()
1155  *                perf_try_init_event()	[ child , 1 ]
1156  *
1157  * While it appears there is an obvious deadlock here -- the parent and child
1158  * nesting levels are inverted between the two. This is in fact safe because
1159  * life-time rules separate them. That is an exiting task cannot fork, and a
1160  * spawning task cannot (yet) exit.
1161  *
1162  * But remember that that these are parent<->child context relations, and
1163  * migration does not affect children, therefore these two orderings should not
1164  * interact.
1165  *
1166  * The change in perf_event::ctx does not affect children (as claimed above)
1167  * because the sys_perf_event_open() case will install a new event and break
1168  * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1169  * concerned with cpuctx and that doesn't have children.
1170  *
1171  * The places that change perf_event::ctx will issue:
1172  *
1173  *   perf_remove_from_context();
1174  *   synchronize_rcu();
1175  *   perf_install_in_context();
1176  *
1177  * to affect the change. The remove_from_context() + synchronize_rcu() should
1178  * quiesce the event, after which we can install it in the new location. This
1179  * means that only external vectors (perf_fops, prctl) can perturb the event
1180  * while in transit. Therefore all such accessors should also acquire
1181  * perf_event_context::mutex to serialize against this.
1182  *
1183  * However; because event->ctx can change while we're waiting to acquire
1184  * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
1185  * function.
1186  *
1187  * Lock order:
1188  *    cred_guard_mutex
1189  *	task_struct::perf_event_mutex
1190  *	  perf_event_context::mutex
1191  *	    perf_event::child_mutex;
1192  *	      perf_event_context::lock
1193  *	    perf_event::mmap_mutex
1194  *	    mmap_sem
1195  */
1196 static struct perf_event_context *
1197 perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
1198 {
1199 	struct perf_event_context *ctx;
1200 
1201 again:
1202 	rcu_read_lock();
1203 	ctx = ACCESS_ONCE(event->ctx);
1204 	if (!atomic_inc_not_zero(&ctx->refcount)) {
1205 		rcu_read_unlock();
1206 		goto again;
1207 	}
1208 	rcu_read_unlock();
1209 
1210 	mutex_lock_nested(&ctx->mutex, nesting);
1211 	if (event->ctx != ctx) {
1212 		mutex_unlock(&ctx->mutex);
1213 		put_ctx(ctx);
1214 		goto again;
1215 	}
1216 
1217 	return ctx;
1218 }
1219 
1220 static inline struct perf_event_context *
1221 perf_event_ctx_lock(struct perf_event *event)
1222 {
1223 	return perf_event_ctx_lock_nested(event, 0);
1224 }
1225 
1226 static void perf_event_ctx_unlock(struct perf_event *event,
1227 				  struct perf_event_context *ctx)
1228 {
1229 	mutex_unlock(&ctx->mutex);
1230 	put_ctx(ctx);
1231 }
1232 
1233 /*
1234  * This must be done under the ctx->lock, such as to serialize against
1235  * context_equiv(), therefore we cannot call put_ctx() since that might end up
1236  * calling scheduler related locks and ctx->lock nests inside those.
1237  */
1238 static __must_check struct perf_event_context *
1239 unclone_ctx(struct perf_event_context *ctx)
1240 {
1241 	struct perf_event_context *parent_ctx = ctx->parent_ctx;
1242 
1243 	lockdep_assert_held(&ctx->lock);
1244 
1245 	if (parent_ctx)
1246 		ctx->parent_ctx = NULL;
1247 	ctx->generation++;
1248 
1249 	return parent_ctx;
1250 }
1251 
1252 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1253 {
1254 	/*
1255 	 * only top level events have the pid namespace they were created in
1256 	 */
1257 	if (event->parent)
1258 		event = event->parent;
1259 
1260 	return task_tgid_nr_ns(p, event->ns);
1261 }
1262 
1263 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1264 {
1265 	/*
1266 	 * only top level events have the pid namespace they were created in
1267 	 */
1268 	if (event->parent)
1269 		event = event->parent;
1270 
1271 	return task_pid_nr_ns(p, event->ns);
1272 }
1273 
1274 /*
1275  * If we inherit events we want to return the parent event id
1276  * to userspace.
1277  */
1278 static u64 primary_event_id(struct perf_event *event)
1279 {
1280 	u64 id = event->id;
1281 
1282 	if (event->parent)
1283 		id = event->parent->id;
1284 
1285 	return id;
1286 }
1287 
1288 /*
1289  * Get the perf_event_context for a task and lock it.
1290  *
1291  * This has to cope with with the fact that until it is locked,
1292  * the context could get moved to another task.
1293  */
1294 static struct perf_event_context *
1295 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
1296 {
1297 	struct perf_event_context *ctx;
1298 
1299 retry:
1300 	/*
1301 	 * One of the few rules of preemptible RCU is that one cannot do
1302 	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
1303 	 * part of the read side critical section was irqs-enabled -- see
1304 	 * rcu_read_unlock_special().
1305 	 *
1306 	 * Since ctx->lock nests under rq->lock we must ensure the entire read
1307 	 * side critical section has interrupts disabled.
1308 	 */
1309 	local_irq_save(*flags);
1310 	rcu_read_lock();
1311 	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
1312 	if (ctx) {
1313 		/*
1314 		 * If this context is a clone of another, it might
1315 		 * get swapped for another underneath us by
1316 		 * perf_event_task_sched_out, though the
1317 		 * rcu_read_lock() protects us from any context
1318 		 * getting freed.  Lock the context and check if it
1319 		 * got swapped before we could get the lock, and retry
1320 		 * if so.  If we locked the right context, then it
1321 		 * can't get swapped on us any more.
1322 		 */
1323 		raw_spin_lock(&ctx->lock);
1324 		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
1325 			raw_spin_unlock(&ctx->lock);
1326 			rcu_read_unlock();
1327 			local_irq_restore(*flags);
1328 			goto retry;
1329 		}
1330 
1331 		if (ctx->task == TASK_TOMBSTONE ||
1332 		    !atomic_inc_not_zero(&ctx->refcount)) {
1333 			raw_spin_unlock(&ctx->lock);
1334 			ctx = NULL;
1335 		} else {
1336 			WARN_ON_ONCE(ctx->task != task);
1337 		}
1338 	}
1339 	rcu_read_unlock();
1340 	if (!ctx)
1341 		local_irq_restore(*flags);
1342 	return ctx;
1343 }
1344 
1345 /*
1346  * Get the context for a task and increment its pin_count so it
1347  * can't get swapped to another task.  This also increments its
1348  * reference count so that the context can't get freed.
1349  */
1350 static struct perf_event_context *
1351 perf_pin_task_context(struct task_struct *task, int ctxn)
1352 {
1353 	struct perf_event_context *ctx;
1354 	unsigned long flags;
1355 
1356 	ctx = perf_lock_task_context(task, ctxn, &flags);
1357 	if (ctx) {
1358 		++ctx->pin_count;
1359 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
1360 	}
1361 	return ctx;
1362 }
1363 
1364 static void perf_unpin_context(struct perf_event_context *ctx)
1365 {
1366 	unsigned long flags;
1367 
1368 	raw_spin_lock_irqsave(&ctx->lock, flags);
1369 	--ctx->pin_count;
1370 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
1371 }
1372 
1373 /*
1374  * Update the record of the current time in a context.
1375  */
1376 static void update_context_time(struct perf_event_context *ctx)
1377 {
1378 	u64 now = perf_clock();
1379 
1380 	ctx->time += now - ctx->timestamp;
1381 	ctx->timestamp = now;
1382 }
1383 
1384 static u64 perf_event_time(struct perf_event *event)
1385 {
1386 	struct perf_event_context *ctx = event->ctx;
1387 
1388 	if (is_cgroup_event(event))
1389 		return perf_cgroup_event_time(event);
1390 
1391 	return ctx ? ctx->time : 0;
1392 }
1393 
1394 /*
1395  * Update the total_time_enabled and total_time_running fields for a event.
1396  */
1397 static void update_event_times(struct perf_event *event)
1398 {
1399 	struct perf_event_context *ctx = event->ctx;
1400 	u64 run_end;
1401 
1402 	lockdep_assert_held(&ctx->lock);
1403 
1404 	if (event->state < PERF_EVENT_STATE_INACTIVE ||
1405 	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1406 		return;
1407 
1408 	/*
1409 	 * in cgroup mode, time_enabled represents
1410 	 * the time the event was enabled AND active
1411 	 * tasks were in the monitored cgroup. This is
1412 	 * independent of the activity of the context as
1413 	 * there may be a mix of cgroup and non-cgroup events.
1414 	 *
1415 	 * That is why we treat cgroup events differently
1416 	 * here.
1417 	 */
1418 	if (is_cgroup_event(event))
1419 		run_end = perf_cgroup_event_time(event);
1420 	else if (ctx->is_active)
1421 		run_end = ctx->time;
1422 	else
1423 		run_end = event->tstamp_stopped;
1424 
1425 	event->total_time_enabled = run_end - event->tstamp_enabled;
1426 
1427 	if (event->state == PERF_EVENT_STATE_INACTIVE)
1428 		run_end = event->tstamp_stopped;
1429 	else
1430 		run_end = perf_event_time(event);
1431 
1432 	event->total_time_running = run_end - event->tstamp_running;
1433 
1434 }
1435 
1436 /*
1437  * Update total_time_enabled and total_time_running for all events in a group.
1438  */
1439 static void update_group_times(struct perf_event *leader)
1440 {
1441 	struct perf_event *event;
1442 
1443 	update_event_times(leader);
1444 	list_for_each_entry(event, &leader->sibling_list, group_entry)
1445 		update_event_times(event);
1446 }
1447 
1448 static enum event_type_t get_event_type(struct perf_event *event)
1449 {
1450 	struct perf_event_context *ctx = event->ctx;
1451 	enum event_type_t event_type;
1452 
1453 	lockdep_assert_held(&ctx->lock);
1454 
1455 	/*
1456 	 * It's 'group type', really, because if our group leader is
1457 	 * pinned, so are we.
1458 	 */
1459 	if (event->group_leader != event)
1460 		event = event->group_leader;
1461 
1462 	event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
1463 	if (!ctx->task)
1464 		event_type |= EVENT_CPU;
1465 
1466 	return event_type;
1467 }
1468 
1469 static struct list_head *
1470 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1471 {
1472 	if (event->attr.pinned)
1473 		return &ctx->pinned_groups;
1474 	else
1475 		return &ctx->flexible_groups;
1476 }
1477 
1478 /*
1479  * Add a event from the lists for its context.
1480  * Must be called with ctx->mutex and ctx->lock held.
1481  */
1482 static void
1483 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1484 {
1485 	lockdep_assert_held(&ctx->lock);
1486 
1487 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1488 	event->attach_state |= PERF_ATTACH_CONTEXT;
1489 
1490 	/*
1491 	 * If we're a stand alone event or group leader, we go to the context
1492 	 * list, group events are kept attached to the group so that
1493 	 * perf_group_detach can, at all times, locate all siblings.
1494 	 */
1495 	if (event->group_leader == event) {
1496 		struct list_head *list;
1497 
1498 		event->group_caps = event->event_caps;
1499 
1500 		list = ctx_group_list(event, ctx);
1501 		list_add_tail(&event->group_entry, list);
1502 	}
1503 
1504 	list_update_cgroup_event(event, ctx, true);
1505 
1506 	list_add_rcu(&event->event_entry, &ctx->event_list);
1507 	ctx->nr_events++;
1508 	if (event->attr.inherit_stat)
1509 		ctx->nr_stat++;
1510 
1511 	ctx->generation++;
1512 }
1513 
1514 /*
1515  * Initialize event state based on the perf_event_attr::disabled.
1516  */
1517 static inline void perf_event__state_init(struct perf_event *event)
1518 {
1519 	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1520 					      PERF_EVENT_STATE_INACTIVE;
1521 }
1522 
1523 static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
1524 {
1525 	int entry = sizeof(u64); /* value */
1526 	int size = 0;
1527 	int nr = 1;
1528 
1529 	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1530 		size += sizeof(u64);
1531 
1532 	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1533 		size += sizeof(u64);
1534 
1535 	if (event->attr.read_format & PERF_FORMAT_ID)
1536 		entry += sizeof(u64);
1537 
1538 	if (event->attr.read_format & PERF_FORMAT_GROUP) {
1539 		nr += nr_siblings;
1540 		size += sizeof(u64);
1541 	}
1542 
1543 	size += entry * nr;
1544 	event->read_size = size;
1545 }
1546 
1547 static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
1548 {
1549 	struct perf_sample_data *data;
1550 	u16 size = 0;
1551 
1552 	if (sample_type & PERF_SAMPLE_IP)
1553 		size += sizeof(data->ip);
1554 
1555 	if (sample_type & PERF_SAMPLE_ADDR)
1556 		size += sizeof(data->addr);
1557 
1558 	if (sample_type & PERF_SAMPLE_PERIOD)
1559 		size += sizeof(data->period);
1560 
1561 	if (sample_type & PERF_SAMPLE_WEIGHT)
1562 		size += sizeof(data->weight);
1563 
1564 	if (sample_type & PERF_SAMPLE_READ)
1565 		size += event->read_size;
1566 
1567 	if (sample_type & PERF_SAMPLE_DATA_SRC)
1568 		size += sizeof(data->data_src.val);
1569 
1570 	if (sample_type & PERF_SAMPLE_TRANSACTION)
1571 		size += sizeof(data->txn);
1572 
1573 	event->header_size = size;
1574 }
1575 
1576 /*
1577  * Called at perf_event creation and when events are attached/detached from a
1578  * group.
1579  */
1580 static void perf_event__header_size(struct perf_event *event)
1581 {
1582 	__perf_event_read_size(event,
1583 			       event->group_leader->nr_siblings);
1584 	__perf_event_header_size(event, event->attr.sample_type);
1585 }
1586 
1587 static void perf_event__id_header_size(struct perf_event *event)
1588 {
1589 	struct perf_sample_data *data;
1590 	u64 sample_type = event->attr.sample_type;
1591 	u16 size = 0;
1592 
1593 	if (sample_type & PERF_SAMPLE_TID)
1594 		size += sizeof(data->tid_entry);
1595 
1596 	if (sample_type & PERF_SAMPLE_TIME)
1597 		size += sizeof(data->time);
1598 
1599 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
1600 		size += sizeof(data->id);
1601 
1602 	if (sample_type & PERF_SAMPLE_ID)
1603 		size += sizeof(data->id);
1604 
1605 	if (sample_type & PERF_SAMPLE_STREAM_ID)
1606 		size += sizeof(data->stream_id);
1607 
1608 	if (sample_type & PERF_SAMPLE_CPU)
1609 		size += sizeof(data->cpu_entry);
1610 
1611 	event->id_header_size = size;
1612 }
1613 
1614 static bool perf_event_validate_size(struct perf_event *event)
1615 {
1616 	/*
1617 	 * The values computed here will be over-written when we actually
1618 	 * attach the event.
1619 	 */
1620 	__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
1621 	__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
1622 	perf_event__id_header_size(event);
1623 
1624 	/*
1625 	 * Sum the lot; should not exceed the 64k limit we have on records.
1626 	 * Conservative limit to allow for callchains and other variable fields.
1627 	 */
1628 	if (event->read_size + event->header_size +
1629 	    event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
1630 		return false;
1631 
1632 	return true;
1633 }
1634 
1635 static void perf_group_attach(struct perf_event *event)
1636 {
1637 	struct perf_event *group_leader = event->group_leader, *pos;
1638 
1639 	lockdep_assert_held(&event->ctx->lock);
1640 
1641 	/*
1642 	 * We can have double attach due to group movement in perf_event_open.
1643 	 */
1644 	if (event->attach_state & PERF_ATTACH_GROUP)
1645 		return;
1646 
1647 	event->attach_state |= PERF_ATTACH_GROUP;
1648 
1649 	if (group_leader == event)
1650 		return;
1651 
1652 	WARN_ON_ONCE(group_leader->ctx != event->ctx);
1653 
1654 	group_leader->group_caps &= event->event_caps;
1655 
1656 	list_add_tail(&event->group_entry, &group_leader->sibling_list);
1657 	group_leader->nr_siblings++;
1658 
1659 	perf_event__header_size(group_leader);
1660 
1661 	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1662 		perf_event__header_size(pos);
1663 }
1664 
1665 /*
1666  * Remove a event from the lists for its context.
1667  * Must be called with ctx->mutex and ctx->lock held.
1668  */
1669 static void
1670 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1671 {
1672 	WARN_ON_ONCE(event->ctx != ctx);
1673 	lockdep_assert_held(&ctx->lock);
1674 
1675 	/*
1676 	 * We can have double detach due to exit/hot-unplug + close.
1677 	 */
1678 	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1679 		return;
1680 
1681 	event->attach_state &= ~PERF_ATTACH_CONTEXT;
1682 
1683 	list_update_cgroup_event(event, ctx, false);
1684 
1685 	ctx->nr_events--;
1686 	if (event->attr.inherit_stat)
1687 		ctx->nr_stat--;
1688 
1689 	list_del_rcu(&event->event_entry);
1690 
1691 	if (event->group_leader == event)
1692 		list_del_init(&event->group_entry);
1693 
1694 	update_group_times(event);
1695 
1696 	/*
1697 	 * If event was in error state, then keep it
1698 	 * that way, otherwise bogus counts will be
1699 	 * returned on read(). The only way to get out
1700 	 * of error state is by explicit re-enabling
1701 	 * of the event
1702 	 */
1703 	if (event->state > PERF_EVENT_STATE_OFF)
1704 		event->state = PERF_EVENT_STATE_OFF;
1705 
1706 	ctx->generation++;
1707 }
1708 
1709 static void perf_group_detach(struct perf_event *event)
1710 {
1711 	struct perf_event *sibling, *tmp;
1712 	struct list_head *list = NULL;
1713 
1714 	lockdep_assert_held(&event->ctx->lock);
1715 
1716 	/*
1717 	 * We can have double detach due to exit/hot-unplug + close.
1718 	 */
1719 	if (!(event->attach_state & PERF_ATTACH_GROUP))
1720 		return;
1721 
1722 	event->attach_state &= ~PERF_ATTACH_GROUP;
1723 
1724 	/*
1725 	 * If this is a sibling, remove it from its group.
1726 	 */
1727 	if (event->group_leader != event) {
1728 		list_del_init(&event->group_entry);
1729 		event->group_leader->nr_siblings--;
1730 		goto out;
1731 	}
1732 
1733 	if (!list_empty(&event->group_entry))
1734 		list = &event->group_entry;
1735 
1736 	/*
1737 	 * If this was a group event with sibling events then
1738 	 * upgrade the siblings to singleton events by adding them
1739 	 * to whatever list we are on.
1740 	 */
1741 	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1742 		if (list)
1743 			list_move_tail(&sibling->group_entry, list);
1744 		sibling->group_leader = sibling;
1745 
1746 		/* Inherit group flags from the previous leader */
1747 		sibling->group_caps = event->group_caps;
1748 
1749 		WARN_ON_ONCE(sibling->ctx != event->ctx);
1750 	}
1751 
1752 out:
1753 	perf_event__header_size(event->group_leader);
1754 
1755 	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1756 		perf_event__header_size(tmp);
1757 }
1758 
1759 static bool is_orphaned_event(struct perf_event *event)
1760 {
1761 	return event->state == PERF_EVENT_STATE_DEAD;
1762 }
1763 
1764 static inline int __pmu_filter_match(struct perf_event *event)
1765 {
1766 	struct pmu *pmu = event->pmu;
1767 	return pmu->filter_match ? pmu->filter_match(event) : 1;
1768 }
1769 
1770 /*
1771  * Check whether we should attempt to schedule an event group based on
1772  * PMU-specific filtering. An event group can consist of HW and SW events,
1773  * potentially with a SW leader, so we must check all the filters, to
1774  * determine whether a group is schedulable:
1775  */
1776 static inline int pmu_filter_match(struct perf_event *event)
1777 {
1778 	struct perf_event *child;
1779 
1780 	if (!__pmu_filter_match(event))
1781 		return 0;
1782 
1783 	list_for_each_entry(child, &event->sibling_list, group_entry) {
1784 		if (!__pmu_filter_match(child))
1785 			return 0;
1786 	}
1787 
1788 	return 1;
1789 }
1790 
1791 static inline int
1792 event_filter_match(struct perf_event *event)
1793 {
1794 	return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
1795 	       perf_cgroup_match(event) && pmu_filter_match(event);
1796 }
1797 
1798 static void
1799 event_sched_out(struct perf_event *event,
1800 		  struct perf_cpu_context *cpuctx,
1801 		  struct perf_event_context *ctx)
1802 {
1803 	u64 tstamp = perf_event_time(event);
1804 	u64 delta;
1805 
1806 	WARN_ON_ONCE(event->ctx != ctx);
1807 	lockdep_assert_held(&ctx->lock);
1808 
1809 	/*
1810 	 * An event which could not be activated because of
1811 	 * filter mismatch still needs to have its timings
1812 	 * maintained, otherwise bogus information is return
1813 	 * via read() for time_enabled, time_running:
1814 	 */
1815 	if (event->state == PERF_EVENT_STATE_INACTIVE &&
1816 	    !event_filter_match(event)) {
1817 		delta = tstamp - event->tstamp_stopped;
1818 		event->tstamp_running += delta;
1819 		event->tstamp_stopped = tstamp;
1820 	}
1821 
1822 	if (event->state != PERF_EVENT_STATE_ACTIVE)
1823 		return;
1824 
1825 	perf_pmu_disable(event->pmu);
1826 
1827 	event->tstamp_stopped = tstamp;
1828 	event->pmu->del(event, 0);
1829 	event->oncpu = -1;
1830 	event->state = PERF_EVENT_STATE_INACTIVE;
1831 	if (event->pending_disable) {
1832 		event->pending_disable = 0;
1833 		event->state = PERF_EVENT_STATE_OFF;
1834 	}
1835 
1836 	if (!is_software_event(event))
1837 		cpuctx->active_oncpu--;
1838 	if (!--ctx->nr_active)
1839 		perf_event_ctx_deactivate(ctx);
1840 	if (event->attr.freq && event->attr.sample_freq)
1841 		ctx->nr_freq--;
1842 	if (event->attr.exclusive || !cpuctx->active_oncpu)
1843 		cpuctx->exclusive = 0;
1844 
1845 	perf_pmu_enable(event->pmu);
1846 }
1847 
1848 static void
1849 group_sched_out(struct perf_event *group_event,
1850 		struct perf_cpu_context *cpuctx,
1851 		struct perf_event_context *ctx)
1852 {
1853 	struct perf_event *event;
1854 	int state = group_event->state;
1855 
1856 	perf_pmu_disable(ctx->pmu);
1857 
1858 	event_sched_out(group_event, cpuctx, ctx);
1859 
1860 	/*
1861 	 * Schedule out siblings (if any):
1862 	 */
1863 	list_for_each_entry(event, &group_event->sibling_list, group_entry)
1864 		event_sched_out(event, cpuctx, ctx);
1865 
1866 	perf_pmu_enable(ctx->pmu);
1867 
1868 	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1869 		cpuctx->exclusive = 0;
1870 }
1871 
1872 #define DETACH_GROUP	0x01UL
1873 
1874 /*
1875  * Cross CPU call to remove a performance event
1876  *
1877  * We disable the event on the hardware level first. After that we
1878  * remove it from the context list.
1879  */
1880 static void
1881 __perf_remove_from_context(struct perf_event *event,
1882 			   struct perf_cpu_context *cpuctx,
1883 			   struct perf_event_context *ctx,
1884 			   void *info)
1885 {
1886 	unsigned long flags = (unsigned long)info;
1887 
1888 	event_sched_out(event, cpuctx, ctx);
1889 	if (flags & DETACH_GROUP)
1890 		perf_group_detach(event);
1891 	list_del_event(event, ctx);
1892 
1893 	if (!ctx->nr_events && ctx->is_active) {
1894 		ctx->is_active = 0;
1895 		if (ctx->task) {
1896 			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
1897 			cpuctx->task_ctx = NULL;
1898 		}
1899 	}
1900 }
1901 
1902 /*
1903  * Remove the event from a task's (or a CPU's) list of events.
1904  *
1905  * If event->ctx is a cloned context, callers must make sure that
1906  * every task struct that event->ctx->task could possibly point to
1907  * remains valid.  This is OK when called from perf_release since
1908  * that only calls us on the top-level context, which can't be a clone.
1909  * When called from perf_event_exit_task, it's OK because the
1910  * context has been detached from its task.
1911  */
1912 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
1913 {
1914 	struct perf_event_context *ctx = event->ctx;
1915 
1916 	lockdep_assert_held(&ctx->mutex);
1917 
1918 	event_function_call(event, __perf_remove_from_context, (void *)flags);
1919 
1920 	/*
1921 	 * The above event_function_call() can NO-OP when it hits
1922 	 * TASK_TOMBSTONE. In that case we must already have been detached
1923 	 * from the context (by perf_event_exit_event()) but the grouping
1924 	 * might still be in-tact.
1925 	 */
1926 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1927 	if ((flags & DETACH_GROUP) &&
1928 	    (event->attach_state & PERF_ATTACH_GROUP)) {
1929 		/*
1930 		 * Since in that case we cannot possibly be scheduled, simply
1931 		 * detach now.
1932 		 */
1933 		raw_spin_lock_irq(&ctx->lock);
1934 		perf_group_detach(event);
1935 		raw_spin_unlock_irq(&ctx->lock);
1936 	}
1937 }
1938 
1939 /*
1940  * Cross CPU call to disable a performance event
1941  */
1942 static void __perf_event_disable(struct perf_event *event,
1943 				 struct perf_cpu_context *cpuctx,
1944 				 struct perf_event_context *ctx,
1945 				 void *info)
1946 {
1947 	if (event->state < PERF_EVENT_STATE_INACTIVE)
1948 		return;
1949 
1950 	update_context_time(ctx);
1951 	update_cgrp_time_from_event(event);
1952 	update_group_times(event);
1953 	if (event == event->group_leader)
1954 		group_sched_out(event, cpuctx, ctx);
1955 	else
1956 		event_sched_out(event, cpuctx, ctx);
1957 	event->state = PERF_EVENT_STATE_OFF;
1958 }
1959 
1960 /*
1961  * Disable a event.
1962  *
1963  * If event->ctx is a cloned context, callers must make sure that
1964  * every task struct that event->ctx->task could possibly point to
1965  * remains valid.  This condition is satisifed when called through
1966  * perf_event_for_each_child or perf_event_for_each because they
1967  * hold the top-level event's child_mutex, so any descendant that
1968  * goes to exit will block in perf_event_exit_event().
1969  *
1970  * When called from perf_pending_event it's OK because event->ctx
1971  * is the current context on this CPU and preemption is disabled,
1972  * hence we can't get into perf_event_task_sched_out for this context.
1973  */
1974 static void _perf_event_disable(struct perf_event *event)
1975 {
1976 	struct perf_event_context *ctx = event->ctx;
1977 
1978 	raw_spin_lock_irq(&ctx->lock);
1979 	if (event->state <= PERF_EVENT_STATE_OFF) {
1980 		raw_spin_unlock_irq(&ctx->lock);
1981 		return;
1982 	}
1983 	raw_spin_unlock_irq(&ctx->lock);
1984 
1985 	event_function_call(event, __perf_event_disable, NULL);
1986 }
1987 
1988 void perf_event_disable_local(struct perf_event *event)
1989 {
1990 	event_function_local(event, __perf_event_disable, NULL);
1991 }
1992 
1993 /*
1994  * Strictly speaking kernel users cannot create groups and therefore this
1995  * interface does not need the perf_event_ctx_lock() magic.
1996  */
1997 void perf_event_disable(struct perf_event *event)
1998 {
1999 	struct perf_event_context *ctx;
2000 
2001 	ctx = perf_event_ctx_lock(event);
2002 	_perf_event_disable(event);
2003 	perf_event_ctx_unlock(event, ctx);
2004 }
2005 EXPORT_SYMBOL_GPL(perf_event_disable);
2006 
2007 void perf_event_disable_inatomic(struct perf_event *event)
2008 {
2009 	event->pending_disable = 1;
2010 	irq_work_queue(&event->pending);
2011 }
2012 
2013 static void perf_set_shadow_time(struct perf_event *event,
2014 				 struct perf_event_context *ctx,
2015 				 u64 tstamp)
2016 {
2017 	/*
2018 	 * use the correct time source for the time snapshot
2019 	 *
2020 	 * We could get by without this by leveraging the
2021 	 * fact that to get to this function, the caller
2022 	 * has most likely already called update_context_time()
2023 	 * and update_cgrp_time_xx() and thus both timestamp
2024 	 * are identical (or very close). Given that tstamp is,
2025 	 * already adjusted for cgroup, we could say that:
2026 	 *    tstamp - ctx->timestamp
2027 	 * is equivalent to
2028 	 *    tstamp - cgrp->timestamp.
2029 	 *
2030 	 * Then, in perf_output_read(), the calculation would
2031 	 * work with no changes because:
2032 	 * - event is guaranteed scheduled in
2033 	 * - no scheduled out in between
2034 	 * - thus the timestamp would be the same
2035 	 *
2036 	 * But this is a bit hairy.
2037 	 *
2038 	 * So instead, we have an explicit cgroup call to remain
2039 	 * within the time time source all along. We believe it
2040 	 * is cleaner and simpler to understand.
2041 	 */
2042 	if (is_cgroup_event(event))
2043 		perf_cgroup_set_shadow_time(event, tstamp);
2044 	else
2045 		event->shadow_ctx_time = tstamp - ctx->timestamp;
2046 }
2047 
2048 #define MAX_INTERRUPTS (~0ULL)
2049 
2050 static void perf_log_throttle(struct perf_event *event, int enable);
2051 static void perf_log_itrace_start(struct perf_event *event);
2052 
2053 static int
2054 event_sched_in(struct perf_event *event,
2055 		 struct perf_cpu_context *cpuctx,
2056 		 struct perf_event_context *ctx)
2057 {
2058 	u64 tstamp = perf_event_time(event);
2059 	int ret = 0;
2060 
2061 	lockdep_assert_held(&ctx->lock);
2062 
2063 	if (event->state <= PERF_EVENT_STATE_OFF)
2064 		return 0;
2065 
2066 	WRITE_ONCE(event->oncpu, smp_processor_id());
2067 	/*
2068 	 * Order event::oncpu write to happen before the ACTIVE state
2069 	 * is visible.
2070 	 */
2071 	smp_wmb();
2072 	WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
2073 
2074 	/*
2075 	 * Unthrottle events, since we scheduled we might have missed several
2076 	 * ticks already, also for a heavily scheduling task there is little
2077 	 * guarantee it'll get a tick in a timely manner.
2078 	 */
2079 	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
2080 		perf_log_throttle(event, 1);
2081 		event->hw.interrupts = 0;
2082 	}
2083 
2084 	/*
2085 	 * The new state must be visible before we turn it on in the hardware:
2086 	 */
2087 	smp_wmb();
2088 
2089 	perf_pmu_disable(event->pmu);
2090 
2091 	perf_set_shadow_time(event, ctx, tstamp);
2092 
2093 	perf_log_itrace_start(event);
2094 
2095 	if (event->pmu->add(event, PERF_EF_START)) {
2096 		event->state = PERF_EVENT_STATE_INACTIVE;
2097 		event->oncpu = -1;
2098 		ret = -EAGAIN;
2099 		goto out;
2100 	}
2101 
2102 	event->tstamp_running += tstamp - event->tstamp_stopped;
2103 
2104 	if (!is_software_event(event))
2105 		cpuctx->active_oncpu++;
2106 	if (!ctx->nr_active++)
2107 		perf_event_ctx_activate(ctx);
2108 	if (event->attr.freq && event->attr.sample_freq)
2109 		ctx->nr_freq++;
2110 
2111 	if (event->attr.exclusive)
2112 		cpuctx->exclusive = 1;
2113 
2114 out:
2115 	perf_pmu_enable(event->pmu);
2116 
2117 	return ret;
2118 }
2119 
2120 static int
2121 group_sched_in(struct perf_event *group_event,
2122 	       struct perf_cpu_context *cpuctx,
2123 	       struct perf_event_context *ctx)
2124 {
2125 	struct perf_event *event, *partial_group = NULL;
2126 	struct pmu *pmu = ctx->pmu;
2127 	u64 now = ctx->time;
2128 	bool simulate = false;
2129 
2130 	if (group_event->state == PERF_EVENT_STATE_OFF)
2131 		return 0;
2132 
2133 	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
2134 
2135 	if (event_sched_in(group_event, cpuctx, ctx)) {
2136 		pmu->cancel_txn(pmu);
2137 		perf_mux_hrtimer_restart(cpuctx);
2138 		return -EAGAIN;
2139 	}
2140 
2141 	/*
2142 	 * Schedule in siblings as one group (if any):
2143 	 */
2144 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2145 		if (event_sched_in(event, cpuctx, ctx)) {
2146 			partial_group = event;
2147 			goto group_error;
2148 		}
2149 	}
2150 
2151 	if (!pmu->commit_txn(pmu))
2152 		return 0;
2153 
2154 group_error:
2155 	/*
2156 	 * Groups can be scheduled in as one unit only, so undo any
2157 	 * partial group before returning:
2158 	 * The events up to the failed event are scheduled out normally,
2159 	 * tstamp_stopped will be updated.
2160 	 *
2161 	 * The failed events and the remaining siblings need to have
2162 	 * their timings updated as if they had gone thru event_sched_in()
2163 	 * and event_sched_out(). This is required to get consistent timings
2164 	 * across the group. This also takes care of the case where the group
2165 	 * could never be scheduled by ensuring tstamp_stopped is set to mark
2166 	 * the time the event was actually stopped, such that time delta
2167 	 * calculation in update_event_times() is correct.
2168 	 */
2169 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2170 		if (event == partial_group)
2171 			simulate = true;
2172 
2173 		if (simulate) {
2174 			event->tstamp_running += now - event->tstamp_stopped;
2175 			event->tstamp_stopped = now;
2176 		} else {
2177 			event_sched_out(event, cpuctx, ctx);
2178 		}
2179 	}
2180 	event_sched_out(group_event, cpuctx, ctx);
2181 
2182 	pmu->cancel_txn(pmu);
2183 
2184 	perf_mux_hrtimer_restart(cpuctx);
2185 
2186 	return -EAGAIN;
2187 }
2188 
2189 /*
2190  * Work out whether we can put this event group on the CPU now.
2191  */
2192 static int group_can_go_on(struct perf_event *event,
2193 			   struct perf_cpu_context *cpuctx,
2194 			   int can_add_hw)
2195 {
2196 	/*
2197 	 * Groups consisting entirely of software events can always go on.
2198 	 */
2199 	if (event->group_caps & PERF_EV_CAP_SOFTWARE)
2200 		return 1;
2201 	/*
2202 	 * If an exclusive group is already on, no other hardware
2203 	 * events can go on.
2204 	 */
2205 	if (cpuctx->exclusive)
2206 		return 0;
2207 	/*
2208 	 * If this group is exclusive and there are already
2209 	 * events on the CPU, it can't go on.
2210 	 */
2211 	if (event->attr.exclusive && cpuctx->active_oncpu)
2212 		return 0;
2213 	/*
2214 	 * Otherwise, try to add it if all previous groups were able
2215 	 * to go on.
2216 	 */
2217 	return can_add_hw;
2218 }
2219 
2220 static void add_event_to_ctx(struct perf_event *event,
2221 			       struct perf_event_context *ctx)
2222 {
2223 	u64 tstamp = perf_event_time(event);
2224 
2225 	list_add_event(event, ctx);
2226 	perf_group_attach(event);
2227 	event->tstamp_enabled = tstamp;
2228 	event->tstamp_running = tstamp;
2229 	event->tstamp_stopped = tstamp;
2230 }
2231 
2232 static void ctx_sched_out(struct perf_event_context *ctx,
2233 			  struct perf_cpu_context *cpuctx,
2234 			  enum event_type_t event_type);
2235 static void
2236 ctx_sched_in(struct perf_event_context *ctx,
2237 	     struct perf_cpu_context *cpuctx,
2238 	     enum event_type_t event_type,
2239 	     struct task_struct *task);
2240 
2241 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2242 			       struct perf_event_context *ctx,
2243 			       enum event_type_t event_type)
2244 {
2245 	if (!cpuctx->task_ctx)
2246 		return;
2247 
2248 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2249 		return;
2250 
2251 	ctx_sched_out(ctx, cpuctx, event_type);
2252 }
2253 
2254 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2255 				struct perf_event_context *ctx,
2256 				struct task_struct *task)
2257 {
2258 	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2259 	if (ctx)
2260 		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2261 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2262 	if (ctx)
2263 		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2264 }
2265 
2266 /*
2267  * We want to maintain the following priority of scheduling:
2268  *  - CPU pinned (EVENT_CPU | EVENT_PINNED)
2269  *  - task pinned (EVENT_PINNED)
2270  *  - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
2271  *  - task flexible (EVENT_FLEXIBLE).
2272  *
2273  * In order to avoid unscheduling and scheduling back in everything every
2274  * time an event is added, only do it for the groups of equal priority and
2275  * below.
2276  *
2277  * This can be called after a batch operation on task events, in which case
2278  * event_type is a bit mask of the types of events involved. For CPU events,
2279  * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
2280  */
2281 static void ctx_resched(struct perf_cpu_context *cpuctx,
2282 			struct perf_event_context *task_ctx,
2283 			enum event_type_t event_type)
2284 {
2285 	enum event_type_t ctx_event_type = event_type & EVENT_ALL;
2286 	bool cpu_event = !!(event_type & EVENT_CPU);
2287 
2288 	/*
2289 	 * If pinned groups are involved, flexible groups also need to be
2290 	 * scheduled out.
2291 	 */
2292 	if (event_type & EVENT_PINNED)
2293 		event_type |= EVENT_FLEXIBLE;
2294 
2295 	perf_pmu_disable(cpuctx->ctx.pmu);
2296 	if (task_ctx)
2297 		task_ctx_sched_out(cpuctx, task_ctx, event_type);
2298 
2299 	/*
2300 	 * Decide which cpu ctx groups to schedule out based on the types
2301 	 * of events that caused rescheduling:
2302 	 *  - EVENT_CPU: schedule out corresponding groups;
2303 	 *  - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
2304 	 *  - otherwise, do nothing more.
2305 	 */
2306 	if (cpu_event)
2307 		cpu_ctx_sched_out(cpuctx, ctx_event_type);
2308 	else if (ctx_event_type & EVENT_PINNED)
2309 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2310 
2311 	perf_event_sched_in(cpuctx, task_ctx, current);
2312 	perf_pmu_enable(cpuctx->ctx.pmu);
2313 }
2314 
2315 /*
2316  * Cross CPU call to install and enable a performance event
2317  *
2318  * Very similar to remote_function() + event_function() but cannot assume that
2319  * things like ctx->is_active and cpuctx->task_ctx are set.
2320  */
2321 static int  __perf_install_in_context(void *info)
2322 {
2323 	struct perf_event *event = info;
2324 	struct perf_event_context *ctx = event->ctx;
2325 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2326 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
2327 	bool reprogram = true;
2328 	int ret = 0;
2329 
2330 	raw_spin_lock(&cpuctx->ctx.lock);
2331 	if (ctx->task) {
2332 		raw_spin_lock(&ctx->lock);
2333 		task_ctx = ctx;
2334 
2335 		reprogram = (ctx->task == current);
2336 
2337 		/*
2338 		 * If the task is running, it must be running on this CPU,
2339 		 * otherwise we cannot reprogram things.
2340 		 *
2341 		 * If its not running, we don't care, ctx->lock will
2342 		 * serialize against it becoming runnable.
2343 		 */
2344 		if (task_curr(ctx->task) && !reprogram) {
2345 			ret = -ESRCH;
2346 			goto unlock;
2347 		}
2348 
2349 		WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2350 	} else if (task_ctx) {
2351 		raw_spin_lock(&task_ctx->lock);
2352 	}
2353 
2354 	if (reprogram) {
2355 		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2356 		add_event_to_ctx(event, ctx);
2357 		ctx_resched(cpuctx, task_ctx, get_event_type(event));
2358 	} else {
2359 		add_event_to_ctx(event, ctx);
2360 	}
2361 
2362 unlock:
2363 	perf_ctx_unlock(cpuctx, task_ctx);
2364 
2365 	return ret;
2366 }
2367 
2368 /*
2369  * Attach a performance event to a context.
2370  *
2371  * Very similar to event_function_call, see comment there.
2372  */
2373 static void
2374 perf_install_in_context(struct perf_event_context *ctx,
2375 			struct perf_event *event,
2376 			int cpu)
2377 {
2378 	struct task_struct *task = READ_ONCE(ctx->task);
2379 
2380 	lockdep_assert_held(&ctx->mutex);
2381 
2382 	if (event->cpu != -1)
2383 		event->cpu = cpu;
2384 
2385 	/*
2386 	 * Ensures that if we can observe event->ctx, both the event and ctx
2387 	 * will be 'complete'. See perf_iterate_sb_cpu().
2388 	 */
2389 	smp_store_release(&event->ctx, ctx);
2390 
2391 	if (!task) {
2392 		cpu_function_call(cpu, __perf_install_in_context, event);
2393 		return;
2394 	}
2395 
2396 	/*
2397 	 * Should not happen, we validate the ctx is still alive before calling.
2398 	 */
2399 	if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2400 		return;
2401 
2402 	/*
2403 	 * Installing events is tricky because we cannot rely on ctx->is_active
2404 	 * to be set in case this is the nr_events 0 -> 1 transition.
2405 	 *
2406 	 * Instead we use task_curr(), which tells us if the task is running.
2407 	 * However, since we use task_curr() outside of rq::lock, we can race
2408 	 * against the actual state. This means the result can be wrong.
2409 	 *
2410 	 * If we get a false positive, we retry, this is harmless.
2411 	 *
2412 	 * If we get a false negative, things are complicated. If we are after
2413 	 * perf_event_context_sched_in() ctx::lock will serialize us, and the
2414 	 * value must be correct. If we're before, it doesn't matter since
2415 	 * perf_event_context_sched_in() will program the counter.
2416 	 *
2417 	 * However, this hinges on the remote context switch having observed
2418 	 * our task->perf_event_ctxp[] store, such that it will in fact take
2419 	 * ctx::lock in perf_event_context_sched_in().
2420 	 *
2421 	 * We do this by task_function_call(), if the IPI fails to hit the task
2422 	 * we know any future context switch of task must see the
2423 	 * perf_event_ctpx[] store.
2424 	 */
2425 
2426 	/*
2427 	 * This smp_mb() orders the task->perf_event_ctxp[] store with the
2428 	 * task_cpu() load, such that if the IPI then does not find the task
2429 	 * running, a future context switch of that task must observe the
2430 	 * store.
2431 	 */
2432 	smp_mb();
2433 again:
2434 	if (!task_function_call(task, __perf_install_in_context, event))
2435 		return;
2436 
2437 	raw_spin_lock_irq(&ctx->lock);
2438 	task = ctx->task;
2439 	if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
2440 		/*
2441 		 * Cannot happen because we already checked above (which also
2442 		 * cannot happen), and we hold ctx->mutex, which serializes us
2443 		 * against perf_event_exit_task_context().
2444 		 */
2445 		raw_spin_unlock_irq(&ctx->lock);
2446 		return;
2447 	}
2448 	/*
2449 	 * If the task is not running, ctx->lock will avoid it becoming so,
2450 	 * thus we can safely install the event.
2451 	 */
2452 	if (task_curr(task)) {
2453 		raw_spin_unlock_irq(&ctx->lock);
2454 		goto again;
2455 	}
2456 	add_event_to_ctx(event, ctx);
2457 	raw_spin_unlock_irq(&ctx->lock);
2458 }
2459 
2460 /*
2461  * Put a event into inactive state and update time fields.
2462  * Enabling the leader of a group effectively enables all
2463  * the group members that aren't explicitly disabled, so we
2464  * have to update their ->tstamp_enabled also.
2465  * Note: this works for group members as well as group leaders
2466  * since the non-leader members' sibling_lists will be empty.
2467  */
2468 static void __perf_event_mark_enabled(struct perf_event *event)
2469 {
2470 	struct perf_event *sub;
2471 	u64 tstamp = perf_event_time(event);
2472 
2473 	event->state = PERF_EVENT_STATE_INACTIVE;
2474 	event->tstamp_enabled = tstamp - event->total_time_enabled;
2475 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
2476 		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
2477 			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
2478 	}
2479 }
2480 
2481 /*
2482  * Cross CPU call to enable a performance event
2483  */
2484 static void __perf_event_enable(struct perf_event *event,
2485 				struct perf_cpu_context *cpuctx,
2486 				struct perf_event_context *ctx,
2487 				void *info)
2488 {
2489 	struct perf_event *leader = event->group_leader;
2490 	struct perf_event_context *task_ctx;
2491 
2492 	if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2493 	    event->state <= PERF_EVENT_STATE_ERROR)
2494 		return;
2495 
2496 	if (ctx->is_active)
2497 		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2498 
2499 	__perf_event_mark_enabled(event);
2500 
2501 	if (!ctx->is_active)
2502 		return;
2503 
2504 	if (!event_filter_match(event)) {
2505 		if (is_cgroup_event(event))
2506 			perf_cgroup_defer_enabled(event);
2507 		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2508 		return;
2509 	}
2510 
2511 	/*
2512 	 * If the event is in a group and isn't the group leader,
2513 	 * then don't put it on unless the group is on.
2514 	 */
2515 	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2516 		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2517 		return;
2518 	}
2519 
2520 	task_ctx = cpuctx->task_ctx;
2521 	if (ctx->task)
2522 		WARN_ON_ONCE(task_ctx != ctx);
2523 
2524 	ctx_resched(cpuctx, task_ctx, get_event_type(event));
2525 }
2526 
2527 /*
2528  * Enable a event.
2529  *
2530  * If event->ctx is a cloned context, callers must make sure that
2531  * every task struct that event->ctx->task could possibly point to
2532  * remains valid.  This condition is satisfied when called through
2533  * perf_event_for_each_child or perf_event_for_each as described
2534  * for perf_event_disable.
2535  */
2536 static void _perf_event_enable(struct perf_event *event)
2537 {
2538 	struct perf_event_context *ctx = event->ctx;
2539 
2540 	raw_spin_lock_irq(&ctx->lock);
2541 	if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2542 	    event->state <  PERF_EVENT_STATE_ERROR) {
2543 		raw_spin_unlock_irq(&ctx->lock);
2544 		return;
2545 	}
2546 
2547 	/*
2548 	 * If the event is in error state, clear that first.
2549 	 *
2550 	 * That way, if we see the event in error state below, we know that it
2551 	 * has gone back into error state, as distinct from the task having
2552 	 * been scheduled away before the cross-call arrived.
2553 	 */
2554 	if (event->state == PERF_EVENT_STATE_ERROR)
2555 		event->state = PERF_EVENT_STATE_OFF;
2556 	raw_spin_unlock_irq(&ctx->lock);
2557 
2558 	event_function_call(event, __perf_event_enable, NULL);
2559 }
2560 
2561 /*
2562  * See perf_event_disable();
2563  */
2564 void perf_event_enable(struct perf_event *event)
2565 {
2566 	struct perf_event_context *ctx;
2567 
2568 	ctx = perf_event_ctx_lock(event);
2569 	_perf_event_enable(event);
2570 	perf_event_ctx_unlock(event, ctx);
2571 }
2572 EXPORT_SYMBOL_GPL(perf_event_enable);
2573 
2574 struct stop_event_data {
2575 	struct perf_event	*event;
2576 	unsigned int		restart;
2577 };
2578 
2579 static int __perf_event_stop(void *info)
2580 {
2581 	struct stop_event_data *sd = info;
2582 	struct perf_event *event = sd->event;
2583 
2584 	/* if it's already INACTIVE, do nothing */
2585 	if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2586 		return 0;
2587 
2588 	/* matches smp_wmb() in event_sched_in() */
2589 	smp_rmb();
2590 
2591 	/*
2592 	 * There is a window with interrupts enabled before we get here,
2593 	 * so we need to check again lest we try to stop another CPU's event.
2594 	 */
2595 	if (READ_ONCE(event->oncpu) != smp_processor_id())
2596 		return -EAGAIN;
2597 
2598 	event->pmu->stop(event, PERF_EF_UPDATE);
2599 
2600 	/*
2601 	 * May race with the actual stop (through perf_pmu_output_stop()),
2602 	 * but it is only used for events with AUX ring buffer, and such
2603 	 * events will refuse to restart because of rb::aux_mmap_count==0,
2604 	 * see comments in perf_aux_output_begin().
2605 	 *
2606 	 * Since this is happening on a event-local CPU, no trace is lost
2607 	 * while restarting.
2608 	 */
2609 	if (sd->restart)
2610 		event->pmu->start(event, 0);
2611 
2612 	return 0;
2613 }
2614 
2615 static int perf_event_stop(struct perf_event *event, int restart)
2616 {
2617 	struct stop_event_data sd = {
2618 		.event		= event,
2619 		.restart	= restart,
2620 	};
2621 	int ret = 0;
2622 
2623 	do {
2624 		if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2625 			return 0;
2626 
2627 		/* matches smp_wmb() in event_sched_in() */
2628 		smp_rmb();
2629 
2630 		/*
2631 		 * We only want to restart ACTIVE events, so if the event goes
2632 		 * inactive here (event->oncpu==-1), there's nothing more to do;
2633 		 * fall through with ret==-ENXIO.
2634 		 */
2635 		ret = cpu_function_call(READ_ONCE(event->oncpu),
2636 					__perf_event_stop, &sd);
2637 	} while (ret == -EAGAIN);
2638 
2639 	return ret;
2640 }
2641 
2642 /*
2643  * In order to contain the amount of racy and tricky in the address filter
2644  * configuration management, it is a two part process:
2645  *
2646  * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
2647  *      we update the addresses of corresponding vmas in
2648  *	event::addr_filters_offs array and bump the event::addr_filters_gen;
2649  * (p2) when an event is scheduled in (pmu::add), it calls
2650  *      perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
2651  *      if the generation has changed since the previous call.
2652  *
2653  * If (p1) happens while the event is active, we restart it to force (p2).
2654  *
2655  * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
2656  *     pre-existing mappings, called once when new filters arrive via SET_FILTER
2657  *     ioctl;
2658  * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
2659  *     registered mapping, called for every new mmap(), with mm::mmap_sem down
2660  *     for reading;
2661  * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
2662  *     of exec.
2663  */
2664 void perf_event_addr_filters_sync(struct perf_event *event)
2665 {
2666 	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
2667 
2668 	if (!has_addr_filter(event))
2669 		return;
2670 
2671 	raw_spin_lock(&ifh->lock);
2672 	if (event->addr_filters_gen != event->hw.addr_filters_gen) {
2673 		event->pmu->addr_filters_sync(event);
2674 		event->hw.addr_filters_gen = event->addr_filters_gen;
2675 	}
2676 	raw_spin_unlock(&ifh->lock);
2677 }
2678 EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
2679 
2680 static int _perf_event_refresh(struct perf_event *event, int refresh)
2681 {
2682 	/*
2683 	 * not supported on inherited events
2684 	 */
2685 	if (event->attr.inherit || !is_sampling_event(event))
2686 		return -EINVAL;
2687 
2688 	atomic_add(refresh, &event->event_limit);
2689 	_perf_event_enable(event);
2690 
2691 	return 0;
2692 }
2693 
2694 /*
2695  * See perf_event_disable()
2696  */
2697 int perf_event_refresh(struct perf_event *event, int refresh)
2698 {
2699 	struct perf_event_context *ctx;
2700 	int ret;
2701 
2702 	ctx = perf_event_ctx_lock(event);
2703 	ret = _perf_event_refresh(event, refresh);
2704 	perf_event_ctx_unlock(event, ctx);
2705 
2706 	return ret;
2707 }
2708 EXPORT_SYMBOL_GPL(perf_event_refresh);
2709 
2710 static void ctx_sched_out(struct perf_event_context *ctx,
2711 			  struct perf_cpu_context *cpuctx,
2712 			  enum event_type_t event_type)
2713 {
2714 	int is_active = ctx->is_active;
2715 	struct perf_event *event;
2716 
2717 	lockdep_assert_held(&ctx->lock);
2718 
2719 	if (likely(!ctx->nr_events)) {
2720 		/*
2721 		 * See __perf_remove_from_context().
2722 		 */
2723 		WARN_ON_ONCE(ctx->is_active);
2724 		if (ctx->task)
2725 			WARN_ON_ONCE(cpuctx->task_ctx);
2726 		return;
2727 	}
2728 
2729 	ctx->is_active &= ~event_type;
2730 	if (!(ctx->is_active & EVENT_ALL))
2731 		ctx->is_active = 0;
2732 
2733 	if (ctx->task) {
2734 		WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2735 		if (!ctx->is_active)
2736 			cpuctx->task_ctx = NULL;
2737 	}
2738 
2739 	/*
2740 	 * Always update time if it was set; not only when it changes.
2741 	 * Otherwise we can 'forget' to update time for any but the last
2742 	 * context we sched out. For example:
2743 	 *
2744 	 *   ctx_sched_out(.event_type = EVENT_FLEXIBLE)
2745 	 *   ctx_sched_out(.event_type = EVENT_PINNED)
2746 	 *
2747 	 * would only update time for the pinned events.
2748 	 */
2749 	if (is_active & EVENT_TIME) {
2750 		/* update (and stop) ctx time */
2751 		update_context_time(ctx);
2752 		update_cgrp_time_from_cpuctx(cpuctx);
2753 	}
2754 
2755 	is_active ^= ctx->is_active; /* changed bits */
2756 
2757 	if (!ctx->nr_active || !(is_active & EVENT_ALL))
2758 		return;
2759 
2760 	perf_pmu_disable(ctx->pmu);
2761 	if (is_active & EVENT_PINNED) {
2762 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2763 			group_sched_out(event, cpuctx, ctx);
2764 	}
2765 
2766 	if (is_active & EVENT_FLEXIBLE) {
2767 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
2768 			group_sched_out(event, cpuctx, ctx);
2769 	}
2770 	perf_pmu_enable(ctx->pmu);
2771 }
2772 
2773 /*
2774  * Test whether two contexts are equivalent, i.e. whether they have both been
2775  * cloned from the same version of the same context.
2776  *
2777  * Equivalence is measured using a generation number in the context that is
2778  * incremented on each modification to it; see unclone_ctx(), list_add_event()
2779  * and list_del_event().
2780  */
2781 static int context_equiv(struct perf_event_context *ctx1,
2782 			 struct perf_event_context *ctx2)
2783 {
2784 	lockdep_assert_held(&ctx1->lock);
2785 	lockdep_assert_held(&ctx2->lock);
2786 
2787 	/* Pinning disables the swap optimization */
2788 	if (ctx1->pin_count || ctx2->pin_count)
2789 		return 0;
2790 
2791 	/* If ctx1 is the parent of ctx2 */
2792 	if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2793 		return 1;
2794 
2795 	/* If ctx2 is the parent of ctx1 */
2796 	if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2797 		return 1;
2798 
2799 	/*
2800 	 * If ctx1 and ctx2 have the same parent; we flatten the parent
2801 	 * hierarchy, see perf_event_init_context().
2802 	 */
2803 	if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2804 			ctx1->parent_gen == ctx2->parent_gen)
2805 		return 1;
2806 
2807 	/* Unmatched */
2808 	return 0;
2809 }
2810 
2811 static void __perf_event_sync_stat(struct perf_event *event,
2812 				     struct perf_event *next_event)
2813 {
2814 	u64 value;
2815 
2816 	if (!event->attr.inherit_stat)
2817 		return;
2818 
2819 	/*
2820 	 * Update the event value, we cannot use perf_event_read()
2821 	 * because we're in the middle of a context switch and have IRQs
2822 	 * disabled, which upsets smp_call_function_single(), however
2823 	 * we know the event must be on the current CPU, therefore we
2824 	 * don't need to use it.
2825 	 */
2826 	switch (event->state) {
2827 	case PERF_EVENT_STATE_ACTIVE:
2828 		event->pmu->read(event);
2829 		/* fall-through */
2830 
2831 	case PERF_EVENT_STATE_INACTIVE:
2832 		update_event_times(event);
2833 		break;
2834 
2835 	default:
2836 		break;
2837 	}
2838 
2839 	/*
2840 	 * In order to keep per-task stats reliable we need to flip the event
2841 	 * values when we flip the contexts.
2842 	 */
2843 	value = local64_read(&next_event->count);
2844 	value = local64_xchg(&event->count, value);
2845 	local64_set(&next_event->count, value);
2846 
2847 	swap(event->total_time_enabled, next_event->total_time_enabled);
2848 	swap(event->total_time_running, next_event->total_time_running);
2849 
2850 	/*
2851 	 * Since we swizzled the values, update the user visible data too.
2852 	 */
2853 	perf_event_update_userpage(event);
2854 	perf_event_update_userpage(next_event);
2855 }
2856 
2857 static void perf_event_sync_stat(struct perf_event_context *ctx,
2858 				   struct perf_event_context *next_ctx)
2859 {
2860 	struct perf_event *event, *next_event;
2861 
2862 	if (!ctx->nr_stat)
2863 		return;
2864 
2865 	update_context_time(ctx);
2866 
2867 	event = list_first_entry(&ctx->event_list,
2868 				   struct perf_event, event_entry);
2869 
2870 	next_event = list_first_entry(&next_ctx->event_list,
2871 					struct perf_event, event_entry);
2872 
2873 	while (&event->event_entry != &ctx->event_list &&
2874 	       &next_event->event_entry != &next_ctx->event_list) {
2875 
2876 		__perf_event_sync_stat(event, next_event);
2877 
2878 		event = list_next_entry(event, event_entry);
2879 		next_event = list_next_entry(next_event, event_entry);
2880 	}
2881 }
2882 
2883 static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2884 					 struct task_struct *next)
2885 {
2886 	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
2887 	struct perf_event_context *next_ctx;
2888 	struct perf_event_context *parent, *next_parent;
2889 	struct perf_cpu_context *cpuctx;
2890 	int do_switch = 1;
2891 
2892 	if (likely(!ctx))
2893 		return;
2894 
2895 	cpuctx = __get_cpu_context(ctx);
2896 	if (!cpuctx->task_ctx)
2897 		return;
2898 
2899 	rcu_read_lock();
2900 	next_ctx = next->perf_event_ctxp[ctxn];
2901 	if (!next_ctx)
2902 		goto unlock;
2903 
2904 	parent = rcu_dereference(ctx->parent_ctx);
2905 	next_parent = rcu_dereference(next_ctx->parent_ctx);
2906 
2907 	/* If neither context have a parent context; they cannot be clones. */
2908 	if (!parent && !next_parent)
2909 		goto unlock;
2910 
2911 	if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
2912 		/*
2913 		 * Looks like the two contexts are clones, so we might be
2914 		 * able to optimize the context switch.  We lock both
2915 		 * contexts and check that they are clones under the
2916 		 * lock (including re-checking that neither has been
2917 		 * uncloned in the meantime).  It doesn't matter which
2918 		 * order we take the locks because no other cpu could
2919 		 * be trying to lock both of these tasks.
2920 		 */
2921 		raw_spin_lock(&ctx->lock);
2922 		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2923 		if (context_equiv(ctx, next_ctx)) {
2924 			WRITE_ONCE(ctx->task, next);
2925 			WRITE_ONCE(next_ctx->task, task);
2926 
2927 			swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
2928 
2929 			/*
2930 			 * RCU_INIT_POINTER here is safe because we've not
2931 			 * modified the ctx and the above modification of
2932 			 * ctx->task and ctx->task_ctx_data are immaterial
2933 			 * since those values are always verified under
2934 			 * ctx->lock which we're now holding.
2935 			 */
2936 			RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
2937 			RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
2938 
2939 			do_switch = 0;
2940 
2941 			perf_event_sync_stat(ctx, next_ctx);
2942 		}
2943 		raw_spin_unlock(&next_ctx->lock);
2944 		raw_spin_unlock(&ctx->lock);
2945 	}
2946 unlock:
2947 	rcu_read_unlock();
2948 
2949 	if (do_switch) {
2950 		raw_spin_lock(&ctx->lock);
2951 		task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
2952 		raw_spin_unlock(&ctx->lock);
2953 	}
2954 }
2955 
2956 static DEFINE_PER_CPU(struct list_head, sched_cb_list);
2957 
2958 void perf_sched_cb_dec(struct pmu *pmu)
2959 {
2960 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2961 
2962 	this_cpu_dec(perf_sched_cb_usages);
2963 
2964 	if (!--cpuctx->sched_cb_usage)
2965 		list_del(&cpuctx->sched_cb_entry);
2966 }
2967 
2968 
2969 void perf_sched_cb_inc(struct pmu *pmu)
2970 {
2971 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2972 
2973 	if (!cpuctx->sched_cb_usage++)
2974 		list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
2975 
2976 	this_cpu_inc(perf_sched_cb_usages);
2977 }
2978 
2979 /*
2980  * This function provides the context switch callback to the lower code
2981  * layer. It is invoked ONLY when the context switch callback is enabled.
2982  *
2983  * This callback is relevant even to per-cpu events; for example multi event
2984  * PEBS requires this to provide PID/TID information. This requires we flush
2985  * all queued PEBS records before we context switch to a new task.
2986  */
2987 static void perf_pmu_sched_task(struct task_struct *prev,
2988 				struct task_struct *next,
2989 				bool sched_in)
2990 {
2991 	struct perf_cpu_context *cpuctx;
2992 	struct pmu *pmu;
2993 
2994 	if (prev == next)
2995 		return;
2996 
2997 	list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
2998 		pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
2999 
3000 		if (WARN_ON_ONCE(!pmu->sched_task))
3001 			continue;
3002 
3003 		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3004 		perf_pmu_disable(pmu);
3005 
3006 		pmu->sched_task(cpuctx->task_ctx, sched_in);
3007 
3008 		perf_pmu_enable(pmu);
3009 		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3010 	}
3011 }
3012 
3013 static void perf_event_switch(struct task_struct *task,
3014 			      struct task_struct *next_prev, bool sched_in);
3015 
3016 #define for_each_task_context_nr(ctxn)					\
3017 	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
3018 
3019 /*
3020  * Called from scheduler to remove the events of the current task,
3021  * with interrupts disabled.
3022  *
3023  * We stop each event and update the event value in event->count.
3024  *
3025  * This does not protect us against NMI, but disable()
3026  * sets the disabled bit in the control field of event _before_
3027  * accessing the event control register. If a NMI hits, then it will
3028  * not restart the event.
3029  */
3030 void __perf_event_task_sched_out(struct task_struct *task,
3031 				 struct task_struct *next)
3032 {
3033 	int ctxn;
3034 
3035 	if (__this_cpu_read(perf_sched_cb_usages))
3036 		perf_pmu_sched_task(task, next, false);
3037 
3038 	if (atomic_read(&nr_switch_events))
3039 		perf_event_switch(task, next, false);
3040 
3041 	for_each_task_context_nr(ctxn)
3042 		perf_event_context_sched_out(task, ctxn, next);
3043 
3044 	/*
3045 	 * if cgroup events exist on this CPU, then we need
3046 	 * to check if we have to switch out PMU state.
3047 	 * cgroup event are system-wide mode only
3048 	 */
3049 	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3050 		perf_cgroup_sched_out(task, next);
3051 }
3052 
3053 /*
3054  * Called with IRQs disabled
3055  */
3056 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
3057 			      enum event_type_t event_type)
3058 {
3059 	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
3060 }
3061 
3062 static void
3063 ctx_pinned_sched_in(struct perf_event_context *ctx,
3064 		    struct perf_cpu_context *cpuctx)
3065 {
3066 	struct perf_event *event;
3067 
3068 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
3069 		if (event->state <= PERF_EVENT_STATE_OFF)
3070 			continue;
3071 		if (!event_filter_match(event))
3072 			continue;
3073 
3074 		/* may need to reset tstamp_enabled */
3075 		if (is_cgroup_event(event))
3076 			perf_cgroup_mark_enabled(event, ctx);
3077 
3078 		if (group_can_go_on(event, cpuctx, 1))
3079 			group_sched_in(event, cpuctx, ctx);
3080 
3081 		/*
3082 		 * If this pinned group hasn't been scheduled,
3083 		 * put it in error state.
3084 		 */
3085 		if (event->state == PERF_EVENT_STATE_INACTIVE) {
3086 			update_group_times(event);
3087 			event->state = PERF_EVENT_STATE_ERROR;
3088 		}
3089 	}
3090 }
3091 
3092 static void
3093 ctx_flexible_sched_in(struct perf_event_context *ctx,
3094 		      struct perf_cpu_context *cpuctx)
3095 {
3096 	struct perf_event *event;
3097 	int can_add_hw = 1;
3098 
3099 	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
3100 		/* Ignore events in OFF or ERROR state */
3101 		if (event->state <= PERF_EVENT_STATE_OFF)
3102 			continue;
3103 		/*
3104 		 * Listen to the 'cpu' scheduling filter constraint
3105 		 * of events:
3106 		 */
3107 		if (!event_filter_match(event))
3108 			continue;
3109 
3110 		/* may need to reset tstamp_enabled */
3111 		if (is_cgroup_event(event))
3112 			perf_cgroup_mark_enabled(event, ctx);
3113 
3114 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
3115 			if (group_sched_in(event, cpuctx, ctx))
3116 				can_add_hw = 0;
3117 		}
3118 	}
3119 }
3120 
3121 static void
3122 ctx_sched_in(struct perf_event_context *ctx,
3123 	     struct perf_cpu_context *cpuctx,
3124 	     enum event_type_t event_type,
3125 	     struct task_struct *task)
3126 {
3127 	int is_active = ctx->is_active;
3128 	u64 now;
3129 
3130 	lockdep_assert_held(&ctx->lock);
3131 
3132 	if (likely(!ctx->nr_events))
3133 		return;
3134 
3135 	ctx->is_active |= (event_type | EVENT_TIME);
3136 	if (ctx->task) {
3137 		if (!is_active)
3138 			cpuctx->task_ctx = ctx;
3139 		else
3140 			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3141 	}
3142 
3143 	is_active ^= ctx->is_active; /* changed bits */
3144 
3145 	if (is_active & EVENT_TIME) {
3146 		/* start ctx time */
3147 		now = perf_clock();
3148 		ctx->timestamp = now;
3149 		perf_cgroup_set_timestamp(task, ctx);
3150 	}
3151 
3152 	/*
3153 	 * First go through the list and put on any pinned groups
3154 	 * in order to give them the best chance of going on.
3155 	 */
3156 	if (is_active & EVENT_PINNED)
3157 		ctx_pinned_sched_in(ctx, cpuctx);
3158 
3159 	/* Then walk through the lower prio flexible groups */
3160 	if (is_active & EVENT_FLEXIBLE)
3161 		ctx_flexible_sched_in(ctx, cpuctx);
3162 }
3163 
3164 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
3165 			     enum event_type_t event_type,
3166 			     struct task_struct *task)
3167 {
3168 	struct perf_event_context *ctx = &cpuctx->ctx;
3169 
3170 	ctx_sched_in(ctx, cpuctx, event_type, task);
3171 }
3172 
3173 static void perf_event_context_sched_in(struct perf_event_context *ctx,
3174 					struct task_struct *task)
3175 {
3176 	struct perf_cpu_context *cpuctx;
3177 
3178 	cpuctx = __get_cpu_context(ctx);
3179 	if (cpuctx->task_ctx == ctx)
3180 		return;
3181 
3182 	perf_ctx_lock(cpuctx, ctx);
3183 	perf_pmu_disable(ctx->pmu);
3184 	/*
3185 	 * We want to keep the following priority order:
3186 	 * cpu pinned (that don't need to move), task pinned,
3187 	 * cpu flexible, task flexible.
3188 	 *
3189 	 * However, if task's ctx is not carrying any pinned
3190 	 * events, no need to flip the cpuctx's events around.
3191 	 */
3192 	if (!list_empty(&ctx->pinned_groups))
3193 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3194 	perf_event_sched_in(cpuctx, ctx, task);
3195 	perf_pmu_enable(ctx->pmu);
3196 	perf_ctx_unlock(cpuctx, ctx);
3197 }
3198 
3199 /*
3200  * Called from scheduler to add the events of the current task
3201  * with interrupts disabled.
3202  *
3203  * We restore the event value and then enable it.
3204  *
3205  * This does not protect us against NMI, but enable()
3206  * sets the enabled bit in the control field of event _before_
3207  * accessing the event control register. If a NMI hits, then it will
3208  * keep the event running.
3209  */
3210 void __perf_event_task_sched_in(struct task_struct *prev,
3211 				struct task_struct *task)
3212 {
3213 	struct perf_event_context *ctx;
3214 	int ctxn;
3215 
3216 	/*
3217 	 * If cgroup events exist on this CPU, then we need to check if we have
3218 	 * to switch in PMU state; cgroup event are system-wide mode only.
3219 	 *
3220 	 * Since cgroup events are CPU events, we must schedule these in before
3221 	 * we schedule in the task events.
3222 	 */
3223 	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3224 		perf_cgroup_sched_in(prev, task);
3225 
3226 	for_each_task_context_nr(ctxn) {
3227 		ctx = task->perf_event_ctxp[ctxn];
3228 		if (likely(!ctx))
3229 			continue;
3230 
3231 		perf_event_context_sched_in(ctx, task);
3232 	}
3233 
3234 	if (atomic_read(&nr_switch_events))
3235 		perf_event_switch(task, prev, true);
3236 
3237 	if (__this_cpu_read(perf_sched_cb_usages))
3238 		perf_pmu_sched_task(prev, task, true);
3239 }
3240 
3241 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
3242 {
3243 	u64 frequency = event->attr.sample_freq;
3244 	u64 sec = NSEC_PER_SEC;
3245 	u64 divisor, dividend;
3246 
3247 	int count_fls, nsec_fls, frequency_fls, sec_fls;
3248 
3249 	count_fls = fls64(count);
3250 	nsec_fls = fls64(nsec);
3251 	frequency_fls = fls64(frequency);
3252 	sec_fls = 30;
3253 
3254 	/*
3255 	 * We got @count in @nsec, with a target of sample_freq HZ
3256 	 * the target period becomes:
3257 	 *
3258 	 *             @count * 10^9
3259 	 * period = -------------------
3260 	 *          @nsec * sample_freq
3261 	 *
3262 	 */
3263 
3264 	/*
3265 	 * Reduce accuracy by one bit such that @a and @b converge
3266 	 * to a similar magnitude.
3267 	 */
3268 #define REDUCE_FLS(a, b)		\
3269 do {					\
3270 	if (a##_fls > b##_fls) {	\
3271 		a >>= 1;		\
3272 		a##_fls--;		\
3273 	} else {			\
3274 		b >>= 1;		\
3275 		b##_fls--;		\
3276 	}				\
3277 } while (0)
3278 
3279 	/*
3280 	 * Reduce accuracy until either term fits in a u64, then proceed with
3281 	 * the other, so that finally we can do a u64/u64 division.
3282 	 */
3283 	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
3284 		REDUCE_FLS(nsec, frequency);
3285 		REDUCE_FLS(sec, count);
3286 	}
3287 
3288 	if (count_fls + sec_fls > 64) {
3289 		divisor = nsec * frequency;
3290 
3291 		while (count_fls + sec_fls > 64) {
3292 			REDUCE_FLS(count, sec);
3293 			divisor >>= 1;
3294 		}
3295 
3296 		dividend = count * sec;
3297 	} else {
3298 		dividend = count * sec;
3299 
3300 		while (nsec_fls + frequency_fls > 64) {
3301 			REDUCE_FLS(nsec, frequency);
3302 			dividend >>= 1;
3303 		}
3304 
3305 		divisor = nsec * frequency;
3306 	}
3307 
3308 	if (!divisor)
3309 		return dividend;
3310 
3311 	return div64_u64(dividend, divisor);
3312 }
3313 
3314 static DEFINE_PER_CPU(int, perf_throttled_count);
3315 static DEFINE_PER_CPU(u64, perf_throttled_seq);
3316 
3317 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
3318 {
3319 	struct hw_perf_event *hwc = &event->hw;
3320 	s64 period, sample_period;
3321 	s64 delta;
3322 
3323 	period = perf_calculate_period(event, nsec, count);
3324 
3325 	delta = (s64)(period - hwc->sample_period);
3326 	delta = (delta + 7) / 8; /* low pass filter */
3327 
3328 	sample_period = hwc->sample_period + delta;
3329 
3330 	if (!sample_period)
3331 		sample_period = 1;
3332 
3333 	hwc->sample_period = sample_period;
3334 
3335 	if (local64_read(&hwc->period_left) > 8*sample_period) {
3336 		if (disable)
3337 			event->pmu->stop(event, PERF_EF_UPDATE);
3338 
3339 		local64_set(&hwc->period_left, 0);
3340 
3341 		if (disable)
3342 			event->pmu->start(event, PERF_EF_RELOAD);
3343 	}
3344 }
3345 
3346 /*
3347  * combine freq adjustment with unthrottling to avoid two passes over the
3348  * events. At the same time, make sure, having freq events does not change
3349  * the rate of unthrottling as that would introduce bias.
3350  */
3351 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
3352 					   int needs_unthr)
3353 {
3354 	struct perf_event *event;
3355 	struct hw_perf_event *hwc;
3356 	u64 now, period = TICK_NSEC;
3357 	s64 delta;
3358 
3359 	/*
3360 	 * only need to iterate over all events iff:
3361 	 * - context have events in frequency mode (needs freq adjust)
3362 	 * - there are events to unthrottle on this cpu
3363 	 */
3364 	if (!(ctx->nr_freq || needs_unthr))
3365 		return;
3366 
3367 	raw_spin_lock(&ctx->lock);
3368 	perf_pmu_disable(ctx->pmu);
3369 
3370 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3371 		if (event->state != PERF_EVENT_STATE_ACTIVE)
3372 			continue;
3373 
3374 		if (!event_filter_match(event))
3375 			continue;
3376 
3377 		perf_pmu_disable(event->pmu);
3378 
3379 		hwc = &event->hw;
3380 
3381 		if (hwc->interrupts == MAX_INTERRUPTS) {
3382 			hwc->interrupts = 0;
3383 			perf_log_throttle(event, 1);
3384 			event->pmu->start(event, 0);
3385 		}
3386 
3387 		if (!event->attr.freq || !event->attr.sample_freq)
3388 			goto next;
3389 
3390 		/*
3391 		 * stop the event and update event->count
3392 		 */
3393 		event->pmu->stop(event, PERF_EF_UPDATE);
3394 
3395 		now = local64_read(&event->count);
3396 		delta = now - hwc->freq_count_stamp;
3397 		hwc->freq_count_stamp = now;
3398 
3399 		/*
3400 		 * restart the event
3401 		 * reload only if value has changed
3402 		 * we have stopped the event so tell that
3403 		 * to perf_adjust_period() to avoid stopping it
3404 		 * twice.
3405 		 */
3406 		if (delta > 0)
3407 			perf_adjust_period(event, period, delta, false);
3408 
3409 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
3410 	next:
3411 		perf_pmu_enable(event->pmu);
3412 	}
3413 
3414 	perf_pmu_enable(ctx->pmu);
3415 	raw_spin_unlock(&ctx->lock);
3416 }
3417 
3418 /*
3419  * Round-robin a context's events:
3420  */
3421 static void rotate_ctx(struct perf_event_context *ctx)
3422 {
3423 	/*
3424 	 * Rotate the first entry last of non-pinned groups. Rotation might be
3425 	 * disabled by the inheritance code.
3426 	 */
3427 	if (!ctx->rotate_disable)
3428 		list_rotate_left(&ctx->flexible_groups);
3429 }
3430 
3431 static int perf_rotate_context(struct perf_cpu_context *cpuctx)
3432 {
3433 	struct perf_event_context *ctx = NULL;
3434 	int rotate = 0;
3435 
3436 	if (cpuctx->ctx.nr_events) {
3437 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
3438 			rotate = 1;
3439 	}
3440 
3441 	ctx = cpuctx->task_ctx;
3442 	if (ctx && ctx->nr_events) {
3443 		if (ctx->nr_events != ctx->nr_active)
3444 			rotate = 1;
3445 	}
3446 
3447 	if (!rotate)
3448 		goto done;
3449 
3450 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3451 	perf_pmu_disable(cpuctx->ctx.pmu);
3452 
3453 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3454 	if (ctx)
3455 		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
3456 
3457 	rotate_ctx(&cpuctx->ctx);
3458 	if (ctx)
3459 		rotate_ctx(ctx);
3460 
3461 	perf_event_sched_in(cpuctx, ctx, current);
3462 
3463 	perf_pmu_enable(cpuctx->ctx.pmu);
3464 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3465 done:
3466 
3467 	return rotate;
3468 }
3469 
3470 void perf_event_task_tick(void)
3471 {
3472 	struct list_head *head = this_cpu_ptr(&active_ctx_list);
3473 	struct perf_event_context *ctx, *tmp;
3474 	int throttled;
3475 
3476 	WARN_ON(!irqs_disabled());
3477 
3478 	__this_cpu_inc(perf_throttled_seq);
3479 	throttled = __this_cpu_xchg(perf_throttled_count, 0);
3480 	tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
3481 
3482 	list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
3483 		perf_adjust_freq_unthr_context(ctx, throttled);
3484 }
3485 
3486 static int event_enable_on_exec(struct perf_event *event,
3487 				struct perf_event_context *ctx)
3488 {
3489 	if (!event->attr.enable_on_exec)
3490 		return 0;
3491 
3492 	event->attr.enable_on_exec = 0;
3493 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
3494 		return 0;
3495 
3496 	__perf_event_mark_enabled(event);
3497 
3498 	return 1;
3499 }
3500 
3501 /*
3502  * Enable all of a task's events that have been marked enable-on-exec.
3503  * This expects task == current.
3504  */
3505 static void perf_event_enable_on_exec(int ctxn)
3506 {
3507 	struct perf_event_context *ctx, *clone_ctx = NULL;
3508 	enum event_type_t event_type = 0;
3509 	struct perf_cpu_context *cpuctx;
3510 	struct perf_event *event;
3511 	unsigned long flags;
3512 	int enabled = 0;
3513 
3514 	local_irq_save(flags);
3515 	ctx = current->perf_event_ctxp[ctxn];
3516 	if (!ctx || !ctx->nr_events)
3517 		goto out;
3518 
3519 	cpuctx = __get_cpu_context(ctx);
3520 	perf_ctx_lock(cpuctx, ctx);
3521 	ctx_sched_out(ctx, cpuctx, EVENT_TIME);
3522 	list_for_each_entry(event, &ctx->event_list, event_entry) {
3523 		enabled |= event_enable_on_exec(event, ctx);
3524 		event_type |= get_event_type(event);
3525 	}
3526 
3527 	/*
3528 	 * Unclone and reschedule this context if we enabled any event.
3529 	 */
3530 	if (enabled) {
3531 		clone_ctx = unclone_ctx(ctx);
3532 		ctx_resched(cpuctx, ctx, event_type);
3533 	} else {
3534 		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
3535 	}
3536 	perf_ctx_unlock(cpuctx, ctx);
3537 
3538 out:
3539 	local_irq_restore(flags);
3540 
3541 	if (clone_ctx)
3542 		put_ctx(clone_ctx);
3543 }
3544 
3545 struct perf_read_data {
3546 	struct perf_event *event;
3547 	bool group;
3548 	int ret;
3549 };
3550 
3551 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
3552 {
3553 	u16 local_pkg, event_pkg;
3554 
3555 	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
3556 		int local_cpu = smp_processor_id();
3557 
3558 		event_pkg = topology_physical_package_id(event_cpu);
3559 		local_pkg = topology_physical_package_id(local_cpu);
3560 
3561 		if (event_pkg == local_pkg)
3562 			return local_cpu;
3563 	}
3564 
3565 	return event_cpu;
3566 }
3567 
3568 /*
3569  * Cross CPU call to read the hardware event
3570  */
3571 static void __perf_event_read(void *info)
3572 {
3573 	struct perf_read_data *data = info;
3574 	struct perf_event *sub, *event = data->event;
3575 	struct perf_event_context *ctx = event->ctx;
3576 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
3577 	struct pmu *pmu = event->pmu;
3578 
3579 	/*
3580 	 * If this is a task context, we need to check whether it is
3581 	 * the current task context of this cpu.  If not it has been
3582 	 * scheduled out before the smp call arrived.  In that case
3583 	 * event->count would have been updated to a recent sample
3584 	 * when the event was scheduled out.
3585 	 */
3586 	if (ctx->task && cpuctx->task_ctx != ctx)
3587 		return;
3588 
3589 	raw_spin_lock(&ctx->lock);
3590 	if (ctx->is_active) {
3591 		update_context_time(ctx);
3592 		update_cgrp_time_from_event(event);
3593 	}
3594 
3595 	update_event_times(event);
3596 	if (event->state != PERF_EVENT_STATE_ACTIVE)
3597 		goto unlock;
3598 
3599 	if (!data->group) {
3600 		pmu->read(event);
3601 		data->ret = 0;
3602 		goto unlock;
3603 	}
3604 
3605 	pmu->start_txn(pmu, PERF_PMU_TXN_READ);
3606 
3607 	pmu->read(event);
3608 
3609 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
3610 		update_event_times(sub);
3611 		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
3612 			/*
3613 			 * Use sibling's PMU rather than @event's since
3614 			 * sibling could be on different (eg: software) PMU.
3615 			 */
3616 			sub->pmu->read(sub);
3617 		}
3618 	}
3619 
3620 	data->ret = pmu->commit_txn(pmu);
3621 
3622 unlock:
3623 	raw_spin_unlock(&ctx->lock);
3624 }
3625 
3626 static inline u64 perf_event_count(struct perf_event *event)
3627 {
3628 	if (event->pmu->count)
3629 		return event->pmu->count(event);
3630 
3631 	return __perf_event_count(event);
3632 }
3633 
3634 /*
3635  * NMI-safe method to read a local event, that is an event that
3636  * is:
3637  *   - either for the current task, or for this CPU
3638  *   - does not have inherit set, for inherited task events
3639  *     will not be local and we cannot read them atomically
3640  *   - must not have a pmu::count method
3641  */
3642 int perf_event_read_local(struct perf_event *event, u64 *value)
3643 {
3644 	unsigned long flags;
3645 	int ret = 0;
3646 
3647 	/*
3648 	 * Disabling interrupts avoids all counter scheduling (context
3649 	 * switches, timer based rotation and IPIs).
3650 	 */
3651 	local_irq_save(flags);
3652 
3653 	/*
3654 	 * It must not be an event with inherit set, we cannot read
3655 	 * all child counters from atomic context.
3656 	 */
3657 	if (event->attr.inherit) {
3658 		ret = -EOPNOTSUPP;
3659 		goto out;
3660 	}
3661 
3662 	/*
3663 	 * It must not have a pmu::count method, those are not
3664 	 * NMI safe.
3665 	 */
3666 	if (event->pmu->count) {
3667 		ret = -EOPNOTSUPP;
3668 		goto out;
3669 	}
3670 
3671 	/* If this is a per-task event, it must be for current */
3672 	if ((event->attach_state & PERF_ATTACH_TASK) &&
3673 	    event->hw.target != current) {
3674 		ret = -EINVAL;
3675 		goto out;
3676 	}
3677 
3678 	/* If this is a per-CPU event, it must be for this CPU */
3679 	if (!(event->attach_state & PERF_ATTACH_TASK) &&
3680 	    event->cpu != smp_processor_id()) {
3681 		ret = -EINVAL;
3682 		goto out;
3683 	}
3684 
3685 	/*
3686 	 * If the event is currently on this CPU, its either a per-task event,
3687 	 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
3688 	 * oncpu == -1).
3689 	 */
3690 	if (event->oncpu == smp_processor_id())
3691 		event->pmu->read(event);
3692 
3693 	*value = local64_read(&event->count);
3694 out:
3695 	local_irq_restore(flags);
3696 
3697 	return ret;
3698 }
3699 
3700 static int perf_event_read(struct perf_event *event, bool group)
3701 {
3702 	int event_cpu, ret = 0;
3703 
3704 	/*
3705 	 * If event is enabled and currently active on a CPU, update the
3706 	 * value in the event structure:
3707 	 */
3708 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
3709 		struct perf_read_data data = {
3710 			.event = event,
3711 			.group = group,
3712 			.ret = 0,
3713 		};
3714 
3715 		event_cpu = READ_ONCE(event->oncpu);
3716 		if ((unsigned)event_cpu >= nr_cpu_ids)
3717 			return 0;
3718 
3719 		preempt_disable();
3720 		event_cpu = __perf_event_read_cpu(event, event_cpu);
3721 
3722 		/*
3723 		 * Purposely ignore the smp_call_function_single() return
3724 		 * value.
3725 		 *
3726 		 * If event_cpu isn't a valid CPU it means the event got
3727 		 * scheduled out and that will have updated the event count.
3728 		 *
3729 		 * Therefore, either way, we'll have an up-to-date event count
3730 		 * after this.
3731 		 */
3732 		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
3733 		preempt_enable();
3734 		ret = data.ret;
3735 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
3736 		struct perf_event_context *ctx = event->ctx;
3737 		unsigned long flags;
3738 
3739 		raw_spin_lock_irqsave(&ctx->lock, flags);
3740 		/*
3741 		 * may read while context is not active
3742 		 * (e.g., thread is blocked), in that case
3743 		 * we cannot update context time
3744 		 */
3745 		if (ctx->is_active) {
3746 			update_context_time(ctx);
3747 			update_cgrp_time_from_event(event);
3748 		}
3749 		if (group)
3750 			update_group_times(event);
3751 		else
3752 			update_event_times(event);
3753 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
3754 	}
3755 
3756 	return ret;
3757 }
3758 
3759 /*
3760  * Initialize the perf_event context in a task_struct:
3761  */
3762 static void __perf_event_init_context(struct perf_event_context *ctx)
3763 {
3764 	raw_spin_lock_init(&ctx->lock);
3765 	mutex_init(&ctx->mutex);
3766 	INIT_LIST_HEAD(&ctx->active_ctx_list);
3767 	INIT_LIST_HEAD(&ctx->pinned_groups);
3768 	INIT_LIST_HEAD(&ctx->flexible_groups);
3769 	INIT_LIST_HEAD(&ctx->event_list);
3770 	atomic_set(&ctx->refcount, 1);
3771 }
3772 
3773 static struct perf_event_context *
3774 alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3775 {
3776 	struct perf_event_context *ctx;
3777 
3778 	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3779 	if (!ctx)
3780 		return NULL;
3781 
3782 	__perf_event_init_context(ctx);
3783 	if (task) {
3784 		ctx->task = task;
3785 		get_task_struct(task);
3786 	}
3787 	ctx->pmu = pmu;
3788 
3789 	return ctx;
3790 }
3791 
3792 static struct task_struct *
3793 find_lively_task_by_vpid(pid_t vpid)
3794 {
3795 	struct task_struct *task;
3796 
3797 	rcu_read_lock();
3798 	if (!vpid)
3799 		task = current;
3800 	else
3801 		task = find_task_by_vpid(vpid);
3802 	if (task)
3803 		get_task_struct(task);
3804 	rcu_read_unlock();
3805 
3806 	if (!task)
3807 		return ERR_PTR(-ESRCH);
3808 
3809 	return task;
3810 }
3811 
3812 /*
3813  * Returns a matching context with refcount and pincount.
3814  */
3815 static struct perf_event_context *
3816 find_get_context(struct pmu *pmu, struct task_struct *task,
3817 		struct perf_event *event)
3818 {
3819 	struct perf_event_context *ctx, *clone_ctx = NULL;
3820 	struct perf_cpu_context *cpuctx;
3821 	void *task_ctx_data = NULL;
3822 	unsigned long flags;
3823 	int ctxn, err;
3824 	int cpu = event->cpu;
3825 
3826 	if (!task) {
3827 		/* Must be root to operate on a CPU event: */
3828 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3829 			return ERR_PTR(-EACCES);
3830 
3831 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
3832 		ctx = &cpuctx->ctx;
3833 		get_ctx(ctx);
3834 		++ctx->pin_count;
3835 
3836 		return ctx;
3837 	}
3838 
3839 	err = -EINVAL;
3840 	ctxn = pmu->task_ctx_nr;
3841 	if (ctxn < 0)
3842 		goto errout;
3843 
3844 	if (event->attach_state & PERF_ATTACH_TASK_DATA) {
3845 		task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
3846 		if (!task_ctx_data) {
3847 			err = -ENOMEM;
3848 			goto errout;
3849 		}
3850 	}
3851 
3852 retry:
3853 	ctx = perf_lock_task_context(task, ctxn, &flags);
3854 	if (ctx) {
3855 		clone_ctx = unclone_ctx(ctx);
3856 		++ctx->pin_count;
3857 
3858 		if (task_ctx_data && !ctx->task_ctx_data) {
3859 			ctx->task_ctx_data = task_ctx_data;
3860 			task_ctx_data = NULL;
3861 		}
3862 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
3863 
3864 		if (clone_ctx)
3865 			put_ctx(clone_ctx);
3866 	} else {
3867 		ctx = alloc_perf_context(pmu, task);
3868 		err = -ENOMEM;
3869 		if (!ctx)
3870 			goto errout;
3871 
3872 		if (task_ctx_data) {
3873 			ctx->task_ctx_data = task_ctx_data;
3874 			task_ctx_data = NULL;
3875 		}
3876 
3877 		err = 0;
3878 		mutex_lock(&task->perf_event_mutex);
3879 		/*
3880 		 * If it has already passed perf_event_exit_task().
3881 		 * we must see PF_EXITING, it takes this mutex too.
3882 		 */
3883 		if (task->flags & PF_EXITING)
3884 			err = -ESRCH;
3885 		else if (task->perf_event_ctxp[ctxn])
3886 			err = -EAGAIN;
3887 		else {
3888 			get_ctx(ctx);
3889 			++ctx->pin_count;
3890 			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
3891 		}
3892 		mutex_unlock(&task->perf_event_mutex);
3893 
3894 		if (unlikely(err)) {
3895 			put_ctx(ctx);
3896 
3897 			if (err == -EAGAIN)
3898 				goto retry;
3899 			goto errout;
3900 		}
3901 	}
3902 
3903 	kfree(task_ctx_data);
3904 	return ctx;
3905 
3906 errout:
3907 	kfree(task_ctx_data);
3908 	return ERR_PTR(err);
3909 }
3910 
3911 static void perf_event_free_filter(struct perf_event *event);
3912 static void perf_event_free_bpf_prog(struct perf_event *event);
3913 
3914 static void free_event_rcu(struct rcu_head *head)
3915 {
3916 	struct perf_event *event;
3917 
3918 	event = container_of(head, struct perf_event, rcu_head);
3919 	if (event->ns)
3920 		put_pid_ns(event->ns);
3921 	perf_event_free_filter(event);
3922 	kfree(event);
3923 }
3924 
3925 static void ring_buffer_attach(struct perf_event *event,
3926 			       struct ring_buffer *rb);
3927 
3928 static void detach_sb_event(struct perf_event *event)
3929 {
3930 	struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
3931 
3932 	raw_spin_lock(&pel->lock);
3933 	list_del_rcu(&event->sb_list);
3934 	raw_spin_unlock(&pel->lock);
3935 }
3936 
3937 static bool is_sb_event(struct perf_event *event)
3938 {
3939 	struct perf_event_attr *attr = &event->attr;
3940 
3941 	if (event->parent)
3942 		return false;
3943 
3944 	if (event->attach_state & PERF_ATTACH_TASK)
3945 		return false;
3946 
3947 	if (attr->mmap || attr->mmap_data || attr->mmap2 ||
3948 	    attr->comm || attr->comm_exec ||
3949 	    attr->task ||
3950 	    attr->context_switch)
3951 		return true;
3952 	return false;
3953 }
3954 
3955 static void unaccount_pmu_sb_event(struct perf_event *event)
3956 {
3957 	if (is_sb_event(event))
3958 		detach_sb_event(event);
3959 }
3960 
3961 static void unaccount_event_cpu(struct perf_event *event, int cpu)
3962 {
3963 	if (event->parent)
3964 		return;
3965 
3966 	if (is_cgroup_event(event))
3967 		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3968 }
3969 
3970 #ifdef CONFIG_NO_HZ_FULL
3971 static DEFINE_SPINLOCK(nr_freq_lock);
3972 #endif
3973 
3974 static void unaccount_freq_event_nohz(void)
3975 {
3976 #ifdef CONFIG_NO_HZ_FULL
3977 	spin_lock(&nr_freq_lock);
3978 	if (atomic_dec_and_test(&nr_freq_events))
3979 		tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
3980 	spin_unlock(&nr_freq_lock);
3981 #endif
3982 }
3983 
3984 static void unaccount_freq_event(void)
3985 {
3986 	if (tick_nohz_full_enabled())
3987 		unaccount_freq_event_nohz();
3988 	else
3989 		atomic_dec(&nr_freq_events);
3990 }
3991 
3992 static void unaccount_event(struct perf_event *event)
3993 {
3994 	bool dec = false;
3995 
3996 	if (event->parent)
3997 		return;
3998 
3999 	if (event->attach_state & PERF_ATTACH_TASK)
4000 		dec = true;
4001 	if (event->attr.mmap || event->attr.mmap_data)
4002 		atomic_dec(&nr_mmap_events);
4003 	if (event->attr.comm)
4004 		atomic_dec(&nr_comm_events);
4005 	if (event->attr.namespaces)
4006 		atomic_dec(&nr_namespaces_events);
4007 	if (event->attr.task)
4008 		atomic_dec(&nr_task_events);
4009 	if (event->attr.freq)
4010 		unaccount_freq_event();
4011 	if (event->attr.context_switch) {
4012 		dec = true;
4013 		atomic_dec(&nr_switch_events);
4014 	}
4015 	if (is_cgroup_event(event))
4016 		dec = true;
4017 	if (has_branch_stack(event))
4018 		dec = true;
4019 
4020 	if (dec) {
4021 		if (!atomic_add_unless(&perf_sched_count, -1, 1))
4022 			schedule_delayed_work(&perf_sched_work, HZ);
4023 	}
4024 
4025 	unaccount_event_cpu(event, event->cpu);
4026 
4027 	unaccount_pmu_sb_event(event);
4028 }
4029 
4030 static void perf_sched_delayed(struct work_struct *work)
4031 {
4032 	mutex_lock(&perf_sched_mutex);
4033 	if (atomic_dec_and_test(&perf_sched_count))
4034 		static_branch_disable(&perf_sched_events);
4035 	mutex_unlock(&perf_sched_mutex);
4036 }
4037 
4038 /*
4039  * The following implement mutual exclusion of events on "exclusive" pmus
4040  * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
4041  * at a time, so we disallow creating events that might conflict, namely:
4042  *
4043  *  1) cpu-wide events in the presence of per-task events,
4044  *  2) per-task events in the presence of cpu-wide events,
4045  *  3) two matching events on the same context.
4046  *
4047  * The former two cases are handled in the allocation path (perf_event_alloc(),
4048  * _free_event()), the latter -- before the first perf_install_in_context().
4049  */
4050 static int exclusive_event_init(struct perf_event *event)
4051 {
4052 	struct pmu *pmu = event->pmu;
4053 
4054 	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4055 		return 0;
4056 
4057 	/*
4058 	 * Prevent co-existence of per-task and cpu-wide events on the
4059 	 * same exclusive pmu.
4060 	 *
4061 	 * Negative pmu::exclusive_cnt means there are cpu-wide
4062 	 * events on this "exclusive" pmu, positive means there are
4063 	 * per-task events.
4064 	 *
4065 	 * Since this is called in perf_event_alloc() path, event::ctx
4066 	 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
4067 	 * to mean "per-task event", because unlike other attach states it
4068 	 * never gets cleared.
4069 	 */
4070 	if (event->attach_state & PERF_ATTACH_TASK) {
4071 		if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
4072 			return -EBUSY;
4073 	} else {
4074 		if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
4075 			return -EBUSY;
4076 	}
4077 
4078 	return 0;
4079 }
4080 
4081 static void exclusive_event_destroy(struct perf_event *event)
4082 {
4083 	struct pmu *pmu = event->pmu;
4084 
4085 	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4086 		return;
4087 
4088 	/* see comment in exclusive_event_init() */
4089 	if (event->attach_state & PERF_ATTACH_TASK)
4090 		atomic_dec(&pmu->exclusive_cnt);
4091 	else
4092 		atomic_inc(&pmu->exclusive_cnt);
4093 }
4094 
4095 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
4096 {
4097 	if ((e1->pmu == e2->pmu) &&
4098 	    (e1->cpu == e2->cpu ||
4099 	     e1->cpu == -1 ||
4100 	     e2->cpu == -1))
4101 		return true;
4102 	return false;
4103 }
4104 
4105 /* Called under the same ctx::mutex as perf_install_in_context() */
4106 static bool exclusive_event_installable(struct perf_event *event,
4107 					struct perf_event_context *ctx)
4108 {
4109 	struct perf_event *iter_event;
4110 	struct pmu *pmu = event->pmu;
4111 
4112 	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4113 		return true;
4114 
4115 	list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
4116 		if (exclusive_event_match(iter_event, event))
4117 			return false;
4118 	}
4119 
4120 	return true;
4121 }
4122 
4123 static void perf_addr_filters_splice(struct perf_event *event,
4124 				       struct list_head *head);
4125 
4126 static void _free_event(struct perf_event *event)
4127 {
4128 	irq_work_sync(&event->pending);
4129 
4130 	unaccount_event(event);
4131 
4132 	if (event->rb) {
4133 		/*
4134 		 * Can happen when we close an event with re-directed output.
4135 		 *
4136 		 * Since we have a 0 refcount, perf_mmap_close() will skip
4137 		 * over us; possibly making our ring_buffer_put() the last.
4138 		 */
4139 		mutex_lock(&event->mmap_mutex);
4140 		ring_buffer_attach(event, NULL);
4141 		mutex_unlock(&event->mmap_mutex);
4142 	}
4143 
4144 	if (is_cgroup_event(event))
4145 		perf_detach_cgroup(event);
4146 
4147 	if (!event->parent) {
4148 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
4149 			put_callchain_buffers();
4150 	}
4151 
4152 	perf_event_free_bpf_prog(event);
4153 	perf_addr_filters_splice(event, NULL);
4154 	kfree(event->addr_filters_offs);
4155 
4156 	if (event->destroy)
4157 		event->destroy(event);
4158 
4159 	if (event->ctx)
4160 		put_ctx(event->ctx);
4161 
4162 	exclusive_event_destroy(event);
4163 	module_put(event->pmu->module);
4164 
4165 	call_rcu(&event->rcu_head, free_event_rcu);
4166 }
4167 
4168 /*
4169  * Used to free events which have a known refcount of 1, such as in error paths
4170  * where the event isn't exposed yet and inherited events.
4171  */
4172 static void free_event(struct perf_event *event)
4173 {
4174 	if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
4175 				"unexpected event refcount: %ld; ptr=%p\n",
4176 				atomic_long_read(&event->refcount), event)) {
4177 		/* leak to avoid use-after-free */
4178 		return;
4179 	}
4180 
4181 	_free_event(event);
4182 }
4183 
4184 /*
4185  * Remove user event from the owner task.
4186  */
4187 static void perf_remove_from_owner(struct perf_event *event)
4188 {
4189 	struct task_struct *owner;
4190 
4191 	rcu_read_lock();
4192 	/*
4193 	 * Matches the smp_store_release() in perf_event_exit_task(). If we
4194 	 * observe !owner it means the list deletion is complete and we can
4195 	 * indeed free this event, otherwise we need to serialize on
4196 	 * owner->perf_event_mutex.
4197 	 */
4198 	owner = lockless_dereference(event->owner);
4199 	if (owner) {
4200 		/*
4201 		 * Since delayed_put_task_struct() also drops the last
4202 		 * task reference we can safely take a new reference
4203 		 * while holding the rcu_read_lock().
4204 		 */
4205 		get_task_struct(owner);
4206 	}
4207 	rcu_read_unlock();
4208 
4209 	if (owner) {
4210 		/*
4211 		 * If we're here through perf_event_exit_task() we're already
4212 		 * holding ctx->mutex which would be an inversion wrt. the
4213 		 * normal lock order.
4214 		 *
4215 		 * However we can safely take this lock because its the child
4216 		 * ctx->mutex.
4217 		 */
4218 		mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
4219 
4220 		/*
4221 		 * We have to re-check the event->owner field, if it is cleared
4222 		 * we raced with perf_event_exit_task(), acquiring the mutex
4223 		 * ensured they're done, and we can proceed with freeing the
4224 		 * event.
4225 		 */
4226 		if (event->owner) {
4227 			list_del_init(&event->owner_entry);
4228 			smp_store_release(&event->owner, NULL);
4229 		}
4230 		mutex_unlock(&owner->perf_event_mutex);
4231 		put_task_struct(owner);
4232 	}
4233 }
4234 
4235 static void put_event(struct perf_event *event)
4236 {
4237 	if (!atomic_long_dec_and_test(&event->refcount))
4238 		return;
4239 
4240 	_free_event(event);
4241 }
4242 
4243 /*
4244  * Kill an event dead; while event:refcount will preserve the event
4245  * object, it will not preserve its functionality. Once the last 'user'
4246  * gives up the object, we'll destroy the thing.
4247  */
4248 int perf_event_release_kernel(struct perf_event *event)
4249 {
4250 	struct perf_event_context *ctx = event->ctx;
4251 	struct perf_event *child, *tmp;
4252 
4253 	/*
4254 	 * If we got here through err_file: fput(event_file); we will not have
4255 	 * attached to a context yet.
4256 	 */
4257 	if (!ctx) {
4258 		WARN_ON_ONCE(event->attach_state &
4259 				(PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
4260 		goto no_ctx;
4261 	}
4262 
4263 	if (!is_kernel_event(event))
4264 		perf_remove_from_owner(event);
4265 
4266 	ctx = perf_event_ctx_lock(event);
4267 	WARN_ON_ONCE(ctx->parent_ctx);
4268 	perf_remove_from_context(event, DETACH_GROUP);
4269 
4270 	raw_spin_lock_irq(&ctx->lock);
4271 	/*
4272 	 * Mark this event as STATE_DEAD, there is no external reference to it
4273 	 * anymore.
4274 	 *
4275 	 * Anybody acquiring event->child_mutex after the below loop _must_
4276 	 * also see this, most importantly inherit_event() which will avoid
4277 	 * placing more children on the list.
4278 	 *
4279 	 * Thus this guarantees that we will in fact observe and kill _ALL_
4280 	 * child events.
4281 	 */
4282 	event->state = PERF_EVENT_STATE_DEAD;
4283 	raw_spin_unlock_irq(&ctx->lock);
4284 
4285 	perf_event_ctx_unlock(event, ctx);
4286 
4287 again:
4288 	mutex_lock(&event->child_mutex);
4289 	list_for_each_entry(child, &event->child_list, child_list) {
4290 
4291 		/*
4292 		 * Cannot change, child events are not migrated, see the
4293 		 * comment with perf_event_ctx_lock_nested().
4294 		 */
4295 		ctx = lockless_dereference(child->ctx);
4296 		/*
4297 		 * Since child_mutex nests inside ctx::mutex, we must jump
4298 		 * through hoops. We start by grabbing a reference on the ctx.
4299 		 *
4300 		 * Since the event cannot get freed while we hold the
4301 		 * child_mutex, the context must also exist and have a !0
4302 		 * reference count.
4303 		 */
4304 		get_ctx(ctx);
4305 
4306 		/*
4307 		 * Now that we have a ctx ref, we can drop child_mutex, and
4308 		 * acquire ctx::mutex without fear of it going away. Then we
4309 		 * can re-acquire child_mutex.
4310 		 */
4311 		mutex_unlock(&event->child_mutex);
4312 		mutex_lock(&ctx->mutex);
4313 		mutex_lock(&event->child_mutex);
4314 
4315 		/*
4316 		 * Now that we hold ctx::mutex and child_mutex, revalidate our
4317 		 * state, if child is still the first entry, it didn't get freed
4318 		 * and we can continue doing so.
4319 		 */
4320 		tmp = list_first_entry_or_null(&event->child_list,
4321 					       struct perf_event, child_list);
4322 		if (tmp == child) {
4323 			perf_remove_from_context(child, DETACH_GROUP);
4324 			list_del(&child->child_list);
4325 			free_event(child);
4326 			/*
4327 			 * This matches the refcount bump in inherit_event();
4328 			 * this can't be the last reference.
4329 			 */
4330 			put_event(event);
4331 		}
4332 
4333 		mutex_unlock(&event->child_mutex);
4334 		mutex_unlock(&ctx->mutex);
4335 		put_ctx(ctx);
4336 		goto again;
4337 	}
4338 	mutex_unlock(&event->child_mutex);
4339 
4340 no_ctx:
4341 	put_event(event); /* Must be the 'last' reference */
4342 	return 0;
4343 }
4344 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
4345 
4346 /*
4347  * Called when the last reference to the file is gone.
4348  */
4349 static int perf_release(struct inode *inode, struct file *file)
4350 {
4351 	perf_event_release_kernel(file->private_data);
4352 	return 0;
4353 }
4354 
4355 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
4356 {
4357 	struct perf_event *child;
4358 	u64 total = 0;
4359 
4360 	*enabled = 0;
4361 	*running = 0;
4362 
4363 	mutex_lock(&event->child_mutex);
4364 
4365 	(void)perf_event_read(event, false);
4366 	total += perf_event_count(event);
4367 
4368 	*enabled += event->total_time_enabled +
4369 			atomic64_read(&event->child_total_time_enabled);
4370 	*running += event->total_time_running +
4371 			atomic64_read(&event->child_total_time_running);
4372 
4373 	list_for_each_entry(child, &event->child_list, child_list) {
4374 		(void)perf_event_read(child, false);
4375 		total += perf_event_count(child);
4376 		*enabled += child->total_time_enabled;
4377 		*running += child->total_time_running;
4378 	}
4379 	mutex_unlock(&event->child_mutex);
4380 
4381 	return total;
4382 }
4383 EXPORT_SYMBOL_GPL(perf_event_read_value);
4384 
4385 static int __perf_read_group_add(struct perf_event *leader,
4386 					u64 read_format, u64 *values)
4387 {
4388 	struct perf_event_context *ctx = leader->ctx;
4389 	struct perf_event *sub;
4390 	unsigned long flags;
4391 	int n = 1; /* skip @nr */
4392 	int ret;
4393 
4394 	ret = perf_event_read(leader, true);
4395 	if (ret)
4396 		return ret;
4397 
4398 	/*
4399 	 * Since we co-schedule groups, {enabled,running} times of siblings
4400 	 * will be identical to those of the leader, so we only publish one
4401 	 * set.
4402 	 */
4403 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4404 		values[n++] += leader->total_time_enabled +
4405 			atomic64_read(&leader->child_total_time_enabled);
4406 	}
4407 
4408 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4409 		values[n++] += leader->total_time_running +
4410 			atomic64_read(&leader->child_total_time_running);
4411 	}
4412 
4413 	/*
4414 	 * Write {count,id} tuples for every sibling.
4415 	 */
4416 	values[n++] += perf_event_count(leader);
4417 	if (read_format & PERF_FORMAT_ID)
4418 		values[n++] = primary_event_id(leader);
4419 
4420 	raw_spin_lock_irqsave(&ctx->lock, flags);
4421 
4422 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4423 		values[n++] += perf_event_count(sub);
4424 		if (read_format & PERF_FORMAT_ID)
4425 			values[n++] = primary_event_id(sub);
4426 	}
4427 
4428 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
4429 	return 0;
4430 }
4431 
4432 static int perf_read_group(struct perf_event *event,
4433 				   u64 read_format, char __user *buf)
4434 {
4435 	struct perf_event *leader = event->group_leader, *child;
4436 	struct perf_event_context *ctx = leader->ctx;
4437 	int ret;
4438 	u64 *values;
4439 
4440 	lockdep_assert_held(&ctx->mutex);
4441 
4442 	values = kzalloc(event->read_size, GFP_KERNEL);
4443 	if (!values)
4444 		return -ENOMEM;
4445 
4446 	values[0] = 1 + leader->nr_siblings;
4447 
4448 	/*
4449 	 * By locking the child_mutex of the leader we effectively
4450 	 * lock the child list of all siblings.. XXX explain how.
4451 	 */
4452 	mutex_lock(&leader->child_mutex);
4453 
4454 	ret = __perf_read_group_add(leader, read_format, values);
4455 	if (ret)
4456 		goto unlock;
4457 
4458 	list_for_each_entry(child, &leader->child_list, child_list) {
4459 		ret = __perf_read_group_add(child, read_format, values);
4460 		if (ret)
4461 			goto unlock;
4462 	}
4463 
4464 	mutex_unlock(&leader->child_mutex);
4465 
4466 	ret = event->read_size;
4467 	if (copy_to_user(buf, values, event->read_size))
4468 		ret = -EFAULT;
4469 	goto out;
4470 
4471 unlock:
4472 	mutex_unlock(&leader->child_mutex);
4473 out:
4474 	kfree(values);
4475 	return ret;
4476 }
4477 
4478 static int perf_read_one(struct perf_event *event,
4479 				 u64 read_format, char __user *buf)
4480 {
4481 	u64 enabled, running;
4482 	u64 values[4];
4483 	int n = 0;
4484 
4485 	values[n++] = perf_event_read_value(event, &enabled, &running);
4486 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4487 		values[n++] = enabled;
4488 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4489 		values[n++] = running;
4490 	if (read_format & PERF_FORMAT_ID)
4491 		values[n++] = primary_event_id(event);
4492 
4493 	if (copy_to_user(buf, values, n * sizeof(u64)))
4494 		return -EFAULT;
4495 
4496 	return n * sizeof(u64);
4497 }
4498 
4499 static bool is_event_hup(struct perf_event *event)
4500 {
4501 	bool no_children;
4502 
4503 	if (event->state > PERF_EVENT_STATE_EXIT)
4504 		return false;
4505 
4506 	mutex_lock(&event->child_mutex);
4507 	no_children = list_empty(&event->child_list);
4508 	mutex_unlock(&event->child_mutex);
4509 	return no_children;
4510 }
4511 
4512 /*
4513  * Read the performance event - simple non blocking version for now
4514  */
4515 static ssize_t
4516 __perf_read(struct perf_event *event, char __user *buf, size_t count)
4517 {
4518 	u64 read_format = event->attr.read_format;
4519 	int ret;
4520 
4521 	/*
4522 	 * Return end-of-file for a read on a event that is in
4523 	 * error state (i.e. because it was pinned but it couldn't be
4524 	 * scheduled on to the CPU at some point).
4525 	 */
4526 	if (event->state == PERF_EVENT_STATE_ERROR)
4527 		return 0;
4528 
4529 	if (count < event->read_size)
4530 		return -ENOSPC;
4531 
4532 	WARN_ON_ONCE(event->ctx->parent_ctx);
4533 	if (read_format & PERF_FORMAT_GROUP)
4534 		ret = perf_read_group(event, read_format, buf);
4535 	else
4536 		ret = perf_read_one(event, read_format, buf);
4537 
4538 	return ret;
4539 }
4540 
4541 static ssize_t
4542 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
4543 {
4544 	struct perf_event *event = file->private_data;
4545 	struct perf_event_context *ctx;
4546 	int ret;
4547 
4548 	ctx = perf_event_ctx_lock(event);
4549 	ret = __perf_read(event, buf, count);
4550 	perf_event_ctx_unlock(event, ctx);
4551 
4552 	return ret;
4553 }
4554 
4555 static unsigned int perf_poll(struct file *file, poll_table *wait)
4556 {
4557 	struct perf_event *event = file->private_data;
4558 	struct ring_buffer *rb;
4559 	unsigned int events = POLLHUP;
4560 
4561 	poll_wait(file, &event->waitq, wait);
4562 
4563 	if (is_event_hup(event))
4564 		return events;
4565 
4566 	/*
4567 	 * Pin the event->rb by taking event->mmap_mutex; otherwise
4568 	 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
4569 	 */
4570 	mutex_lock(&event->mmap_mutex);
4571 	rb = event->rb;
4572 	if (rb)
4573 		events = atomic_xchg(&rb->poll, 0);
4574 	mutex_unlock(&event->mmap_mutex);
4575 	return events;
4576 }
4577 
4578 static void _perf_event_reset(struct perf_event *event)
4579 {
4580 	(void)perf_event_read(event, false);
4581 	local64_set(&event->count, 0);
4582 	perf_event_update_userpage(event);
4583 }
4584 
4585 /*
4586  * Holding the top-level event's child_mutex means that any
4587  * descendant process that has inherited this event will block
4588  * in perf_event_exit_event() if it goes to exit, thus satisfying the
4589  * task existence requirements of perf_event_enable/disable.
4590  */
4591 static void perf_event_for_each_child(struct perf_event *event,
4592 					void (*func)(struct perf_event *))
4593 {
4594 	struct perf_event *child;
4595 
4596 	WARN_ON_ONCE(event->ctx->parent_ctx);
4597 
4598 	mutex_lock(&event->child_mutex);
4599 	func(event);
4600 	list_for_each_entry(child, &event->child_list, child_list)
4601 		func(child);
4602 	mutex_unlock(&event->child_mutex);
4603 }
4604 
4605 static void perf_event_for_each(struct perf_event *event,
4606 				  void (*func)(struct perf_event *))
4607 {
4608 	struct perf_event_context *ctx = event->ctx;
4609 	struct perf_event *sibling;
4610 
4611 	lockdep_assert_held(&ctx->mutex);
4612 
4613 	event = event->group_leader;
4614 
4615 	perf_event_for_each_child(event, func);
4616 	list_for_each_entry(sibling, &event->sibling_list, group_entry)
4617 		perf_event_for_each_child(sibling, func);
4618 }
4619 
4620 static void __perf_event_period(struct perf_event *event,
4621 				struct perf_cpu_context *cpuctx,
4622 				struct perf_event_context *ctx,
4623 				void *info)
4624 {
4625 	u64 value = *((u64 *)info);
4626 	bool active;
4627 
4628 	if (event->attr.freq) {
4629 		event->attr.sample_freq = value;
4630 	} else {
4631 		event->attr.sample_period = value;
4632 		event->hw.sample_period = value;
4633 	}
4634 
4635 	active = (event->state == PERF_EVENT_STATE_ACTIVE);
4636 	if (active) {
4637 		perf_pmu_disable(ctx->pmu);
4638 		/*
4639 		 * We could be throttled; unthrottle now to avoid the tick
4640 		 * trying to unthrottle while we already re-started the event.
4641 		 */
4642 		if (event->hw.interrupts == MAX_INTERRUPTS) {
4643 			event->hw.interrupts = 0;
4644 			perf_log_throttle(event, 1);
4645 		}
4646 		event->pmu->stop(event, PERF_EF_UPDATE);
4647 	}
4648 
4649 	local64_set(&event->hw.period_left, 0);
4650 
4651 	if (active) {
4652 		event->pmu->start(event, PERF_EF_RELOAD);
4653 		perf_pmu_enable(ctx->pmu);
4654 	}
4655 }
4656 
4657 static int perf_event_period(struct perf_event *event, u64 __user *arg)
4658 {
4659 	u64 value;
4660 
4661 	if (!is_sampling_event(event))
4662 		return -EINVAL;
4663 
4664 	if (copy_from_user(&value, arg, sizeof(value)))
4665 		return -EFAULT;
4666 
4667 	if (!value)
4668 		return -EINVAL;
4669 
4670 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4671 		return -EINVAL;
4672 
4673 	event_function_call(event, __perf_event_period, &value);
4674 
4675 	return 0;
4676 }
4677 
4678 static const struct file_operations perf_fops;
4679 
4680 static inline int perf_fget_light(int fd, struct fd *p)
4681 {
4682 	struct fd f = fdget(fd);
4683 	if (!f.file)
4684 		return -EBADF;
4685 
4686 	if (f.file->f_op != &perf_fops) {
4687 		fdput(f);
4688 		return -EBADF;
4689 	}
4690 	*p = f;
4691 	return 0;
4692 }
4693 
4694 static int perf_event_set_output(struct perf_event *event,
4695 				 struct perf_event *output_event);
4696 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
4697 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
4698 
4699 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
4700 {
4701 	void (*func)(struct perf_event *);
4702 	u32 flags = arg;
4703 
4704 	switch (cmd) {
4705 	case PERF_EVENT_IOC_ENABLE:
4706 		func = _perf_event_enable;
4707 		break;
4708 	case PERF_EVENT_IOC_DISABLE:
4709 		func = _perf_event_disable;
4710 		break;
4711 	case PERF_EVENT_IOC_RESET:
4712 		func = _perf_event_reset;
4713 		break;
4714 
4715 	case PERF_EVENT_IOC_REFRESH:
4716 		return _perf_event_refresh(event, arg);
4717 
4718 	case PERF_EVENT_IOC_PERIOD:
4719 		return perf_event_period(event, (u64 __user *)arg);
4720 
4721 	case PERF_EVENT_IOC_ID:
4722 	{
4723 		u64 id = primary_event_id(event);
4724 
4725 		if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4726 			return -EFAULT;
4727 		return 0;
4728 	}
4729 
4730 	case PERF_EVENT_IOC_SET_OUTPUT:
4731 	{
4732 		int ret;
4733 		if (arg != -1) {
4734 			struct perf_event *output_event;
4735 			struct fd output;
4736 			ret = perf_fget_light(arg, &output);
4737 			if (ret)
4738 				return ret;
4739 			output_event = output.file->private_data;
4740 			ret = perf_event_set_output(event, output_event);
4741 			fdput(output);
4742 		} else {
4743 			ret = perf_event_set_output(event, NULL);
4744 		}
4745 		return ret;
4746 	}
4747 
4748 	case PERF_EVENT_IOC_SET_FILTER:
4749 		return perf_event_set_filter(event, (void __user *)arg);
4750 
4751 	case PERF_EVENT_IOC_SET_BPF:
4752 		return perf_event_set_bpf_prog(event, arg);
4753 
4754 	case PERF_EVENT_IOC_PAUSE_OUTPUT: {
4755 		struct ring_buffer *rb;
4756 
4757 		rcu_read_lock();
4758 		rb = rcu_dereference(event->rb);
4759 		if (!rb || !rb->nr_pages) {
4760 			rcu_read_unlock();
4761 			return -EINVAL;
4762 		}
4763 		rb_toggle_paused(rb, !!arg);
4764 		rcu_read_unlock();
4765 		return 0;
4766 	}
4767 	default:
4768 		return -ENOTTY;
4769 	}
4770 
4771 	if (flags & PERF_IOC_FLAG_GROUP)
4772 		perf_event_for_each(event, func);
4773 	else
4774 		perf_event_for_each_child(event, func);
4775 
4776 	return 0;
4777 }
4778 
4779 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4780 {
4781 	struct perf_event *event = file->private_data;
4782 	struct perf_event_context *ctx;
4783 	long ret;
4784 
4785 	ctx = perf_event_ctx_lock(event);
4786 	ret = _perf_ioctl(event, cmd, arg);
4787 	perf_event_ctx_unlock(event, ctx);
4788 
4789 	return ret;
4790 }
4791 
4792 #ifdef CONFIG_COMPAT
4793 static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4794 				unsigned long arg)
4795 {
4796 	switch (_IOC_NR(cmd)) {
4797 	case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4798 	case _IOC_NR(PERF_EVENT_IOC_ID):
4799 		/* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
4800 		if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4801 			cmd &= ~IOCSIZE_MASK;
4802 			cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4803 		}
4804 		break;
4805 	}
4806 	return perf_ioctl(file, cmd, arg);
4807 }
4808 #else
4809 # define perf_compat_ioctl NULL
4810 #endif
4811 
4812 int perf_event_task_enable(void)
4813 {
4814 	struct perf_event_context *ctx;
4815 	struct perf_event *event;
4816 
4817 	mutex_lock(&current->perf_event_mutex);
4818 	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4819 		ctx = perf_event_ctx_lock(event);
4820 		perf_event_for_each_child(event, _perf_event_enable);
4821 		perf_event_ctx_unlock(event, ctx);
4822 	}
4823 	mutex_unlock(&current->perf_event_mutex);
4824 
4825 	return 0;
4826 }
4827 
4828 int perf_event_task_disable(void)
4829 {
4830 	struct perf_event_context *ctx;
4831 	struct perf_event *event;
4832 
4833 	mutex_lock(&current->perf_event_mutex);
4834 	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4835 		ctx = perf_event_ctx_lock(event);
4836 		perf_event_for_each_child(event, _perf_event_disable);
4837 		perf_event_ctx_unlock(event, ctx);
4838 	}
4839 	mutex_unlock(&current->perf_event_mutex);
4840 
4841 	return 0;
4842 }
4843 
4844 static int perf_event_index(struct perf_event *event)
4845 {
4846 	if (event->hw.state & PERF_HES_STOPPED)
4847 		return 0;
4848 
4849 	if (event->state != PERF_EVENT_STATE_ACTIVE)
4850 		return 0;
4851 
4852 	return event->pmu->event_idx(event);
4853 }
4854 
4855 static void calc_timer_values(struct perf_event *event,
4856 				u64 *now,
4857 				u64 *enabled,
4858 				u64 *running)
4859 {
4860 	u64 ctx_time;
4861 
4862 	*now = perf_clock();
4863 	ctx_time = event->shadow_ctx_time + *now;
4864 	*enabled = ctx_time - event->tstamp_enabled;
4865 	*running = ctx_time - event->tstamp_running;
4866 }
4867 
4868 static void perf_event_init_userpage(struct perf_event *event)
4869 {
4870 	struct perf_event_mmap_page *userpg;
4871 	struct ring_buffer *rb;
4872 
4873 	rcu_read_lock();
4874 	rb = rcu_dereference(event->rb);
4875 	if (!rb)
4876 		goto unlock;
4877 
4878 	userpg = rb->user_page;
4879 
4880 	/* Allow new userspace to detect that bit 0 is deprecated */
4881 	userpg->cap_bit0_is_deprecated = 1;
4882 	userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
4883 	userpg->data_offset = PAGE_SIZE;
4884 	userpg->data_size = perf_data_size(rb);
4885 
4886 unlock:
4887 	rcu_read_unlock();
4888 }
4889 
4890 void __weak arch_perf_update_userpage(
4891 	struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
4892 {
4893 }
4894 
4895 /*
4896  * Callers need to ensure there can be no nesting of this function, otherwise
4897  * the seqlock logic goes bad. We can not serialize this because the arch
4898  * code calls this from NMI context.
4899  */
4900 void perf_event_update_userpage(struct perf_event *event)
4901 {
4902 	struct perf_event_mmap_page *userpg;
4903 	struct ring_buffer *rb;
4904 	u64 enabled, running, now;
4905 
4906 	rcu_read_lock();
4907 	rb = rcu_dereference(event->rb);
4908 	if (!rb)
4909 		goto unlock;
4910 
4911 	/*
4912 	 * compute total_time_enabled, total_time_running
4913 	 * based on snapshot values taken when the event
4914 	 * was last scheduled in.
4915 	 *
4916 	 * we cannot simply called update_context_time()
4917 	 * because of locking issue as we can be called in
4918 	 * NMI context
4919 	 */
4920 	calc_timer_values(event, &now, &enabled, &running);
4921 
4922 	userpg = rb->user_page;
4923 	/*
4924 	 * Disable preemption so as to not let the corresponding user-space
4925 	 * spin too long if we get preempted.
4926 	 */
4927 	preempt_disable();
4928 	++userpg->lock;
4929 	barrier();
4930 	userpg->index = perf_event_index(event);
4931 	userpg->offset = perf_event_count(event);
4932 	if (userpg->index)
4933 		userpg->offset -= local64_read(&event->hw.prev_count);
4934 
4935 	userpg->time_enabled = enabled +
4936 			atomic64_read(&event->child_total_time_enabled);
4937 
4938 	userpg->time_running = running +
4939 			atomic64_read(&event->child_total_time_running);
4940 
4941 	arch_perf_update_userpage(event, userpg, now);
4942 
4943 	barrier();
4944 	++userpg->lock;
4945 	preempt_enable();
4946 unlock:
4947 	rcu_read_unlock();
4948 }
4949 
4950 static int perf_mmap_fault(struct vm_fault *vmf)
4951 {
4952 	struct perf_event *event = vmf->vma->vm_file->private_data;
4953 	struct ring_buffer *rb;
4954 	int ret = VM_FAULT_SIGBUS;
4955 
4956 	if (vmf->flags & FAULT_FLAG_MKWRITE) {
4957 		if (vmf->pgoff == 0)
4958 			ret = 0;
4959 		return ret;
4960 	}
4961 
4962 	rcu_read_lock();
4963 	rb = rcu_dereference(event->rb);
4964 	if (!rb)
4965 		goto unlock;
4966 
4967 	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
4968 		goto unlock;
4969 
4970 	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
4971 	if (!vmf->page)
4972 		goto unlock;
4973 
4974 	get_page(vmf->page);
4975 	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
4976 	vmf->page->index   = vmf->pgoff;
4977 
4978 	ret = 0;
4979 unlock:
4980 	rcu_read_unlock();
4981 
4982 	return ret;
4983 }
4984 
4985 static void ring_buffer_attach(struct perf_event *event,
4986 			       struct ring_buffer *rb)
4987 {
4988 	struct ring_buffer *old_rb = NULL;
4989 	unsigned long flags;
4990 
4991 	if (event->rb) {
4992 		/*
4993 		 * Should be impossible, we set this when removing
4994 		 * event->rb_entry and wait/clear when adding event->rb_entry.
4995 		 */
4996 		WARN_ON_ONCE(event->rcu_pending);
4997 
4998 		old_rb = event->rb;
4999 		spin_lock_irqsave(&old_rb->event_lock, flags);
5000 		list_del_rcu(&event->rb_entry);
5001 		spin_unlock_irqrestore(&old_rb->event_lock, flags);
5002 
5003 		event->rcu_batches = get_state_synchronize_rcu();
5004 		event->rcu_pending = 1;
5005 	}
5006 
5007 	if (rb) {
5008 		if (event->rcu_pending) {
5009 			cond_synchronize_rcu(event->rcu_batches);
5010 			event->rcu_pending = 0;
5011 		}
5012 
5013 		spin_lock_irqsave(&rb->event_lock, flags);
5014 		list_add_rcu(&event->rb_entry, &rb->event_list);
5015 		spin_unlock_irqrestore(&rb->event_lock, flags);
5016 	}
5017 
5018 	/*
5019 	 * Avoid racing with perf_mmap_close(AUX): stop the event
5020 	 * before swizzling the event::rb pointer; if it's getting
5021 	 * unmapped, its aux_mmap_count will be 0 and it won't
5022 	 * restart. See the comment in __perf_pmu_output_stop().
5023 	 *
5024 	 * Data will inevitably be lost when set_output is done in
5025 	 * mid-air, but then again, whoever does it like this is
5026 	 * not in for the data anyway.
5027 	 */
5028 	if (has_aux(event))
5029 		perf_event_stop(event, 0);
5030 
5031 	rcu_assign_pointer(event->rb, rb);
5032 
5033 	if (old_rb) {
5034 		ring_buffer_put(old_rb);
5035 		/*
5036 		 * Since we detached before setting the new rb, so that we
5037 		 * could attach the new rb, we could have missed a wakeup.
5038 		 * Provide it now.
5039 		 */
5040 		wake_up_all(&event->waitq);
5041 	}
5042 }
5043 
5044 static void ring_buffer_wakeup(struct perf_event *event)
5045 {
5046 	struct ring_buffer *rb;
5047 
5048 	rcu_read_lock();
5049 	rb = rcu_dereference(event->rb);
5050 	if (rb) {
5051 		list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
5052 			wake_up_all(&event->waitq);
5053 	}
5054 	rcu_read_unlock();
5055 }
5056 
5057 struct ring_buffer *ring_buffer_get(struct perf_event *event)
5058 {
5059 	struct ring_buffer *rb;
5060 
5061 	rcu_read_lock();
5062 	rb = rcu_dereference(event->rb);
5063 	if (rb) {
5064 		if (!atomic_inc_not_zero(&rb->refcount))
5065 			rb = NULL;
5066 	}
5067 	rcu_read_unlock();
5068 
5069 	return rb;
5070 }
5071 
5072 void ring_buffer_put(struct ring_buffer *rb)
5073 {
5074 	if (!atomic_dec_and_test(&rb->refcount))
5075 		return;
5076 
5077 	WARN_ON_ONCE(!list_empty(&rb->event_list));
5078 
5079 	call_rcu(&rb->rcu_head, rb_free_rcu);
5080 }
5081 
5082 static void perf_mmap_open(struct vm_area_struct *vma)
5083 {
5084 	struct perf_event *event = vma->vm_file->private_data;
5085 
5086 	atomic_inc(&event->mmap_count);
5087 	atomic_inc(&event->rb->mmap_count);
5088 
5089 	if (vma->vm_pgoff)
5090 		atomic_inc(&event->rb->aux_mmap_count);
5091 
5092 	if (event->pmu->event_mapped)
5093 		event->pmu->event_mapped(event);
5094 }
5095 
5096 static void perf_pmu_output_stop(struct perf_event *event);
5097 
5098 /*
5099  * A buffer can be mmap()ed multiple times; either directly through the same
5100  * event, or through other events by use of perf_event_set_output().
5101  *
5102  * In order to undo the VM accounting done by perf_mmap() we need to destroy
5103  * the buffer here, where we still have a VM context. This means we need
5104  * to detach all events redirecting to us.
5105  */
5106 static void perf_mmap_close(struct vm_area_struct *vma)
5107 {
5108 	struct perf_event *event = vma->vm_file->private_data;
5109 
5110 	struct ring_buffer *rb = ring_buffer_get(event);
5111 	struct user_struct *mmap_user = rb->mmap_user;
5112 	int mmap_locked = rb->mmap_locked;
5113 	unsigned long size = perf_data_size(rb);
5114 
5115 	if (event->pmu->event_unmapped)
5116 		event->pmu->event_unmapped(event);
5117 
5118 	/*
5119 	 * rb->aux_mmap_count will always drop before rb->mmap_count and
5120 	 * event->mmap_count, so it is ok to use event->mmap_mutex to
5121 	 * serialize with perf_mmap here.
5122 	 */
5123 	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
5124 	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
5125 		/*
5126 		 * Stop all AUX events that are writing to this buffer,
5127 		 * so that we can free its AUX pages and corresponding PMU
5128 		 * data. Note that after rb::aux_mmap_count dropped to zero,
5129 		 * they won't start any more (see perf_aux_output_begin()).
5130 		 */
5131 		perf_pmu_output_stop(event);
5132 
5133 		/* now it's safe to free the pages */
5134 		atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
5135 		vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
5136 
5137 		/* this has to be the last one */
5138 		rb_free_aux(rb);
5139 		WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
5140 
5141 		mutex_unlock(&event->mmap_mutex);
5142 	}
5143 
5144 	atomic_dec(&rb->mmap_count);
5145 
5146 	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
5147 		goto out_put;
5148 
5149 	ring_buffer_attach(event, NULL);
5150 	mutex_unlock(&event->mmap_mutex);
5151 
5152 	/* If there's still other mmap()s of this buffer, we're done. */
5153 	if (atomic_read(&rb->mmap_count))
5154 		goto out_put;
5155 
5156 	/*
5157 	 * No other mmap()s, detach from all other events that might redirect
5158 	 * into the now unreachable buffer. Somewhat complicated by the
5159 	 * fact that rb::event_lock otherwise nests inside mmap_mutex.
5160 	 */
5161 again:
5162 	rcu_read_lock();
5163 	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
5164 		if (!atomic_long_inc_not_zero(&event->refcount)) {
5165 			/*
5166 			 * This event is en-route to free_event() which will
5167 			 * detach it and remove it from the list.
5168 			 */
5169 			continue;
5170 		}
5171 		rcu_read_unlock();
5172 
5173 		mutex_lock(&event->mmap_mutex);
5174 		/*
5175 		 * Check we didn't race with perf_event_set_output() which can
5176 		 * swizzle the rb from under us while we were waiting to
5177 		 * acquire mmap_mutex.
5178 		 *
5179 		 * If we find a different rb; ignore this event, a next
5180 		 * iteration will no longer find it on the list. We have to
5181 		 * still restart the iteration to make sure we're not now
5182 		 * iterating the wrong list.
5183 		 */
5184 		if (event->rb == rb)
5185 			ring_buffer_attach(event, NULL);
5186 
5187 		mutex_unlock(&event->mmap_mutex);
5188 		put_event(event);
5189 
5190 		/*
5191 		 * Restart the iteration; either we're on the wrong list or
5192 		 * destroyed its integrity by doing a deletion.
5193 		 */
5194 		goto again;
5195 	}
5196 	rcu_read_unlock();
5197 
5198 	/*
5199 	 * It could be there's still a few 0-ref events on the list; they'll
5200 	 * get cleaned up by free_event() -- they'll also still have their
5201 	 * ref on the rb and will free it whenever they are done with it.
5202 	 *
5203 	 * Aside from that, this buffer is 'fully' detached and unmapped,
5204 	 * undo the VM accounting.
5205 	 */
5206 
5207 	atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
5208 	vma->vm_mm->pinned_vm -= mmap_locked;
5209 	free_uid(mmap_user);
5210 
5211 out_put:
5212 	ring_buffer_put(rb); /* could be last */
5213 }
5214 
5215 static const struct vm_operations_struct perf_mmap_vmops = {
5216 	.open		= perf_mmap_open,
5217 	.close		= perf_mmap_close, /* non mergable */
5218 	.fault		= perf_mmap_fault,
5219 	.page_mkwrite	= perf_mmap_fault,
5220 };
5221 
5222 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
5223 {
5224 	struct perf_event *event = file->private_data;
5225 	unsigned long user_locked, user_lock_limit;
5226 	struct user_struct *user = current_user();
5227 	unsigned long locked, lock_limit;
5228 	struct ring_buffer *rb = NULL;
5229 	unsigned long vma_size;
5230 	unsigned long nr_pages;
5231 	long user_extra = 0, extra = 0;
5232 	int ret = 0, flags = 0;
5233 
5234 	/*
5235 	 * Don't allow mmap() of inherited per-task counters. This would
5236 	 * create a performance issue due to all children writing to the
5237 	 * same rb.
5238 	 */
5239 	if (event->cpu == -1 && event->attr.inherit)
5240 		return -EINVAL;
5241 
5242 	if (!(vma->vm_flags & VM_SHARED))
5243 		return -EINVAL;
5244 
5245 	vma_size = vma->vm_end - vma->vm_start;
5246 
5247 	if (vma->vm_pgoff == 0) {
5248 		nr_pages = (vma_size / PAGE_SIZE) - 1;
5249 	} else {
5250 		/*
5251 		 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
5252 		 * mapped, all subsequent mappings should have the same size
5253 		 * and offset. Must be above the normal perf buffer.
5254 		 */
5255 		u64 aux_offset, aux_size;
5256 
5257 		if (!event->rb)
5258 			return -EINVAL;
5259 
5260 		nr_pages = vma_size / PAGE_SIZE;
5261 
5262 		mutex_lock(&event->mmap_mutex);
5263 		ret = -EINVAL;
5264 
5265 		rb = event->rb;
5266 		if (!rb)
5267 			goto aux_unlock;
5268 
5269 		aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
5270 		aux_size = ACCESS_ONCE(rb->user_page->aux_size);
5271 
5272 		if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
5273 			goto aux_unlock;
5274 
5275 		if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
5276 			goto aux_unlock;
5277 
5278 		/* already mapped with a different offset */
5279 		if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
5280 			goto aux_unlock;
5281 
5282 		if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
5283 			goto aux_unlock;
5284 
5285 		/* already mapped with a different size */
5286 		if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
5287 			goto aux_unlock;
5288 
5289 		if (!is_power_of_2(nr_pages))
5290 			goto aux_unlock;
5291 
5292 		if (!atomic_inc_not_zero(&rb->mmap_count))
5293 			goto aux_unlock;
5294 
5295 		if (rb_has_aux(rb)) {
5296 			atomic_inc(&rb->aux_mmap_count);
5297 			ret = 0;
5298 			goto unlock;
5299 		}
5300 
5301 		atomic_set(&rb->aux_mmap_count, 1);
5302 		user_extra = nr_pages;
5303 
5304 		goto accounting;
5305 	}
5306 
5307 	/*
5308 	 * If we have rb pages ensure they're a power-of-two number, so we
5309 	 * can do bitmasks instead of modulo.
5310 	 */
5311 	if (nr_pages != 0 && !is_power_of_2(nr_pages))
5312 		return -EINVAL;
5313 
5314 	if (vma_size != PAGE_SIZE * (1 + nr_pages))
5315 		return -EINVAL;
5316 
5317 	WARN_ON_ONCE(event->ctx->parent_ctx);
5318 again:
5319 	mutex_lock(&event->mmap_mutex);
5320 	if (event->rb) {
5321 		if (event->rb->nr_pages != nr_pages) {
5322 			ret = -EINVAL;
5323 			goto unlock;
5324 		}
5325 
5326 		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
5327 			/*
5328 			 * Raced against perf_mmap_close() through
5329 			 * perf_event_set_output(). Try again, hope for better
5330 			 * luck.
5331 			 */
5332 			mutex_unlock(&event->mmap_mutex);
5333 			goto again;
5334 		}
5335 
5336 		goto unlock;
5337 	}
5338 
5339 	user_extra = nr_pages + 1;
5340 
5341 accounting:
5342 	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
5343 
5344 	/*
5345 	 * Increase the limit linearly with more CPUs:
5346 	 */
5347 	user_lock_limit *= num_online_cpus();
5348 
5349 	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
5350 
5351 	if (user_locked > user_lock_limit)
5352 		extra = user_locked - user_lock_limit;
5353 
5354 	lock_limit = rlimit(RLIMIT_MEMLOCK);
5355 	lock_limit >>= PAGE_SHIFT;
5356 	locked = vma->vm_mm->pinned_vm + extra;
5357 
5358 	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
5359 		!capable(CAP_IPC_LOCK)) {
5360 		ret = -EPERM;
5361 		goto unlock;
5362 	}
5363 
5364 	WARN_ON(!rb && event->rb);
5365 
5366 	if (vma->vm_flags & VM_WRITE)
5367 		flags |= RING_BUFFER_WRITABLE;
5368 
5369 	if (!rb) {
5370 		rb = rb_alloc(nr_pages,
5371 			      event->attr.watermark ? event->attr.wakeup_watermark : 0,
5372 			      event->cpu, flags);
5373 
5374 		if (!rb) {
5375 			ret = -ENOMEM;
5376 			goto unlock;
5377 		}
5378 
5379 		atomic_set(&rb->mmap_count, 1);
5380 		rb->mmap_user = get_current_user();
5381 		rb->mmap_locked = extra;
5382 
5383 		ring_buffer_attach(event, rb);
5384 
5385 		perf_event_init_userpage(event);
5386 		perf_event_update_userpage(event);
5387 	} else {
5388 		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
5389 				   event->attr.aux_watermark, flags);
5390 		if (!ret)
5391 			rb->aux_mmap_locked = extra;
5392 	}
5393 
5394 unlock:
5395 	if (!ret) {
5396 		atomic_long_add(user_extra, &user->locked_vm);
5397 		vma->vm_mm->pinned_vm += extra;
5398 
5399 		atomic_inc(&event->mmap_count);
5400 	} else if (rb) {
5401 		atomic_dec(&rb->mmap_count);
5402 	}
5403 aux_unlock:
5404 	mutex_unlock(&event->mmap_mutex);
5405 
5406 	/*
5407 	 * Since pinned accounting is per vm we cannot allow fork() to copy our
5408 	 * vma.
5409 	 */
5410 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
5411 	vma->vm_ops = &perf_mmap_vmops;
5412 
5413 	if (event->pmu->event_mapped)
5414 		event->pmu->event_mapped(event);
5415 
5416 	return ret;
5417 }
5418 
5419 static int perf_fasync(int fd, struct file *filp, int on)
5420 {
5421 	struct inode *inode = file_inode(filp);
5422 	struct perf_event *event = filp->private_data;
5423 	int retval;
5424 
5425 	inode_lock(inode);
5426 	retval = fasync_helper(fd, filp, on, &event->fasync);
5427 	inode_unlock(inode);
5428 
5429 	if (retval < 0)
5430 		return retval;
5431 
5432 	return 0;
5433 }
5434 
5435 static const struct file_operations perf_fops = {
5436 	.llseek			= no_llseek,
5437 	.release		= perf_release,
5438 	.read			= perf_read,
5439 	.poll			= perf_poll,
5440 	.unlocked_ioctl		= perf_ioctl,
5441 	.compat_ioctl		= perf_compat_ioctl,
5442 	.mmap			= perf_mmap,
5443 	.fasync			= perf_fasync,
5444 };
5445 
5446 /*
5447  * Perf event wakeup
5448  *
5449  * If there's data, ensure we set the poll() state and publish everything
5450  * to user-space before waking everybody up.
5451  */
5452 
5453 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
5454 {
5455 	/* only the parent has fasync state */
5456 	if (event->parent)
5457 		event = event->parent;
5458 	return &event->fasync;
5459 }
5460 
5461 void perf_event_wakeup(struct perf_event *event)
5462 {
5463 	ring_buffer_wakeup(event);
5464 
5465 	if (event->pending_kill) {
5466 		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
5467 		event->pending_kill = 0;
5468 	}
5469 }
5470 
5471 static void perf_pending_event(struct irq_work *entry)
5472 {
5473 	struct perf_event *event = container_of(entry,
5474 			struct perf_event, pending);
5475 	int rctx;
5476 
5477 	rctx = perf_swevent_get_recursion_context();
5478 	/*
5479 	 * If we 'fail' here, that's OK, it means recursion is already disabled
5480 	 * and we won't recurse 'further'.
5481 	 */
5482 
5483 	if (event->pending_disable) {
5484 		event->pending_disable = 0;
5485 		perf_event_disable_local(event);
5486 	}
5487 
5488 	if (event->pending_wakeup) {
5489 		event->pending_wakeup = 0;
5490 		perf_event_wakeup(event);
5491 	}
5492 
5493 	if (rctx >= 0)
5494 		perf_swevent_put_recursion_context(rctx);
5495 }
5496 
5497 /*
5498  * We assume there is only KVM supporting the callbacks.
5499  * Later on, we might change it to a list if there is
5500  * another virtualization implementation supporting the callbacks.
5501  */
5502 struct perf_guest_info_callbacks *perf_guest_cbs;
5503 
5504 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5505 {
5506 	perf_guest_cbs = cbs;
5507 	return 0;
5508 }
5509 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
5510 
5511 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5512 {
5513 	perf_guest_cbs = NULL;
5514 	return 0;
5515 }
5516 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
5517 
5518 static void
5519 perf_output_sample_regs(struct perf_output_handle *handle,
5520 			struct pt_regs *regs, u64 mask)
5521 {
5522 	int bit;
5523 	DECLARE_BITMAP(_mask, 64);
5524 
5525 	bitmap_from_u64(_mask, mask);
5526 	for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
5527 		u64 val;
5528 
5529 		val = perf_reg_value(regs, bit);
5530 		perf_output_put(handle, val);
5531 	}
5532 }
5533 
5534 static void perf_sample_regs_user(struct perf_regs *regs_user,
5535 				  struct pt_regs *regs,
5536 				  struct pt_regs *regs_user_copy)
5537 {
5538 	if (user_mode(regs)) {
5539 		regs_user->abi = perf_reg_abi(current);
5540 		regs_user->regs = regs;
5541 	} else if (current->mm) {
5542 		perf_get_regs_user(regs_user, regs, regs_user_copy);
5543 	} else {
5544 		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
5545 		regs_user->regs = NULL;
5546 	}
5547 }
5548 
5549 static void perf_sample_regs_intr(struct perf_regs *regs_intr,
5550 				  struct pt_regs *regs)
5551 {
5552 	regs_intr->regs = regs;
5553 	regs_intr->abi  = perf_reg_abi(current);
5554 }
5555 
5556 
5557 /*
5558  * Get remaining task size from user stack pointer.
5559  *
5560  * It'd be better to take stack vma map and limit this more
5561  * precisly, but there's no way to get it safely under interrupt,
5562  * so using TASK_SIZE as limit.
5563  */
5564 static u64 perf_ustack_task_size(struct pt_regs *regs)
5565 {
5566 	unsigned long addr = perf_user_stack_pointer(regs);
5567 
5568 	if (!addr || addr >= TASK_SIZE)
5569 		return 0;
5570 
5571 	return TASK_SIZE - addr;
5572 }
5573 
5574 static u16
5575 perf_sample_ustack_size(u16 stack_size, u16 header_size,
5576 			struct pt_regs *regs)
5577 {
5578 	u64 task_size;
5579 
5580 	/* No regs, no stack pointer, no dump. */
5581 	if (!regs)
5582 		return 0;
5583 
5584 	/*
5585 	 * Check if we fit in with the requested stack size into the:
5586 	 * - TASK_SIZE
5587 	 *   If we don't, we limit the size to the TASK_SIZE.
5588 	 *
5589 	 * - remaining sample size
5590 	 *   If we don't, we customize the stack size to
5591 	 *   fit in to the remaining sample size.
5592 	 */
5593 
5594 	task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
5595 	stack_size = min(stack_size, (u16) task_size);
5596 
5597 	/* Current header size plus static size and dynamic size. */
5598 	header_size += 2 * sizeof(u64);
5599 
5600 	/* Do we fit in with the current stack dump size? */
5601 	if ((u16) (header_size + stack_size) < header_size) {
5602 		/*
5603 		 * If we overflow the maximum size for the sample,
5604 		 * we customize the stack dump size to fit in.
5605 		 */
5606 		stack_size = USHRT_MAX - header_size - sizeof(u64);
5607 		stack_size = round_up(stack_size, sizeof(u64));
5608 	}
5609 
5610 	return stack_size;
5611 }
5612 
5613 static void
5614 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5615 			  struct pt_regs *regs)
5616 {
5617 	/* Case of a kernel thread, nothing to dump */
5618 	if (!regs) {
5619 		u64 size = 0;
5620 		perf_output_put(handle, size);
5621 	} else {
5622 		unsigned long sp;
5623 		unsigned int rem;
5624 		u64 dyn_size;
5625 
5626 		/*
5627 		 * We dump:
5628 		 * static size
5629 		 *   - the size requested by user or the best one we can fit
5630 		 *     in to the sample max size
5631 		 * data
5632 		 *   - user stack dump data
5633 		 * dynamic size
5634 		 *   - the actual dumped size
5635 		 */
5636 
5637 		/* Static size. */
5638 		perf_output_put(handle, dump_size);
5639 
5640 		/* Data. */
5641 		sp = perf_user_stack_pointer(regs);
5642 		rem = __output_copy_user(handle, (void *) sp, dump_size);
5643 		dyn_size = dump_size - rem;
5644 
5645 		perf_output_skip(handle, rem);
5646 
5647 		/* Dynamic size. */
5648 		perf_output_put(handle, dyn_size);
5649 	}
5650 }
5651 
5652 static void __perf_event_header__init_id(struct perf_event_header *header,
5653 					 struct perf_sample_data *data,
5654 					 struct perf_event *event)
5655 {
5656 	u64 sample_type = event->attr.sample_type;
5657 
5658 	data->type = sample_type;
5659 	header->size += event->id_header_size;
5660 
5661 	if (sample_type & PERF_SAMPLE_TID) {
5662 		/* namespace issues */
5663 		data->tid_entry.pid = perf_event_pid(event, current);
5664 		data->tid_entry.tid = perf_event_tid(event, current);
5665 	}
5666 
5667 	if (sample_type & PERF_SAMPLE_TIME)
5668 		data->time = perf_event_clock(event);
5669 
5670 	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
5671 		data->id = primary_event_id(event);
5672 
5673 	if (sample_type & PERF_SAMPLE_STREAM_ID)
5674 		data->stream_id = event->id;
5675 
5676 	if (sample_type & PERF_SAMPLE_CPU) {
5677 		data->cpu_entry.cpu	 = raw_smp_processor_id();
5678 		data->cpu_entry.reserved = 0;
5679 	}
5680 }
5681 
5682 void perf_event_header__init_id(struct perf_event_header *header,
5683 				struct perf_sample_data *data,
5684 				struct perf_event *event)
5685 {
5686 	if (event->attr.sample_id_all)
5687 		__perf_event_header__init_id(header, data, event);
5688 }
5689 
5690 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
5691 					   struct perf_sample_data *data)
5692 {
5693 	u64 sample_type = data->type;
5694 
5695 	if (sample_type & PERF_SAMPLE_TID)
5696 		perf_output_put(handle, data->tid_entry);
5697 
5698 	if (sample_type & PERF_SAMPLE_TIME)
5699 		perf_output_put(handle, data->time);
5700 
5701 	if (sample_type & PERF_SAMPLE_ID)
5702 		perf_output_put(handle, data->id);
5703 
5704 	if (sample_type & PERF_SAMPLE_STREAM_ID)
5705 		perf_output_put(handle, data->stream_id);
5706 
5707 	if (sample_type & PERF_SAMPLE_CPU)
5708 		perf_output_put(handle, data->cpu_entry);
5709 
5710 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
5711 		perf_output_put(handle, data->id);
5712 }
5713 
5714 void perf_event__output_id_sample(struct perf_event *event,
5715 				  struct perf_output_handle *handle,
5716 				  struct perf_sample_data *sample)
5717 {
5718 	if (event->attr.sample_id_all)
5719 		__perf_event__output_id_sample(handle, sample);
5720 }
5721 
5722 static void perf_output_read_one(struct perf_output_handle *handle,
5723 				 struct perf_event *event,
5724 				 u64 enabled, u64 running)
5725 {
5726 	u64 read_format = event->attr.read_format;
5727 	u64 values[4];
5728 	int n = 0;
5729 
5730 	values[n++] = perf_event_count(event);
5731 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
5732 		values[n++] = enabled +
5733 			atomic64_read(&event->child_total_time_enabled);
5734 	}
5735 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
5736 		values[n++] = running +
5737 			atomic64_read(&event->child_total_time_running);
5738 	}
5739 	if (read_format & PERF_FORMAT_ID)
5740 		values[n++] = primary_event_id(event);
5741 
5742 	__output_copy(handle, values, n * sizeof(u64));
5743 }
5744 
5745 static void perf_output_read_group(struct perf_output_handle *handle,
5746 			    struct perf_event *event,
5747 			    u64 enabled, u64 running)
5748 {
5749 	struct perf_event *leader = event->group_leader, *sub;
5750 	u64 read_format = event->attr.read_format;
5751 	u64 values[5];
5752 	int n = 0;
5753 
5754 	values[n++] = 1 + leader->nr_siblings;
5755 
5756 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
5757 		values[n++] = enabled;
5758 
5759 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
5760 		values[n++] = running;
5761 
5762 	if (leader != event)
5763 		leader->pmu->read(leader);
5764 
5765 	values[n++] = perf_event_count(leader);
5766 	if (read_format & PERF_FORMAT_ID)
5767 		values[n++] = primary_event_id(leader);
5768 
5769 	__output_copy(handle, values, n * sizeof(u64));
5770 
5771 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
5772 		n = 0;
5773 
5774 		if ((sub != event) &&
5775 		    (sub->state == PERF_EVENT_STATE_ACTIVE))
5776 			sub->pmu->read(sub);
5777 
5778 		values[n++] = perf_event_count(sub);
5779 		if (read_format & PERF_FORMAT_ID)
5780 			values[n++] = primary_event_id(sub);
5781 
5782 		__output_copy(handle, values, n * sizeof(u64));
5783 	}
5784 }
5785 
5786 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
5787 				 PERF_FORMAT_TOTAL_TIME_RUNNING)
5788 
5789 /*
5790  * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
5791  *
5792  * The problem is that its both hard and excessively expensive to iterate the
5793  * child list, not to mention that its impossible to IPI the children running
5794  * on another CPU, from interrupt/NMI context.
5795  */
5796 static void perf_output_read(struct perf_output_handle *handle,
5797 			     struct perf_event *event)
5798 {
5799 	u64 enabled = 0, running = 0, now;
5800 	u64 read_format = event->attr.read_format;
5801 
5802 	/*
5803 	 * compute total_time_enabled, total_time_running
5804 	 * based on snapshot values taken when the event
5805 	 * was last scheduled in.
5806 	 *
5807 	 * we cannot simply called update_context_time()
5808 	 * because of locking issue as we are called in
5809 	 * NMI context
5810 	 */
5811 	if (read_format & PERF_FORMAT_TOTAL_TIMES)
5812 		calc_timer_values(event, &now, &enabled, &running);
5813 
5814 	if (event->attr.read_format & PERF_FORMAT_GROUP)
5815 		perf_output_read_group(handle, event, enabled, running);
5816 	else
5817 		perf_output_read_one(handle, event, enabled, running);
5818 }
5819 
5820 void perf_output_sample(struct perf_output_handle *handle,
5821 			struct perf_event_header *header,
5822 			struct perf_sample_data *data,
5823 			struct perf_event *event)
5824 {
5825 	u64 sample_type = data->type;
5826 
5827 	perf_output_put(handle, *header);
5828 
5829 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
5830 		perf_output_put(handle, data->id);
5831 
5832 	if (sample_type & PERF_SAMPLE_IP)
5833 		perf_output_put(handle, data->ip);
5834 
5835 	if (sample_type & PERF_SAMPLE_TID)
5836 		perf_output_put(handle, data->tid_entry);
5837 
5838 	if (sample_type & PERF_SAMPLE_TIME)
5839 		perf_output_put(handle, data->time);
5840 
5841 	if (sample_type & PERF_SAMPLE_ADDR)
5842 		perf_output_put(handle, data->addr);
5843 
5844 	if (sample_type & PERF_SAMPLE_ID)
5845 		perf_output_put(handle, data->id);
5846 
5847 	if (sample_type & PERF_SAMPLE_STREAM_ID)
5848 		perf_output_put(handle, data->stream_id);
5849 
5850 	if (sample_type & PERF_SAMPLE_CPU)
5851 		perf_output_put(handle, data->cpu_entry);
5852 
5853 	if (sample_type & PERF_SAMPLE_PERIOD)
5854 		perf_output_put(handle, data->period);
5855 
5856 	if (sample_type & PERF_SAMPLE_READ)
5857 		perf_output_read(handle, event);
5858 
5859 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5860 		if (data->callchain) {
5861 			int size = 1;
5862 
5863 			if (data->callchain)
5864 				size += data->callchain->nr;
5865 
5866 			size *= sizeof(u64);
5867 
5868 			__output_copy(handle, data->callchain, size);
5869 		} else {
5870 			u64 nr = 0;
5871 			perf_output_put(handle, nr);
5872 		}
5873 	}
5874 
5875 	if (sample_type & PERF_SAMPLE_RAW) {
5876 		struct perf_raw_record *raw = data->raw;
5877 
5878 		if (raw) {
5879 			struct perf_raw_frag *frag = &raw->frag;
5880 
5881 			perf_output_put(handle, raw->size);
5882 			do {
5883 				if (frag->copy) {
5884 					__output_custom(handle, frag->copy,
5885 							frag->data, frag->size);
5886 				} else {
5887 					__output_copy(handle, frag->data,
5888 						      frag->size);
5889 				}
5890 				if (perf_raw_frag_last(frag))
5891 					break;
5892 				frag = frag->next;
5893 			} while (1);
5894 			if (frag->pad)
5895 				__output_skip(handle, NULL, frag->pad);
5896 		} else {
5897 			struct {
5898 				u32	size;
5899 				u32	data;
5900 			} raw = {
5901 				.size = sizeof(u32),
5902 				.data = 0,
5903 			};
5904 			perf_output_put(handle, raw);
5905 		}
5906 	}
5907 
5908 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5909 		if (data->br_stack) {
5910 			size_t size;
5911 
5912 			size = data->br_stack->nr
5913 			     * sizeof(struct perf_branch_entry);
5914 
5915 			perf_output_put(handle, data->br_stack->nr);
5916 			perf_output_copy(handle, data->br_stack->entries, size);
5917 		} else {
5918 			/*
5919 			 * we always store at least the value of nr
5920 			 */
5921 			u64 nr = 0;
5922 			perf_output_put(handle, nr);
5923 		}
5924 	}
5925 
5926 	if (sample_type & PERF_SAMPLE_REGS_USER) {
5927 		u64 abi = data->regs_user.abi;
5928 
5929 		/*
5930 		 * If there are no regs to dump, notice it through
5931 		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5932 		 */
5933 		perf_output_put(handle, abi);
5934 
5935 		if (abi) {
5936 			u64 mask = event->attr.sample_regs_user;
5937 			perf_output_sample_regs(handle,
5938 						data->regs_user.regs,
5939 						mask);
5940 		}
5941 	}
5942 
5943 	if (sample_type & PERF_SAMPLE_STACK_USER) {
5944 		perf_output_sample_ustack(handle,
5945 					  data->stack_user_size,
5946 					  data->regs_user.regs);
5947 	}
5948 
5949 	if (sample_type & PERF_SAMPLE_WEIGHT)
5950 		perf_output_put(handle, data->weight);
5951 
5952 	if (sample_type & PERF_SAMPLE_DATA_SRC)
5953 		perf_output_put(handle, data->data_src.val);
5954 
5955 	if (sample_type & PERF_SAMPLE_TRANSACTION)
5956 		perf_output_put(handle, data->txn);
5957 
5958 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
5959 		u64 abi = data->regs_intr.abi;
5960 		/*
5961 		 * If there are no regs to dump, notice it through
5962 		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5963 		 */
5964 		perf_output_put(handle, abi);
5965 
5966 		if (abi) {
5967 			u64 mask = event->attr.sample_regs_intr;
5968 
5969 			perf_output_sample_regs(handle,
5970 						data->regs_intr.regs,
5971 						mask);
5972 		}
5973 	}
5974 
5975 	if (!event->attr.watermark) {
5976 		int wakeup_events = event->attr.wakeup_events;
5977 
5978 		if (wakeup_events) {
5979 			struct ring_buffer *rb = handle->rb;
5980 			int events = local_inc_return(&rb->events);
5981 
5982 			if (events >= wakeup_events) {
5983 				local_sub(wakeup_events, &rb->events);
5984 				local_inc(&rb->wakeup);
5985 			}
5986 		}
5987 	}
5988 }
5989 
5990 void perf_prepare_sample(struct perf_event_header *header,
5991 			 struct perf_sample_data *data,
5992 			 struct perf_event *event,
5993 			 struct pt_regs *regs)
5994 {
5995 	u64 sample_type = event->attr.sample_type;
5996 
5997 	header->type = PERF_RECORD_SAMPLE;
5998 	header->size = sizeof(*header) + event->header_size;
5999 
6000 	header->misc = 0;
6001 	header->misc |= perf_misc_flags(regs);
6002 
6003 	__perf_event_header__init_id(header, data, event);
6004 
6005 	if (sample_type & PERF_SAMPLE_IP)
6006 		data->ip = perf_instruction_pointer(regs);
6007 
6008 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
6009 		int size = 1;
6010 
6011 		data->callchain = perf_callchain(event, regs);
6012 
6013 		if (data->callchain)
6014 			size += data->callchain->nr;
6015 
6016 		header->size += size * sizeof(u64);
6017 	}
6018 
6019 	if (sample_type & PERF_SAMPLE_RAW) {
6020 		struct perf_raw_record *raw = data->raw;
6021 		int size;
6022 
6023 		if (raw) {
6024 			struct perf_raw_frag *frag = &raw->frag;
6025 			u32 sum = 0;
6026 
6027 			do {
6028 				sum += frag->size;
6029 				if (perf_raw_frag_last(frag))
6030 					break;
6031 				frag = frag->next;
6032 			} while (1);
6033 
6034 			size = round_up(sum + sizeof(u32), sizeof(u64));
6035 			raw->size = size - sizeof(u32);
6036 			frag->pad = raw->size - sum;
6037 		} else {
6038 			size = sizeof(u64);
6039 		}
6040 
6041 		header->size += size;
6042 	}
6043 
6044 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
6045 		int size = sizeof(u64); /* nr */
6046 		if (data->br_stack) {
6047 			size += data->br_stack->nr
6048 			      * sizeof(struct perf_branch_entry);
6049 		}
6050 		header->size += size;
6051 	}
6052 
6053 	if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
6054 		perf_sample_regs_user(&data->regs_user, regs,
6055 				      &data->regs_user_copy);
6056 
6057 	if (sample_type & PERF_SAMPLE_REGS_USER) {
6058 		/* regs dump ABI info */
6059 		int size = sizeof(u64);
6060 
6061 		if (data->regs_user.regs) {
6062 			u64 mask = event->attr.sample_regs_user;
6063 			size += hweight64(mask) * sizeof(u64);
6064 		}
6065 
6066 		header->size += size;
6067 	}
6068 
6069 	if (sample_type & PERF_SAMPLE_STACK_USER) {
6070 		/*
6071 		 * Either we need PERF_SAMPLE_STACK_USER bit to be allways
6072 		 * processed as the last one or have additional check added
6073 		 * in case new sample type is added, because we could eat
6074 		 * up the rest of the sample size.
6075 		 */
6076 		u16 stack_size = event->attr.sample_stack_user;
6077 		u16 size = sizeof(u64);
6078 
6079 		stack_size = perf_sample_ustack_size(stack_size, header->size,
6080 						     data->regs_user.regs);
6081 
6082 		/*
6083 		 * If there is something to dump, add space for the dump
6084 		 * itself and for the field that tells the dynamic size,
6085 		 * which is how many have been actually dumped.
6086 		 */
6087 		if (stack_size)
6088 			size += sizeof(u64) + stack_size;
6089 
6090 		data->stack_user_size = stack_size;
6091 		header->size += size;
6092 	}
6093 
6094 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
6095 		/* regs dump ABI info */
6096 		int size = sizeof(u64);
6097 
6098 		perf_sample_regs_intr(&data->regs_intr, regs);
6099 
6100 		if (data->regs_intr.regs) {
6101 			u64 mask = event->attr.sample_regs_intr;
6102 
6103 			size += hweight64(mask) * sizeof(u64);
6104 		}
6105 
6106 		header->size += size;
6107 	}
6108 }
6109 
6110 static void __always_inline
6111 __perf_event_output(struct perf_event *event,
6112 		    struct perf_sample_data *data,
6113 		    struct pt_regs *regs,
6114 		    int (*output_begin)(struct perf_output_handle *,
6115 					struct perf_event *,
6116 					unsigned int))
6117 {
6118 	struct perf_output_handle handle;
6119 	struct perf_event_header header;
6120 
6121 	/* protect the callchain buffers */
6122 	rcu_read_lock();
6123 
6124 	perf_prepare_sample(&header, data, event, regs);
6125 
6126 	if (output_begin(&handle, event, header.size))
6127 		goto exit;
6128 
6129 	perf_output_sample(&handle, &header, data, event);
6130 
6131 	perf_output_end(&handle);
6132 
6133 exit:
6134 	rcu_read_unlock();
6135 }
6136 
6137 void
6138 perf_event_output_forward(struct perf_event *event,
6139 			 struct perf_sample_data *data,
6140 			 struct pt_regs *regs)
6141 {
6142 	__perf_event_output(event, data, regs, perf_output_begin_forward);
6143 }
6144 
6145 void
6146 perf_event_output_backward(struct perf_event *event,
6147 			   struct perf_sample_data *data,
6148 			   struct pt_regs *regs)
6149 {
6150 	__perf_event_output(event, data, regs, perf_output_begin_backward);
6151 }
6152 
6153 void
6154 perf_event_output(struct perf_event *event,
6155 		  struct perf_sample_data *data,
6156 		  struct pt_regs *regs)
6157 {
6158 	__perf_event_output(event, data, regs, perf_output_begin);
6159 }
6160 
6161 /*
6162  * read event_id
6163  */
6164 
6165 struct perf_read_event {
6166 	struct perf_event_header	header;
6167 
6168 	u32				pid;
6169 	u32				tid;
6170 };
6171 
6172 static void
6173 perf_event_read_event(struct perf_event *event,
6174 			struct task_struct *task)
6175 {
6176 	struct perf_output_handle handle;
6177 	struct perf_sample_data sample;
6178 	struct perf_read_event read_event = {
6179 		.header = {
6180 			.type = PERF_RECORD_READ,
6181 			.misc = 0,
6182 			.size = sizeof(read_event) + event->read_size,
6183 		},
6184 		.pid = perf_event_pid(event, task),
6185 		.tid = perf_event_tid(event, task),
6186 	};
6187 	int ret;
6188 
6189 	perf_event_header__init_id(&read_event.header, &sample, event);
6190 	ret = perf_output_begin(&handle, event, read_event.header.size);
6191 	if (ret)
6192 		return;
6193 
6194 	perf_output_put(&handle, read_event);
6195 	perf_output_read(&handle, event);
6196 	perf_event__output_id_sample(event, &handle, &sample);
6197 
6198 	perf_output_end(&handle);
6199 }
6200 
6201 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
6202 
6203 static void
6204 perf_iterate_ctx(struct perf_event_context *ctx,
6205 		   perf_iterate_f output,
6206 		   void *data, bool all)
6207 {
6208 	struct perf_event *event;
6209 
6210 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
6211 		if (!all) {
6212 			if (event->state < PERF_EVENT_STATE_INACTIVE)
6213 				continue;
6214 			if (!event_filter_match(event))
6215 				continue;
6216 		}
6217 
6218 		output(event, data);
6219 	}
6220 }
6221 
6222 static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
6223 {
6224 	struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
6225 	struct perf_event *event;
6226 
6227 	list_for_each_entry_rcu(event, &pel->list, sb_list) {
6228 		/*
6229 		 * Skip events that are not fully formed yet; ensure that
6230 		 * if we observe event->ctx, both event and ctx will be
6231 		 * complete enough. See perf_install_in_context().
6232 		 */
6233 		if (!smp_load_acquire(&event->ctx))
6234 			continue;
6235 
6236 		if (event->state < PERF_EVENT_STATE_INACTIVE)
6237 			continue;
6238 		if (!event_filter_match(event))
6239 			continue;
6240 		output(event, data);
6241 	}
6242 }
6243 
6244 /*
6245  * Iterate all events that need to receive side-band events.
6246  *
6247  * For new callers; ensure that account_pmu_sb_event() includes
6248  * your event, otherwise it might not get delivered.
6249  */
6250 static void
6251 perf_iterate_sb(perf_iterate_f output, void *data,
6252 	       struct perf_event_context *task_ctx)
6253 {
6254 	struct perf_event_context *ctx;
6255 	int ctxn;
6256 
6257 	rcu_read_lock();
6258 	preempt_disable();
6259 
6260 	/*
6261 	 * If we have task_ctx != NULL we only notify the task context itself.
6262 	 * The task_ctx is set only for EXIT events before releasing task
6263 	 * context.
6264 	 */
6265 	if (task_ctx) {
6266 		perf_iterate_ctx(task_ctx, output, data, false);
6267 		goto done;
6268 	}
6269 
6270 	perf_iterate_sb_cpu(output, data);
6271 
6272 	for_each_task_context_nr(ctxn) {
6273 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
6274 		if (ctx)
6275 			perf_iterate_ctx(ctx, output, data, false);
6276 	}
6277 done:
6278 	preempt_enable();
6279 	rcu_read_unlock();
6280 }
6281 
6282 /*
6283  * Clear all file-based filters at exec, they'll have to be
6284  * re-instated when/if these objects are mmapped again.
6285  */
6286 static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
6287 {
6288 	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6289 	struct perf_addr_filter *filter;
6290 	unsigned int restart = 0, count = 0;
6291 	unsigned long flags;
6292 
6293 	if (!has_addr_filter(event))
6294 		return;
6295 
6296 	raw_spin_lock_irqsave(&ifh->lock, flags);
6297 	list_for_each_entry(filter, &ifh->list, entry) {
6298 		if (filter->inode) {
6299 			event->addr_filters_offs[count] = 0;
6300 			restart++;
6301 		}
6302 
6303 		count++;
6304 	}
6305 
6306 	if (restart)
6307 		event->addr_filters_gen++;
6308 	raw_spin_unlock_irqrestore(&ifh->lock, flags);
6309 
6310 	if (restart)
6311 		perf_event_stop(event, 1);
6312 }
6313 
6314 void perf_event_exec(void)
6315 {
6316 	struct perf_event_context *ctx;
6317 	int ctxn;
6318 
6319 	rcu_read_lock();
6320 	for_each_task_context_nr(ctxn) {
6321 		ctx = current->perf_event_ctxp[ctxn];
6322 		if (!ctx)
6323 			continue;
6324 
6325 		perf_event_enable_on_exec(ctxn);
6326 
6327 		perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
6328 				   true);
6329 	}
6330 	rcu_read_unlock();
6331 }
6332 
6333 struct remote_output {
6334 	struct ring_buffer	*rb;
6335 	int			err;
6336 };
6337 
6338 static void __perf_event_output_stop(struct perf_event *event, void *data)
6339 {
6340 	struct perf_event *parent = event->parent;
6341 	struct remote_output *ro = data;
6342 	struct ring_buffer *rb = ro->rb;
6343 	struct stop_event_data sd = {
6344 		.event	= event,
6345 	};
6346 
6347 	if (!has_aux(event))
6348 		return;
6349 
6350 	if (!parent)
6351 		parent = event;
6352 
6353 	/*
6354 	 * In case of inheritance, it will be the parent that links to the
6355 	 * ring-buffer, but it will be the child that's actually using it.
6356 	 *
6357 	 * We are using event::rb to determine if the event should be stopped,
6358 	 * however this may race with ring_buffer_attach() (through set_output),
6359 	 * which will make us skip the event that actually needs to be stopped.
6360 	 * So ring_buffer_attach() has to stop an aux event before re-assigning
6361 	 * its rb pointer.
6362 	 */
6363 	if (rcu_dereference(parent->rb) == rb)
6364 		ro->err = __perf_event_stop(&sd);
6365 }
6366 
6367 static int __perf_pmu_output_stop(void *info)
6368 {
6369 	struct perf_event *event = info;
6370 	struct pmu *pmu = event->pmu;
6371 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6372 	struct remote_output ro = {
6373 		.rb	= event->rb,
6374 	};
6375 
6376 	rcu_read_lock();
6377 	perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
6378 	if (cpuctx->task_ctx)
6379 		perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
6380 				   &ro, false);
6381 	rcu_read_unlock();
6382 
6383 	return ro.err;
6384 }
6385 
6386 static void perf_pmu_output_stop(struct perf_event *event)
6387 {
6388 	struct perf_event *iter;
6389 	int err, cpu;
6390 
6391 restart:
6392 	rcu_read_lock();
6393 	list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
6394 		/*
6395 		 * For per-CPU events, we need to make sure that neither they
6396 		 * nor their children are running; for cpu==-1 events it's
6397 		 * sufficient to stop the event itself if it's active, since
6398 		 * it can't have children.
6399 		 */
6400 		cpu = iter->cpu;
6401 		if (cpu == -1)
6402 			cpu = READ_ONCE(iter->oncpu);
6403 
6404 		if (cpu == -1)
6405 			continue;
6406 
6407 		err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
6408 		if (err == -EAGAIN) {
6409 			rcu_read_unlock();
6410 			goto restart;
6411 		}
6412 	}
6413 	rcu_read_unlock();
6414 }
6415 
6416 /*
6417  * task tracking -- fork/exit
6418  *
6419  * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
6420  */
6421 
6422 struct perf_task_event {
6423 	struct task_struct		*task;
6424 	struct perf_event_context	*task_ctx;
6425 
6426 	struct {
6427 		struct perf_event_header	header;
6428 
6429 		u32				pid;
6430 		u32				ppid;
6431 		u32				tid;
6432 		u32				ptid;
6433 		u64				time;
6434 	} event_id;
6435 };
6436 
6437 static int perf_event_task_match(struct perf_event *event)
6438 {
6439 	return event->attr.comm  || event->attr.mmap ||
6440 	       event->attr.mmap2 || event->attr.mmap_data ||
6441 	       event->attr.task;
6442 }
6443 
6444 static void perf_event_task_output(struct perf_event *event,
6445 				   void *data)
6446 {
6447 	struct perf_task_event *task_event = data;
6448 	struct perf_output_handle handle;
6449 	struct perf_sample_data	sample;
6450 	struct task_struct *task = task_event->task;
6451 	int ret, size = task_event->event_id.header.size;
6452 
6453 	if (!perf_event_task_match(event))
6454 		return;
6455 
6456 	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
6457 
6458 	ret = perf_output_begin(&handle, event,
6459 				task_event->event_id.header.size);
6460 	if (ret)
6461 		goto out;
6462 
6463 	task_event->event_id.pid = perf_event_pid(event, task);
6464 	task_event->event_id.ppid = perf_event_pid(event, current);
6465 
6466 	task_event->event_id.tid = perf_event_tid(event, task);
6467 	task_event->event_id.ptid = perf_event_tid(event, current);
6468 
6469 	task_event->event_id.time = perf_event_clock(event);
6470 
6471 	perf_output_put(&handle, task_event->event_id);
6472 
6473 	perf_event__output_id_sample(event, &handle, &sample);
6474 
6475 	perf_output_end(&handle);
6476 out:
6477 	task_event->event_id.header.size = size;
6478 }
6479 
6480 static void perf_event_task(struct task_struct *task,
6481 			      struct perf_event_context *task_ctx,
6482 			      int new)
6483 {
6484 	struct perf_task_event task_event;
6485 
6486 	if (!atomic_read(&nr_comm_events) &&
6487 	    !atomic_read(&nr_mmap_events) &&
6488 	    !atomic_read(&nr_task_events))
6489 		return;
6490 
6491 	task_event = (struct perf_task_event){
6492 		.task	  = task,
6493 		.task_ctx = task_ctx,
6494 		.event_id    = {
6495 			.header = {
6496 				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
6497 				.misc = 0,
6498 				.size = sizeof(task_event.event_id),
6499 			},
6500 			/* .pid  */
6501 			/* .ppid */
6502 			/* .tid  */
6503 			/* .ptid */
6504 			/* .time */
6505 		},
6506 	};
6507 
6508 	perf_iterate_sb(perf_event_task_output,
6509 		       &task_event,
6510 		       task_ctx);
6511 }
6512 
6513 void perf_event_fork(struct task_struct *task)
6514 {
6515 	perf_event_task(task, NULL, 1);
6516 	perf_event_namespaces(task);
6517 }
6518 
6519 /*
6520  * comm tracking
6521  */
6522 
6523 struct perf_comm_event {
6524 	struct task_struct	*task;
6525 	char			*comm;
6526 	int			comm_size;
6527 
6528 	struct {
6529 		struct perf_event_header	header;
6530 
6531 		u32				pid;
6532 		u32				tid;
6533 	} event_id;
6534 };
6535 
6536 static int perf_event_comm_match(struct perf_event *event)
6537 {
6538 	return event->attr.comm;
6539 }
6540 
6541 static void perf_event_comm_output(struct perf_event *event,
6542 				   void *data)
6543 {
6544 	struct perf_comm_event *comm_event = data;
6545 	struct perf_output_handle handle;
6546 	struct perf_sample_data sample;
6547 	int size = comm_event->event_id.header.size;
6548 	int ret;
6549 
6550 	if (!perf_event_comm_match(event))
6551 		return;
6552 
6553 	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
6554 	ret = perf_output_begin(&handle, event,
6555 				comm_event->event_id.header.size);
6556 
6557 	if (ret)
6558 		goto out;
6559 
6560 	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
6561 	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
6562 
6563 	perf_output_put(&handle, comm_event->event_id);
6564 	__output_copy(&handle, comm_event->comm,
6565 				   comm_event->comm_size);
6566 
6567 	perf_event__output_id_sample(event, &handle, &sample);
6568 
6569 	perf_output_end(&handle);
6570 out:
6571 	comm_event->event_id.header.size = size;
6572 }
6573 
6574 static void perf_event_comm_event(struct perf_comm_event *comm_event)
6575 {
6576 	char comm[TASK_COMM_LEN];
6577 	unsigned int size;
6578 
6579 	memset(comm, 0, sizeof(comm));
6580 	strlcpy(comm, comm_event->task->comm, sizeof(comm));
6581 	size = ALIGN(strlen(comm)+1, sizeof(u64));
6582 
6583 	comm_event->comm = comm;
6584 	comm_event->comm_size = size;
6585 
6586 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
6587 
6588 	perf_iterate_sb(perf_event_comm_output,
6589 		       comm_event,
6590 		       NULL);
6591 }
6592 
6593 void perf_event_comm(struct task_struct *task, bool exec)
6594 {
6595 	struct perf_comm_event comm_event;
6596 
6597 	if (!atomic_read(&nr_comm_events))
6598 		return;
6599 
6600 	comm_event = (struct perf_comm_event){
6601 		.task	= task,
6602 		/* .comm      */
6603 		/* .comm_size */
6604 		.event_id  = {
6605 			.header = {
6606 				.type = PERF_RECORD_COMM,
6607 				.misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
6608 				/* .size */
6609 			},
6610 			/* .pid */
6611 			/* .tid */
6612 		},
6613 	};
6614 
6615 	perf_event_comm_event(&comm_event);
6616 }
6617 
6618 /*
6619  * namespaces tracking
6620  */
6621 
6622 struct perf_namespaces_event {
6623 	struct task_struct		*task;
6624 
6625 	struct {
6626 		struct perf_event_header	header;
6627 
6628 		u32				pid;
6629 		u32				tid;
6630 		u64				nr_namespaces;
6631 		struct perf_ns_link_info	link_info[NR_NAMESPACES];
6632 	} event_id;
6633 };
6634 
6635 static int perf_event_namespaces_match(struct perf_event *event)
6636 {
6637 	return event->attr.namespaces;
6638 }
6639 
6640 static void perf_event_namespaces_output(struct perf_event *event,
6641 					 void *data)
6642 {
6643 	struct perf_namespaces_event *namespaces_event = data;
6644 	struct perf_output_handle handle;
6645 	struct perf_sample_data sample;
6646 	int ret;
6647 
6648 	if (!perf_event_namespaces_match(event))
6649 		return;
6650 
6651 	perf_event_header__init_id(&namespaces_event->event_id.header,
6652 				   &sample, event);
6653 	ret = perf_output_begin(&handle, event,
6654 				namespaces_event->event_id.header.size);
6655 	if (ret)
6656 		return;
6657 
6658 	namespaces_event->event_id.pid = perf_event_pid(event,
6659 							namespaces_event->task);
6660 	namespaces_event->event_id.tid = perf_event_tid(event,
6661 							namespaces_event->task);
6662 
6663 	perf_output_put(&handle, namespaces_event->event_id);
6664 
6665 	perf_event__output_id_sample(event, &handle, &sample);
6666 
6667 	perf_output_end(&handle);
6668 }
6669 
6670 static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
6671 				   struct task_struct *task,
6672 				   const struct proc_ns_operations *ns_ops)
6673 {
6674 	struct path ns_path;
6675 	struct inode *ns_inode;
6676 	void *error;
6677 
6678 	error = ns_get_path(&ns_path, task, ns_ops);
6679 	if (!error) {
6680 		ns_inode = ns_path.dentry->d_inode;
6681 		ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
6682 		ns_link_info->ino = ns_inode->i_ino;
6683 	}
6684 }
6685 
6686 void perf_event_namespaces(struct task_struct *task)
6687 {
6688 	struct perf_namespaces_event namespaces_event;
6689 	struct perf_ns_link_info *ns_link_info;
6690 
6691 	if (!atomic_read(&nr_namespaces_events))
6692 		return;
6693 
6694 	namespaces_event = (struct perf_namespaces_event){
6695 		.task	= task,
6696 		.event_id  = {
6697 			.header = {
6698 				.type = PERF_RECORD_NAMESPACES,
6699 				.misc = 0,
6700 				.size = sizeof(namespaces_event.event_id),
6701 			},
6702 			/* .pid */
6703 			/* .tid */
6704 			.nr_namespaces = NR_NAMESPACES,
6705 			/* .link_info[NR_NAMESPACES] */
6706 		},
6707 	};
6708 
6709 	ns_link_info = namespaces_event.event_id.link_info;
6710 
6711 	perf_fill_ns_link_info(&ns_link_info[MNT_NS_INDEX],
6712 			       task, &mntns_operations);
6713 
6714 #ifdef CONFIG_USER_NS
6715 	perf_fill_ns_link_info(&ns_link_info[USER_NS_INDEX],
6716 			       task, &userns_operations);
6717 #endif
6718 #ifdef CONFIG_NET_NS
6719 	perf_fill_ns_link_info(&ns_link_info[NET_NS_INDEX],
6720 			       task, &netns_operations);
6721 #endif
6722 #ifdef CONFIG_UTS_NS
6723 	perf_fill_ns_link_info(&ns_link_info[UTS_NS_INDEX],
6724 			       task, &utsns_operations);
6725 #endif
6726 #ifdef CONFIG_IPC_NS
6727 	perf_fill_ns_link_info(&ns_link_info[IPC_NS_INDEX],
6728 			       task, &ipcns_operations);
6729 #endif
6730 #ifdef CONFIG_PID_NS
6731 	perf_fill_ns_link_info(&ns_link_info[PID_NS_INDEX],
6732 			       task, &pidns_operations);
6733 #endif
6734 #ifdef CONFIG_CGROUPS
6735 	perf_fill_ns_link_info(&ns_link_info[CGROUP_NS_INDEX],
6736 			       task, &cgroupns_operations);
6737 #endif
6738 
6739 	perf_iterate_sb(perf_event_namespaces_output,
6740 			&namespaces_event,
6741 			NULL);
6742 }
6743 
6744 /*
6745  * mmap tracking
6746  */
6747 
6748 struct perf_mmap_event {
6749 	struct vm_area_struct	*vma;
6750 
6751 	const char		*file_name;
6752 	int			file_size;
6753 	int			maj, min;
6754 	u64			ino;
6755 	u64			ino_generation;
6756 	u32			prot, flags;
6757 
6758 	struct {
6759 		struct perf_event_header	header;
6760 
6761 		u32				pid;
6762 		u32				tid;
6763 		u64				start;
6764 		u64				len;
6765 		u64				pgoff;
6766 	} event_id;
6767 };
6768 
6769 static int perf_event_mmap_match(struct perf_event *event,
6770 				 void *data)
6771 {
6772 	struct perf_mmap_event *mmap_event = data;
6773 	struct vm_area_struct *vma = mmap_event->vma;
6774 	int executable = vma->vm_flags & VM_EXEC;
6775 
6776 	return (!executable && event->attr.mmap_data) ||
6777 	       (executable && (event->attr.mmap || event->attr.mmap2));
6778 }
6779 
6780 static void perf_event_mmap_output(struct perf_event *event,
6781 				   void *data)
6782 {
6783 	struct perf_mmap_event *mmap_event = data;
6784 	struct perf_output_handle handle;
6785 	struct perf_sample_data sample;
6786 	int size = mmap_event->event_id.header.size;
6787 	int ret;
6788 
6789 	if (!perf_event_mmap_match(event, data))
6790 		return;
6791 
6792 	if (event->attr.mmap2) {
6793 		mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
6794 		mmap_event->event_id.header.size += sizeof(mmap_event->maj);
6795 		mmap_event->event_id.header.size += sizeof(mmap_event->min);
6796 		mmap_event->event_id.header.size += sizeof(mmap_event->ino);
6797 		mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
6798 		mmap_event->event_id.header.size += sizeof(mmap_event->prot);
6799 		mmap_event->event_id.header.size += sizeof(mmap_event->flags);
6800 	}
6801 
6802 	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
6803 	ret = perf_output_begin(&handle, event,
6804 				mmap_event->event_id.header.size);
6805 	if (ret)
6806 		goto out;
6807 
6808 	mmap_event->event_id.pid = perf_event_pid(event, current);
6809 	mmap_event->event_id.tid = perf_event_tid(event, current);
6810 
6811 	perf_output_put(&handle, mmap_event->event_id);
6812 
6813 	if (event->attr.mmap2) {
6814 		perf_output_put(&handle, mmap_event->maj);
6815 		perf_output_put(&handle, mmap_event->min);
6816 		perf_output_put(&handle, mmap_event->ino);
6817 		perf_output_put(&handle, mmap_event->ino_generation);
6818 		perf_output_put(&handle, mmap_event->prot);
6819 		perf_output_put(&handle, mmap_event->flags);
6820 	}
6821 
6822 	__output_copy(&handle, mmap_event->file_name,
6823 				   mmap_event->file_size);
6824 
6825 	perf_event__output_id_sample(event, &handle, &sample);
6826 
6827 	perf_output_end(&handle);
6828 out:
6829 	mmap_event->event_id.header.size = size;
6830 }
6831 
6832 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
6833 {
6834 	struct vm_area_struct *vma = mmap_event->vma;
6835 	struct file *file = vma->vm_file;
6836 	int maj = 0, min = 0;
6837 	u64 ino = 0, gen = 0;
6838 	u32 prot = 0, flags = 0;
6839 	unsigned int size;
6840 	char tmp[16];
6841 	char *buf = NULL;
6842 	char *name;
6843 
6844 	if (vma->vm_flags & VM_READ)
6845 		prot |= PROT_READ;
6846 	if (vma->vm_flags & VM_WRITE)
6847 		prot |= PROT_WRITE;
6848 	if (vma->vm_flags & VM_EXEC)
6849 		prot |= PROT_EXEC;
6850 
6851 	if (vma->vm_flags & VM_MAYSHARE)
6852 		flags = MAP_SHARED;
6853 	else
6854 		flags = MAP_PRIVATE;
6855 
6856 	if (vma->vm_flags & VM_DENYWRITE)
6857 		flags |= MAP_DENYWRITE;
6858 	if (vma->vm_flags & VM_MAYEXEC)
6859 		flags |= MAP_EXECUTABLE;
6860 	if (vma->vm_flags & VM_LOCKED)
6861 		flags |= MAP_LOCKED;
6862 	if (vma->vm_flags & VM_HUGETLB)
6863 		flags |= MAP_HUGETLB;
6864 
6865 	if (file) {
6866 		struct inode *inode;
6867 		dev_t dev;
6868 
6869 		buf = kmalloc(PATH_MAX, GFP_KERNEL);
6870 		if (!buf) {
6871 			name = "//enomem";
6872 			goto cpy_name;
6873 		}
6874 		/*
6875 		 * d_path() works from the end of the rb backwards, so we
6876 		 * need to add enough zero bytes after the string to handle
6877 		 * the 64bit alignment we do later.
6878 		 */
6879 		name = file_path(file, buf, PATH_MAX - sizeof(u64));
6880 		if (IS_ERR(name)) {
6881 			name = "//toolong";
6882 			goto cpy_name;
6883 		}
6884 		inode = file_inode(vma->vm_file);
6885 		dev = inode->i_sb->s_dev;
6886 		ino = inode->i_ino;
6887 		gen = inode->i_generation;
6888 		maj = MAJOR(dev);
6889 		min = MINOR(dev);
6890 
6891 		goto got_name;
6892 	} else {
6893 		if (vma->vm_ops && vma->vm_ops->name) {
6894 			name = (char *) vma->vm_ops->name(vma);
6895 			if (name)
6896 				goto cpy_name;
6897 		}
6898 
6899 		name = (char *)arch_vma_name(vma);
6900 		if (name)
6901 			goto cpy_name;
6902 
6903 		if (vma->vm_start <= vma->vm_mm->start_brk &&
6904 				vma->vm_end >= vma->vm_mm->brk) {
6905 			name = "[heap]";
6906 			goto cpy_name;
6907 		}
6908 		if (vma->vm_start <= vma->vm_mm->start_stack &&
6909 				vma->vm_end >= vma->vm_mm->start_stack) {
6910 			name = "[stack]";
6911 			goto cpy_name;
6912 		}
6913 
6914 		name = "//anon";
6915 		goto cpy_name;
6916 	}
6917 
6918 cpy_name:
6919 	strlcpy(tmp, name, sizeof(tmp));
6920 	name = tmp;
6921 got_name:
6922 	/*
6923 	 * Since our buffer works in 8 byte units we need to align our string
6924 	 * size to a multiple of 8. However, we must guarantee the tail end is
6925 	 * zero'd out to avoid leaking random bits to userspace.
6926 	 */
6927 	size = strlen(name)+1;
6928 	while (!IS_ALIGNED(size, sizeof(u64)))
6929 		name[size++] = '\0';
6930 
6931 	mmap_event->file_name = name;
6932 	mmap_event->file_size = size;
6933 	mmap_event->maj = maj;
6934 	mmap_event->min = min;
6935 	mmap_event->ino = ino;
6936 	mmap_event->ino_generation = gen;
6937 	mmap_event->prot = prot;
6938 	mmap_event->flags = flags;
6939 
6940 	if (!(vma->vm_flags & VM_EXEC))
6941 		mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
6942 
6943 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
6944 
6945 	perf_iterate_sb(perf_event_mmap_output,
6946 		       mmap_event,
6947 		       NULL);
6948 
6949 	kfree(buf);
6950 }
6951 
6952 /*
6953  * Check whether inode and address range match filter criteria.
6954  */
6955 static bool perf_addr_filter_match(struct perf_addr_filter *filter,
6956 				     struct file *file, unsigned long offset,
6957 				     unsigned long size)
6958 {
6959 	if (filter->inode != file_inode(file))
6960 		return false;
6961 
6962 	if (filter->offset > offset + size)
6963 		return false;
6964 
6965 	if (filter->offset + filter->size < offset)
6966 		return false;
6967 
6968 	return true;
6969 }
6970 
6971 static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
6972 {
6973 	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6974 	struct vm_area_struct *vma = data;
6975 	unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
6976 	struct file *file = vma->vm_file;
6977 	struct perf_addr_filter *filter;
6978 	unsigned int restart = 0, count = 0;
6979 
6980 	if (!has_addr_filter(event))
6981 		return;
6982 
6983 	if (!file)
6984 		return;
6985 
6986 	raw_spin_lock_irqsave(&ifh->lock, flags);
6987 	list_for_each_entry(filter, &ifh->list, entry) {
6988 		if (perf_addr_filter_match(filter, file, off,
6989 					     vma->vm_end - vma->vm_start)) {
6990 			event->addr_filters_offs[count] = vma->vm_start;
6991 			restart++;
6992 		}
6993 
6994 		count++;
6995 	}
6996 
6997 	if (restart)
6998 		event->addr_filters_gen++;
6999 	raw_spin_unlock_irqrestore(&ifh->lock, flags);
7000 
7001 	if (restart)
7002 		perf_event_stop(event, 1);
7003 }
7004 
7005 /*
7006  * Adjust all task's events' filters to the new vma
7007  */
7008 static void perf_addr_filters_adjust(struct vm_area_struct *vma)
7009 {
7010 	struct perf_event_context *ctx;
7011 	int ctxn;
7012 
7013 	/*
7014 	 * Data tracing isn't supported yet and as such there is no need
7015 	 * to keep track of anything that isn't related to executable code:
7016 	 */
7017 	if (!(vma->vm_flags & VM_EXEC))
7018 		return;
7019 
7020 	rcu_read_lock();
7021 	for_each_task_context_nr(ctxn) {
7022 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
7023 		if (!ctx)
7024 			continue;
7025 
7026 		perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
7027 	}
7028 	rcu_read_unlock();
7029 }
7030 
7031 void perf_event_mmap(struct vm_area_struct *vma)
7032 {
7033 	struct perf_mmap_event mmap_event;
7034 
7035 	if (!atomic_read(&nr_mmap_events))
7036 		return;
7037 
7038 	mmap_event = (struct perf_mmap_event){
7039 		.vma	= vma,
7040 		/* .file_name */
7041 		/* .file_size */
7042 		.event_id  = {
7043 			.header = {
7044 				.type = PERF_RECORD_MMAP,
7045 				.misc = PERF_RECORD_MISC_USER,
7046 				/* .size */
7047 			},
7048 			/* .pid */
7049 			/* .tid */
7050 			.start  = vma->vm_start,
7051 			.len    = vma->vm_end - vma->vm_start,
7052 			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
7053 		},
7054 		/* .maj (attr_mmap2 only) */
7055 		/* .min (attr_mmap2 only) */
7056 		/* .ino (attr_mmap2 only) */
7057 		/* .ino_generation (attr_mmap2 only) */
7058 		/* .prot (attr_mmap2 only) */
7059 		/* .flags (attr_mmap2 only) */
7060 	};
7061 
7062 	perf_addr_filters_adjust(vma);
7063 	perf_event_mmap_event(&mmap_event);
7064 }
7065 
7066 void perf_event_aux_event(struct perf_event *event, unsigned long head,
7067 			  unsigned long size, u64 flags)
7068 {
7069 	struct perf_output_handle handle;
7070 	struct perf_sample_data sample;
7071 	struct perf_aux_event {
7072 		struct perf_event_header	header;
7073 		u64				offset;
7074 		u64				size;
7075 		u64				flags;
7076 	} rec = {
7077 		.header = {
7078 			.type = PERF_RECORD_AUX,
7079 			.misc = 0,
7080 			.size = sizeof(rec),
7081 		},
7082 		.offset		= head,
7083 		.size		= size,
7084 		.flags		= flags,
7085 	};
7086 	int ret;
7087 
7088 	perf_event_header__init_id(&rec.header, &sample, event);
7089 	ret = perf_output_begin(&handle, event, rec.header.size);
7090 
7091 	if (ret)
7092 		return;
7093 
7094 	perf_output_put(&handle, rec);
7095 	perf_event__output_id_sample(event, &handle, &sample);
7096 
7097 	perf_output_end(&handle);
7098 }
7099 
7100 /*
7101  * Lost/dropped samples logging
7102  */
7103 void perf_log_lost_samples(struct perf_event *event, u64 lost)
7104 {
7105 	struct perf_output_handle handle;
7106 	struct perf_sample_data sample;
7107 	int ret;
7108 
7109 	struct {
7110 		struct perf_event_header	header;
7111 		u64				lost;
7112 	} lost_samples_event = {
7113 		.header = {
7114 			.type = PERF_RECORD_LOST_SAMPLES,
7115 			.misc = 0,
7116 			.size = sizeof(lost_samples_event),
7117 		},
7118 		.lost		= lost,
7119 	};
7120 
7121 	perf_event_header__init_id(&lost_samples_event.header, &sample, event);
7122 
7123 	ret = perf_output_begin(&handle, event,
7124 				lost_samples_event.header.size);
7125 	if (ret)
7126 		return;
7127 
7128 	perf_output_put(&handle, lost_samples_event);
7129 	perf_event__output_id_sample(event, &handle, &sample);
7130 	perf_output_end(&handle);
7131 }
7132 
7133 /*
7134  * context_switch tracking
7135  */
7136 
7137 struct perf_switch_event {
7138 	struct task_struct	*task;
7139 	struct task_struct	*next_prev;
7140 
7141 	struct {
7142 		struct perf_event_header	header;
7143 		u32				next_prev_pid;
7144 		u32				next_prev_tid;
7145 	} event_id;
7146 };
7147 
7148 static int perf_event_switch_match(struct perf_event *event)
7149 {
7150 	return event->attr.context_switch;
7151 }
7152 
7153 static void perf_event_switch_output(struct perf_event *event, void *data)
7154 {
7155 	struct perf_switch_event *se = data;
7156 	struct perf_output_handle handle;
7157 	struct perf_sample_data sample;
7158 	int ret;
7159 
7160 	if (!perf_event_switch_match(event))
7161 		return;
7162 
7163 	/* Only CPU-wide events are allowed to see next/prev pid/tid */
7164 	if (event->ctx->task) {
7165 		se->event_id.header.type = PERF_RECORD_SWITCH;
7166 		se->event_id.header.size = sizeof(se->event_id.header);
7167 	} else {
7168 		se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
7169 		se->event_id.header.size = sizeof(se->event_id);
7170 		se->event_id.next_prev_pid =
7171 					perf_event_pid(event, se->next_prev);
7172 		se->event_id.next_prev_tid =
7173 					perf_event_tid(event, se->next_prev);
7174 	}
7175 
7176 	perf_event_header__init_id(&se->event_id.header, &sample, event);
7177 
7178 	ret = perf_output_begin(&handle, event, se->event_id.header.size);
7179 	if (ret)
7180 		return;
7181 
7182 	if (event->ctx->task)
7183 		perf_output_put(&handle, se->event_id.header);
7184 	else
7185 		perf_output_put(&handle, se->event_id);
7186 
7187 	perf_event__output_id_sample(event, &handle, &sample);
7188 
7189 	perf_output_end(&handle);
7190 }
7191 
7192 static void perf_event_switch(struct task_struct *task,
7193 			      struct task_struct *next_prev, bool sched_in)
7194 {
7195 	struct perf_switch_event switch_event;
7196 
7197 	/* N.B. caller checks nr_switch_events != 0 */
7198 
7199 	switch_event = (struct perf_switch_event){
7200 		.task		= task,
7201 		.next_prev	= next_prev,
7202 		.event_id	= {
7203 			.header = {
7204 				/* .type */
7205 				.misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
7206 				/* .size */
7207 			},
7208 			/* .next_prev_pid */
7209 			/* .next_prev_tid */
7210 		},
7211 	};
7212 
7213 	perf_iterate_sb(perf_event_switch_output,
7214 		       &switch_event,
7215 		       NULL);
7216 }
7217 
7218 /*
7219  * IRQ throttle logging
7220  */
7221 
7222 static void perf_log_throttle(struct perf_event *event, int enable)
7223 {
7224 	struct perf_output_handle handle;
7225 	struct perf_sample_data sample;
7226 	int ret;
7227 
7228 	struct {
7229 		struct perf_event_header	header;
7230 		u64				time;
7231 		u64				id;
7232 		u64				stream_id;
7233 	} throttle_event = {
7234 		.header = {
7235 			.type = PERF_RECORD_THROTTLE,
7236 			.misc = 0,
7237 			.size = sizeof(throttle_event),
7238 		},
7239 		.time		= perf_event_clock(event),
7240 		.id		= primary_event_id(event),
7241 		.stream_id	= event->id,
7242 	};
7243 
7244 	if (enable)
7245 		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
7246 
7247 	perf_event_header__init_id(&throttle_event.header, &sample, event);
7248 
7249 	ret = perf_output_begin(&handle, event,
7250 				throttle_event.header.size);
7251 	if (ret)
7252 		return;
7253 
7254 	perf_output_put(&handle, throttle_event);
7255 	perf_event__output_id_sample(event, &handle, &sample);
7256 	perf_output_end(&handle);
7257 }
7258 
7259 static void perf_log_itrace_start(struct perf_event *event)
7260 {
7261 	struct perf_output_handle handle;
7262 	struct perf_sample_data sample;
7263 	struct perf_aux_event {
7264 		struct perf_event_header        header;
7265 		u32				pid;
7266 		u32				tid;
7267 	} rec;
7268 	int ret;
7269 
7270 	if (event->parent)
7271 		event = event->parent;
7272 
7273 	if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
7274 	    event->hw.itrace_started)
7275 		return;
7276 
7277 	rec.header.type	= PERF_RECORD_ITRACE_START;
7278 	rec.header.misc	= 0;
7279 	rec.header.size	= sizeof(rec);
7280 	rec.pid	= perf_event_pid(event, current);
7281 	rec.tid	= perf_event_tid(event, current);
7282 
7283 	perf_event_header__init_id(&rec.header, &sample, event);
7284 	ret = perf_output_begin(&handle, event, rec.header.size);
7285 
7286 	if (ret)
7287 		return;
7288 
7289 	perf_output_put(&handle, rec);
7290 	perf_event__output_id_sample(event, &handle, &sample);
7291 
7292 	perf_output_end(&handle);
7293 }
7294 
7295 static int
7296 __perf_event_account_interrupt(struct perf_event *event, int throttle)
7297 {
7298 	struct hw_perf_event *hwc = &event->hw;
7299 	int ret = 0;
7300 	u64 seq;
7301 
7302 	seq = __this_cpu_read(perf_throttled_seq);
7303 	if (seq != hwc->interrupts_seq) {
7304 		hwc->interrupts_seq = seq;
7305 		hwc->interrupts = 1;
7306 	} else {
7307 		hwc->interrupts++;
7308 		if (unlikely(throttle
7309 			     && hwc->interrupts >= max_samples_per_tick)) {
7310 			__this_cpu_inc(perf_throttled_count);
7311 			tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
7312 			hwc->interrupts = MAX_INTERRUPTS;
7313 			perf_log_throttle(event, 0);
7314 			ret = 1;
7315 		}
7316 	}
7317 
7318 	if (event->attr.freq) {
7319 		u64 now = perf_clock();
7320 		s64 delta = now - hwc->freq_time_stamp;
7321 
7322 		hwc->freq_time_stamp = now;
7323 
7324 		if (delta > 0 && delta < 2*TICK_NSEC)
7325 			perf_adjust_period(event, delta, hwc->last_period, true);
7326 	}
7327 
7328 	return ret;
7329 }
7330 
7331 int perf_event_account_interrupt(struct perf_event *event)
7332 {
7333 	return __perf_event_account_interrupt(event, 1);
7334 }
7335 
7336 /*
7337  * Generic event overflow handling, sampling.
7338  */
7339 
7340 static int __perf_event_overflow(struct perf_event *event,
7341 				   int throttle, struct perf_sample_data *data,
7342 				   struct pt_regs *regs)
7343 {
7344 	int events = atomic_read(&event->event_limit);
7345 	int ret = 0;
7346 
7347 	/*
7348 	 * Non-sampling counters might still use the PMI to fold short
7349 	 * hardware counters, ignore those.
7350 	 */
7351 	if (unlikely(!is_sampling_event(event)))
7352 		return 0;
7353 
7354 	ret = __perf_event_account_interrupt(event, throttle);
7355 
7356 	/*
7357 	 * XXX event_limit might not quite work as expected on inherited
7358 	 * events
7359 	 */
7360 
7361 	event->pending_kill = POLL_IN;
7362 	if (events && atomic_dec_and_test(&event->event_limit)) {
7363 		ret = 1;
7364 		event->pending_kill = POLL_HUP;
7365 
7366 		perf_event_disable_inatomic(event);
7367 	}
7368 
7369 	READ_ONCE(event->overflow_handler)(event, data, regs);
7370 
7371 	if (*perf_event_fasync(event) && event->pending_kill) {
7372 		event->pending_wakeup = 1;
7373 		irq_work_queue(&event->pending);
7374 	}
7375 
7376 	return ret;
7377 }
7378 
7379 int perf_event_overflow(struct perf_event *event,
7380 			  struct perf_sample_data *data,
7381 			  struct pt_regs *regs)
7382 {
7383 	return __perf_event_overflow(event, 1, data, regs);
7384 }
7385 
7386 /*
7387  * Generic software event infrastructure
7388  */
7389 
7390 struct swevent_htable {
7391 	struct swevent_hlist		*swevent_hlist;
7392 	struct mutex			hlist_mutex;
7393 	int				hlist_refcount;
7394 
7395 	/* Recursion avoidance in each contexts */
7396 	int				recursion[PERF_NR_CONTEXTS];
7397 };
7398 
7399 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
7400 
7401 /*
7402  * We directly increment event->count and keep a second value in
7403  * event->hw.period_left to count intervals. This period event
7404  * is kept in the range [-sample_period, 0] so that we can use the
7405  * sign as trigger.
7406  */
7407 
7408 u64 perf_swevent_set_period(struct perf_event *event)
7409 {
7410 	struct hw_perf_event *hwc = &event->hw;
7411 	u64 period = hwc->last_period;
7412 	u64 nr, offset;
7413 	s64 old, val;
7414 
7415 	hwc->last_period = hwc->sample_period;
7416 
7417 again:
7418 	old = val = local64_read(&hwc->period_left);
7419 	if (val < 0)
7420 		return 0;
7421 
7422 	nr = div64_u64(period + val, period);
7423 	offset = nr * period;
7424 	val -= offset;
7425 	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7426 		goto again;
7427 
7428 	return nr;
7429 }
7430 
7431 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
7432 				    struct perf_sample_data *data,
7433 				    struct pt_regs *regs)
7434 {
7435 	struct hw_perf_event *hwc = &event->hw;
7436 	int throttle = 0;
7437 
7438 	if (!overflow)
7439 		overflow = perf_swevent_set_period(event);
7440 
7441 	if (hwc->interrupts == MAX_INTERRUPTS)
7442 		return;
7443 
7444 	for (; overflow; overflow--) {
7445 		if (__perf_event_overflow(event, throttle,
7446 					    data, regs)) {
7447 			/*
7448 			 * We inhibit the overflow from happening when
7449 			 * hwc->interrupts == MAX_INTERRUPTS.
7450 			 */
7451 			break;
7452 		}
7453 		throttle = 1;
7454 	}
7455 }
7456 
7457 static void perf_swevent_event(struct perf_event *event, u64 nr,
7458 			       struct perf_sample_data *data,
7459 			       struct pt_regs *regs)
7460 {
7461 	struct hw_perf_event *hwc = &event->hw;
7462 
7463 	local64_add(nr, &event->count);
7464 
7465 	if (!regs)
7466 		return;
7467 
7468 	if (!is_sampling_event(event))
7469 		return;
7470 
7471 	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
7472 		data->period = nr;
7473 		return perf_swevent_overflow(event, 1, data, regs);
7474 	} else
7475 		data->period = event->hw.last_period;
7476 
7477 	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
7478 		return perf_swevent_overflow(event, 1, data, regs);
7479 
7480 	if (local64_add_negative(nr, &hwc->period_left))
7481 		return;
7482 
7483 	perf_swevent_overflow(event, 0, data, regs);
7484 }
7485 
7486 static int perf_exclude_event(struct perf_event *event,
7487 			      struct pt_regs *regs)
7488 {
7489 	if (event->hw.state & PERF_HES_STOPPED)
7490 		return 1;
7491 
7492 	if (regs) {
7493 		if (event->attr.exclude_user && user_mode(regs))
7494 			return 1;
7495 
7496 		if (event->attr.exclude_kernel && !user_mode(regs))
7497 			return 1;
7498 	}
7499 
7500 	return 0;
7501 }
7502 
7503 static int perf_swevent_match(struct perf_event *event,
7504 				enum perf_type_id type,
7505 				u32 event_id,
7506 				struct perf_sample_data *data,
7507 				struct pt_regs *regs)
7508 {
7509 	if (event->attr.type != type)
7510 		return 0;
7511 
7512 	if (event->attr.config != event_id)
7513 		return 0;
7514 
7515 	if (perf_exclude_event(event, regs))
7516 		return 0;
7517 
7518 	return 1;
7519 }
7520 
7521 static inline u64 swevent_hash(u64 type, u32 event_id)
7522 {
7523 	u64 val = event_id | (type << 32);
7524 
7525 	return hash_64(val, SWEVENT_HLIST_BITS);
7526 }
7527 
7528 static inline struct hlist_head *
7529 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
7530 {
7531 	u64 hash = swevent_hash(type, event_id);
7532 
7533 	return &hlist->heads[hash];
7534 }
7535 
7536 /* For the read side: events when they trigger */
7537 static inline struct hlist_head *
7538 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
7539 {
7540 	struct swevent_hlist *hlist;
7541 
7542 	hlist = rcu_dereference(swhash->swevent_hlist);
7543 	if (!hlist)
7544 		return NULL;
7545 
7546 	return __find_swevent_head(hlist, type, event_id);
7547 }
7548 
7549 /* For the event head insertion and removal in the hlist */
7550 static inline struct hlist_head *
7551 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
7552 {
7553 	struct swevent_hlist *hlist;
7554 	u32 event_id = event->attr.config;
7555 	u64 type = event->attr.type;
7556 
7557 	/*
7558 	 * Event scheduling is always serialized against hlist allocation
7559 	 * and release. Which makes the protected version suitable here.
7560 	 * The context lock guarantees that.
7561 	 */
7562 	hlist = rcu_dereference_protected(swhash->swevent_hlist,
7563 					  lockdep_is_held(&event->ctx->lock));
7564 	if (!hlist)
7565 		return NULL;
7566 
7567 	return __find_swevent_head(hlist, type, event_id);
7568 }
7569 
7570 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
7571 				    u64 nr,
7572 				    struct perf_sample_data *data,
7573 				    struct pt_regs *regs)
7574 {
7575 	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
7576 	struct perf_event *event;
7577 	struct hlist_head *head;
7578 
7579 	rcu_read_lock();
7580 	head = find_swevent_head_rcu(swhash, type, event_id);
7581 	if (!head)
7582 		goto end;
7583 
7584 	hlist_for_each_entry_rcu(event, head, hlist_entry) {
7585 		if (perf_swevent_match(event, type, event_id, data, regs))
7586 			perf_swevent_event(event, nr, data, regs);
7587 	}
7588 end:
7589 	rcu_read_unlock();
7590 }
7591 
7592 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
7593 
7594 int perf_swevent_get_recursion_context(void)
7595 {
7596 	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
7597 
7598 	return get_recursion_context(swhash->recursion);
7599 }
7600 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
7601 
7602 void perf_swevent_put_recursion_context(int rctx)
7603 {
7604 	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
7605 
7606 	put_recursion_context(swhash->recursion, rctx);
7607 }
7608 
7609 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
7610 {
7611 	struct perf_sample_data data;
7612 
7613 	if (WARN_ON_ONCE(!regs))
7614 		return;
7615 
7616 	perf_sample_data_init(&data, addr, 0);
7617 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
7618 }
7619 
7620 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
7621 {
7622 	int rctx;
7623 
7624 	preempt_disable_notrace();
7625 	rctx = perf_swevent_get_recursion_context();
7626 	if (unlikely(rctx < 0))
7627 		goto fail;
7628 
7629 	___perf_sw_event(event_id, nr, regs, addr);
7630 
7631 	perf_swevent_put_recursion_context(rctx);
7632 fail:
7633 	preempt_enable_notrace();
7634 }
7635 
7636 static void perf_swevent_read(struct perf_event *event)
7637 {
7638 }
7639 
7640 static int perf_swevent_add(struct perf_event *event, int flags)
7641 {
7642 	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
7643 	struct hw_perf_event *hwc = &event->hw;
7644 	struct hlist_head *head;
7645 
7646 	if (is_sampling_event(event)) {
7647 		hwc->last_period = hwc->sample_period;
7648 		perf_swevent_set_period(event);
7649 	}
7650 
7651 	hwc->state = !(flags & PERF_EF_START);
7652 
7653 	head = find_swevent_head(swhash, event);
7654 	if (WARN_ON_ONCE(!head))
7655 		return -EINVAL;
7656 
7657 	hlist_add_head_rcu(&event->hlist_entry, head);
7658 	perf_event_update_userpage(event);
7659 
7660 	return 0;
7661 }
7662 
7663 static void perf_swevent_del(struct perf_event *event, int flags)
7664 {
7665 	hlist_del_rcu(&event->hlist_entry);
7666 }
7667 
7668 static void perf_swevent_start(struct perf_event *event, int flags)
7669 {
7670 	event->hw.state = 0;
7671 }
7672 
7673 static void perf_swevent_stop(struct perf_event *event, int flags)
7674 {
7675 	event->hw.state = PERF_HES_STOPPED;
7676 }
7677 
7678 /* Deref the hlist from the update side */
7679 static inline struct swevent_hlist *
7680 swevent_hlist_deref(struct swevent_htable *swhash)
7681 {
7682 	return rcu_dereference_protected(swhash->swevent_hlist,
7683 					 lockdep_is_held(&swhash->hlist_mutex));
7684 }
7685 
7686 static void swevent_hlist_release(struct swevent_htable *swhash)
7687 {
7688 	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
7689 
7690 	if (!hlist)
7691 		return;
7692 
7693 	RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
7694 	kfree_rcu(hlist, rcu_head);
7695 }
7696 
7697 static void swevent_hlist_put_cpu(int cpu)
7698 {
7699 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7700 
7701 	mutex_lock(&swhash->hlist_mutex);
7702 
7703 	if (!--swhash->hlist_refcount)
7704 		swevent_hlist_release(swhash);
7705 
7706 	mutex_unlock(&swhash->hlist_mutex);
7707 }
7708 
7709 static void swevent_hlist_put(void)
7710 {
7711 	int cpu;
7712 
7713 	for_each_possible_cpu(cpu)
7714 		swevent_hlist_put_cpu(cpu);
7715 }
7716 
7717 static int swevent_hlist_get_cpu(int cpu)
7718 {
7719 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7720 	int err = 0;
7721 
7722 	mutex_lock(&swhash->hlist_mutex);
7723 	if (!swevent_hlist_deref(swhash) &&
7724 	    cpumask_test_cpu(cpu, perf_online_mask)) {
7725 		struct swevent_hlist *hlist;
7726 
7727 		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
7728 		if (!hlist) {
7729 			err = -ENOMEM;
7730 			goto exit;
7731 		}
7732 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
7733 	}
7734 	swhash->hlist_refcount++;
7735 exit:
7736 	mutex_unlock(&swhash->hlist_mutex);
7737 
7738 	return err;
7739 }
7740 
7741 static int swevent_hlist_get(void)
7742 {
7743 	int err, cpu, failed_cpu;
7744 
7745 	mutex_lock(&pmus_lock);
7746 	for_each_possible_cpu(cpu) {
7747 		err = swevent_hlist_get_cpu(cpu);
7748 		if (err) {
7749 			failed_cpu = cpu;
7750 			goto fail;
7751 		}
7752 	}
7753 	mutex_unlock(&pmus_lock);
7754 	return 0;
7755 fail:
7756 	for_each_possible_cpu(cpu) {
7757 		if (cpu == failed_cpu)
7758 			break;
7759 		swevent_hlist_put_cpu(cpu);
7760 	}
7761 	mutex_unlock(&pmus_lock);
7762 	return err;
7763 }
7764 
7765 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
7766 
7767 static void sw_perf_event_destroy(struct perf_event *event)
7768 {
7769 	u64 event_id = event->attr.config;
7770 
7771 	WARN_ON(event->parent);
7772 
7773 	static_key_slow_dec(&perf_swevent_enabled[event_id]);
7774 	swevent_hlist_put();
7775 }
7776 
7777 static int perf_swevent_init(struct perf_event *event)
7778 {
7779 	u64 event_id = event->attr.config;
7780 
7781 	if (event->attr.type != PERF_TYPE_SOFTWARE)
7782 		return -ENOENT;
7783 
7784 	/*
7785 	 * no branch sampling for software events
7786 	 */
7787 	if (has_branch_stack(event))
7788 		return -EOPNOTSUPP;
7789 
7790 	switch (event_id) {
7791 	case PERF_COUNT_SW_CPU_CLOCK:
7792 	case PERF_COUNT_SW_TASK_CLOCK:
7793 		return -ENOENT;
7794 
7795 	default:
7796 		break;
7797 	}
7798 
7799 	if (event_id >= PERF_COUNT_SW_MAX)
7800 		return -ENOENT;
7801 
7802 	if (!event->parent) {
7803 		int err;
7804 
7805 		err = swevent_hlist_get();
7806 		if (err)
7807 			return err;
7808 
7809 		static_key_slow_inc(&perf_swevent_enabled[event_id]);
7810 		event->destroy = sw_perf_event_destroy;
7811 	}
7812 
7813 	return 0;
7814 }
7815 
7816 static struct pmu perf_swevent = {
7817 	.task_ctx_nr	= perf_sw_context,
7818 
7819 	.capabilities	= PERF_PMU_CAP_NO_NMI,
7820 
7821 	.event_init	= perf_swevent_init,
7822 	.add		= perf_swevent_add,
7823 	.del		= perf_swevent_del,
7824 	.start		= perf_swevent_start,
7825 	.stop		= perf_swevent_stop,
7826 	.read		= perf_swevent_read,
7827 };
7828 
7829 #ifdef CONFIG_EVENT_TRACING
7830 
7831 static int perf_tp_filter_match(struct perf_event *event,
7832 				struct perf_sample_data *data)
7833 {
7834 	void *record = data->raw->frag.data;
7835 
7836 	/* only top level events have filters set */
7837 	if (event->parent)
7838 		event = event->parent;
7839 
7840 	if (likely(!event->filter) || filter_match_preds(event->filter, record))
7841 		return 1;
7842 	return 0;
7843 }
7844 
7845 static int perf_tp_event_match(struct perf_event *event,
7846 				struct perf_sample_data *data,
7847 				struct pt_regs *regs)
7848 {
7849 	if (event->hw.state & PERF_HES_STOPPED)
7850 		return 0;
7851 	/*
7852 	 * All tracepoints are from kernel-space.
7853 	 */
7854 	if (event->attr.exclude_kernel)
7855 		return 0;
7856 
7857 	if (!perf_tp_filter_match(event, data))
7858 		return 0;
7859 
7860 	return 1;
7861 }
7862 
7863 void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
7864 			       struct trace_event_call *call, u64 count,
7865 			       struct pt_regs *regs, struct hlist_head *head,
7866 			       struct task_struct *task)
7867 {
7868 	struct bpf_prog *prog = call->prog;
7869 
7870 	if (prog) {
7871 		*(struct pt_regs **)raw_data = regs;
7872 		if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
7873 			perf_swevent_put_recursion_context(rctx);
7874 			return;
7875 		}
7876 	}
7877 	perf_tp_event(call->event.type, count, raw_data, size, regs, head,
7878 		      rctx, task);
7879 }
7880 EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
7881 
7882 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
7883 		   struct pt_regs *regs, struct hlist_head *head, int rctx,
7884 		   struct task_struct *task)
7885 {
7886 	struct perf_sample_data data;
7887 	struct perf_event *event;
7888 
7889 	struct perf_raw_record raw = {
7890 		.frag = {
7891 			.size = entry_size,
7892 			.data = record,
7893 		},
7894 	};
7895 
7896 	perf_sample_data_init(&data, 0, 0);
7897 	data.raw = &raw;
7898 
7899 	perf_trace_buf_update(record, event_type);
7900 
7901 	hlist_for_each_entry_rcu(event, head, hlist_entry) {
7902 		if (perf_tp_event_match(event, &data, regs))
7903 			perf_swevent_event(event, count, &data, regs);
7904 	}
7905 
7906 	/*
7907 	 * If we got specified a target task, also iterate its context and
7908 	 * deliver this event there too.
7909 	 */
7910 	if (task && task != current) {
7911 		struct perf_event_context *ctx;
7912 		struct trace_entry *entry = record;
7913 
7914 		rcu_read_lock();
7915 		ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
7916 		if (!ctx)
7917 			goto unlock;
7918 
7919 		list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
7920 			if (event->attr.type != PERF_TYPE_TRACEPOINT)
7921 				continue;
7922 			if (event->attr.config != entry->type)
7923 				continue;
7924 			if (perf_tp_event_match(event, &data, regs))
7925 				perf_swevent_event(event, count, &data, regs);
7926 		}
7927 unlock:
7928 		rcu_read_unlock();
7929 	}
7930 
7931 	perf_swevent_put_recursion_context(rctx);
7932 }
7933 EXPORT_SYMBOL_GPL(perf_tp_event);
7934 
7935 static void tp_perf_event_destroy(struct perf_event *event)
7936 {
7937 	perf_trace_destroy(event);
7938 }
7939 
7940 static int perf_tp_event_init(struct perf_event *event)
7941 {
7942 	int err;
7943 
7944 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
7945 		return -ENOENT;
7946 
7947 	/*
7948 	 * no branch sampling for tracepoint events
7949 	 */
7950 	if (has_branch_stack(event))
7951 		return -EOPNOTSUPP;
7952 
7953 	err = perf_trace_init(event);
7954 	if (err)
7955 		return err;
7956 
7957 	event->destroy = tp_perf_event_destroy;
7958 
7959 	return 0;
7960 }
7961 
7962 static struct pmu perf_tracepoint = {
7963 	.task_ctx_nr	= perf_sw_context,
7964 
7965 	.event_init	= perf_tp_event_init,
7966 	.add		= perf_trace_add,
7967 	.del		= perf_trace_del,
7968 	.start		= perf_swevent_start,
7969 	.stop		= perf_swevent_stop,
7970 	.read		= perf_swevent_read,
7971 };
7972 
7973 static inline void perf_tp_register(void)
7974 {
7975 	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
7976 }
7977 
7978 static void perf_event_free_filter(struct perf_event *event)
7979 {
7980 	ftrace_profile_free_filter(event);
7981 }
7982 
7983 #ifdef CONFIG_BPF_SYSCALL
7984 static void bpf_overflow_handler(struct perf_event *event,
7985 				 struct perf_sample_data *data,
7986 				 struct pt_regs *regs)
7987 {
7988 	struct bpf_perf_event_data_kern ctx = {
7989 		.data = data,
7990 		.regs = regs,
7991 	};
7992 	int ret = 0;
7993 
7994 	preempt_disable();
7995 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
7996 		goto out;
7997 	rcu_read_lock();
7998 	ret = BPF_PROG_RUN(event->prog, &ctx);
7999 	rcu_read_unlock();
8000 out:
8001 	__this_cpu_dec(bpf_prog_active);
8002 	preempt_enable();
8003 	if (!ret)
8004 		return;
8005 
8006 	event->orig_overflow_handler(event, data, regs);
8007 }
8008 
8009 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
8010 {
8011 	struct bpf_prog *prog;
8012 
8013 	if (event->overflow_handler_context)
8014 		/* hw breakpoint or kernel counter */
8015 		return -EINVAL;
8016 
8017 	if (event->prog)
8018 		return -EEXIST;
8019 
8020 	prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT);
8021 	if (IS_ERR(prog))
8022 		return PTR_ERR(prog);
8023 
8024 	event->prog = prog;
8025 	event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
8026 	WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
8027 	return 0;
8028 }
8029 
8030 static void perf_event_free_bpf_handler(struct perf_event *event)
8031 {
8032 	struct bpf_prog *prog = event->prog;
8033 
8034 	if (!prog)
8035 		return;
8036 
8037 	WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
8038 	event->prog = NULL;
8039 	bpf_prog_put(prog);
8040 }
8041 #else
8042 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
8043 {
8044 	return -EOPNOTSUPP;
8045 }
8046 static void perf_event_free_bpf_handler(struct perf_event *event)
8047 {
8048 }
8049 #endif
8050 
8051 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
8052 {
8053 	bool is_kprobe, is_tracepoint;
8054 	struct bpf_prog *prog;
8055 
8056 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
8057 		return perf_event_set_bpf_handler(event, prog_fd);
8058 
8059 	if (event->tp_event->prog)
8060 		return -EEXIST;
8061 
8062 	is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
8063 	is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
8064 	if (!is_kprobe && !is_tracepoint)
8065 		/* bpf programs can only be attached to u/kprobe or tracepoint */
8066 		return -EINVAL;
8067 
8068 	prog = bpf_prog_get(prog_fd);
8069 	if (IS_ERR(prog))
8070 		return PTR_ERR(prog);
8071 
8072 	if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
8073 	    (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
8074 		/* valid fd, but invalid bpf program type */
8075 		bpf_prog_put(prog);
8076 		return -EINVAL;
8077 	}
8078 
8079 	if (is_tracepoint) {
8080 		int off = trace_event_get_offsets(event->tp_event);
8081 
8082 		if (prog->aux->max_ctx_offset > off) {
8083 			bpf_prog_put(prog);
8084 			return -EACCES;
8085 		}
8086 	}
8087 	event->tp_event->prog = prog;
8088 
8089 	return 0;
8090 }
8091 
8092 static void perf_event_free_bpf_prog(struct perf_event *event)
8093 {
8094 	struct bpf_prog *prog;
8095 
8096 	perf_event_free_bpf_handler(event);
8097 
8098 	if (!event->tp_event)
8099 		return;
8100 
8101 	prog = event->tp_event->prog;
8102 	if (prog) {
8103 		event->tp_event->prog = NULL;
8104 		bpf_prog_put(prog);
8105 	}
8106 }
8107 
8108 #else
8109 
8110 static inline void perf_tp_register(void)
8111 {
8112 }
8113 
8114 static void perf_event_free_filter(struct perf_event *event)
8115 {
8116 }
8117 
8118 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
8119 {
8120 	return -ENOENT;
8121 }
8122 
8123 static void perf_event_free_bpf_prog(struct perf_event *event)
8124 {
8125 }
8126 #endif /* CONFIG_EVENT_TRACING */
8127 
8128 #ifdef CONFIG_HAVE_HW_BREAKPOINT
8129 void perf_bp_event(struct perf_event *bp, void *data)
8130 {
8131 	struct perf_sample_data sample;
8132 	struct pt_regs *regs = data;
8133 
8134 	perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
8135 
8136 	if (!bp->hw.state && !perf_exclude_event(bp, regs))
8137 		perf_swevent_event(bp, 1, &sample, regs);
8138 }
8139 #endif
8140 
8141 /*
8142  * Allocate a new address filter
8143  */
8144 static struct perf_addr_filter *
8145 perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
8146 {
8147 	int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
8148 	struct perf_addr_filter *filter;
8149 
8150 	filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
8151 	if (!filter)
8152 		return NULL;
8153 
8154 	INIT_LIST_HEAD(&filter->entry);
8155 	list_add_tail(&filter->entry, filters);
8156 
8157 	return filter;
8158 }
8159 
8160 static void free_filters_list(struct list_head *filters)
8161 {
8162 	struct perf_addr_filter *filter, *iter;
8163 
8164 	list_for_each_entry_safe(filter, iter, filters, entry) {
8165 		if (filter->inode)
8166 			iput(filter->inode);
8167 		list_del(&filter->entry);
8168 		kfree(filter);
8169 	}
8170 }
8171 
8172 /*
8173  * Free existing address filters and optionally install new ones
8174  */
8175 static void perf_addr_filters_splice(struct perf_event *event,
8176 				     struct list_head *head)
8177 {
8178 	unsigned long flags;
8179 	LIST_HEAD(list);
8180 
8181 	if (!has_addr_filter(event))
8182 		return;
8183 
8184 	/* don't bother with children, they don't have their own filters */
8185 	if (event->parent)
8186 		return;
8187 
8188 	raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
8189 
8190 	list_splice_init(&event->addr_filters.list, &list);
8191 	if (head)
8192 		list_splice(head, &event->addr_filters.list);
8193 
8194 	raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
8195 
8196 	free_filters_list(&list);
8197 }
8198 
8199 /*
8200  * Scan through mm's vmas and see if one of them matches the
8201  * @filter; if so, adjust filter's address range.
8202  * Called with mm::mmap_sem down for reading.
8203  */
8204 static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
8205 					    struct mm_struct *mm)
8206 {
8207 	struct vm_area_struct *vma;
8208 
8209 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
8210 		struct file *file = vma->vm_file;
8211 		unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
8212 		unsigned long vma_size = vma->vm_end - vma->vm_start;
8213 
8214 		if (!file)
8215 			continue;
8216 
8217 		if (!perf_addr_filter_match(filter, file, off, vma_size))
8218 			continue;
8219 
8220 		return vma->vm_start;
8221 	}
8222 
8223 	return 0;
8224 }
8225 
8226 /*
8227  * Update event's address range filters based on the
8228  * task's existing mappings, if any.
8229  */
8230 static void perf_event_addr_filters_apply(struct perf_event *event)
8231 {
8232 	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8233 	struct task_struct *task = READ_ONCE(event->ctx->task);
8234 	struct perf_addr_filter *filter;
8235 	struct mm_struct *mm = NULL;
8236 	unsigned int count = 0;
8237 	unsigned long flags;
8238 
8239 	/*
8240 	 * We may observe TASK_TOMBSTONE, which means that the event tear-down
8241 	 * will stop on the parent's child_mutex that our caller is also holding
8242 	 */
8243 	if (task == TASK_TOMBSTONE)
8244 		return;
8245 
8246 	if (!ifh->nr_file_filters)
8247 		return;
8248 
8249 	mm = get_task_mm(event->ctx->task);
8250 	if (!mm)
8251 		goto restart;
8252 
8253 	down_read(&mm->mmap_sem);
8254 
8255 	raw_spin_lock_irqsave(&ifh->lock, flags);
8256 	list_for_each_entry(filter, &ifh->list, entry) {
8257 		event->addr_filters_offs[count] = 0;
8258 
8259 		/*
8260 		 * Adjust base offset if the filter is associated to a binary
8261 		 * that needs to be mapped:
8262 		 */
8263 		if (filter->inode)
8264 			event->addr_filters_offs[count] =
8265 				perf_addr_filter_apply(filter, mm);
8266 
8267 		count++;
8268 	}
8269 
8270 	event->addr_filters_gen++;
8271 	raw_spin_unlock_irqrestore(&ifh->lock, flags);
8272 
8273 	up_read(&mm->mmap_sem);
8274 
8275 	mmput(mm);
8276 
8277 restart:
8278 	perf_event_stop(event, 1);
8279 }
8280 
8281 /*
8282  * Address range filtering: limiting the data to certain
8283  * instruction address ranges. Filters are ioctl()ed to us from
8284  * userspace as ascii strings.
8285  *
8286  * Filter string format:
8287  *
8288  * ACTION RANGE_SPEC
8289  * where ACTION is one of the
8290  *  * "filter": limit the trace to this region
8291  *  * "start": start tracing from this address
8292  *  * "stop": stop tracing at this address/region;
8293  * RANGE_SPEC is
8294  *  * for kernel addresses: <start address>[/<size>]
8295  *  * for object files:     <start address>[/<size>]@</path/to/object/file>
8296  *
8297  * if <size> is not specified, the range is treated as a single address.
8298  */
8299 enum {
8300 	IF_ACT_NONE = -1,
8301 	IF_ACT_FILTER,
8302 	IF_ACT_START,
8303 	IF_ACT_STOP,
8304 	IF_SRC_FILE,
8305 	IF_SRC_KERNEL,
8306 	IF_SRC_FILEADDR,
8307 	IF_SRC_KERNELADDR,
8308 };
8309 
8310 enum {
8311 	IF_STATE_ACTION = 0,
8312 	IF_STATE_SOURCE,
8313 	IF_STATE_END,
8314 };
8315 
8316 static const match_table_t if_tokens = {
8317 	{ IF_ACT_FILTER,	"filter" },
8318 	{ IF_ACT_START,		"start" },
8319 	{ IF_ACT_STOP,		"stop" },
8320 	{ IF_SRC_FILE,		"%u/%u@%s" },
8321 	{ IF_SRC_KERNEL,	"%u/%u" },
8322 	{ IF_SRC_FILEADDR,	"%u@%s" },
8323 	{ IF_SRC_KERNELADDR,	"%u" },
8324 	{ IF_ACT_NONE,		NULL },
8325 };
8326 
8327 /*
8328  * Address filter string parser
8329  */
8330 static int
8331 perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
8332 			     struct list_head *filters)
8333 {
8334 	struct perf_addr_filter *filter = NULL;
8335 	char *start, *orig, *filename = NULL;
8336 	struct path path;
8337 	substring_t args[MAX_OPT_ARGS];
8338 	int state = IF_STATE_ACTION, token;
8339 	unsigned int kernel = 0;
8340 	int ret = -EINVAL;
8341 
8342 	orig = fstr = kstrdup(fstr, GFP_KERNEL);
8343 	if (!fstr)
8344 		return -ENOMEM;
8345 
8346 	while ((start = strsep(&fstr, " ,\n")) != NULL) {
8347 		ret = -EINVAL;
8348 
8349 		if (!*start)
8350 			continue;
8351 
8352 		/* filter definition begins */
8353 		if (state == IF_STATE_ACTION) {
8354 			filter = perf_addr_filter_new(event, filters);
8355 			if (!filter)
8356 				goto fail;
8357 		}
8358 
8359 		token = match_token(start, if_tokens, args);
8360 		switch (token) {
8361 		case IF_ACT_FILTER:
8362 		case IF_ACT_START:
8363 			filter->filter = 1;
8364 
8365 		case IF_ACT_STOP:
8366 			if (state != IF_STATE_ACTION)
8367 				goto fail;
8368 
8369 			state = IF_STATE_SOURCE;
8370 			break;
8371 
8372 		case IF_SRC_KERNELADDR:
8373 		case IF_SRC_KERNEL:
8374 			kernel = 1;
8375 
8376 		case IF_SRC_FILEADDR:
8377 		case IF_SRC_FILE:
8378 			if (state != IF_STATE_SOURCE)
8379 				goto fail;
8380 
8381 			if (token == IF_SRC_FILE || token == IF_SRC_KERNEL)
8382 				filter->range = 1;
8383 
8384 			*args[0].to = 0;
8385 			ret = kstrtoul(args[0].from, 0, &filter->offset);
8386 			if (ret)
8387 				goto fail;
8388 
8389 			if (filter->range) {
8390 				*args[1].to = 0;
8391 				ret = kstrtoul(args[1].from, 0, &filter->size);
8392 				if (ret)
8393 					goto fail;
8394 			}
8395 
8396 			if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
8397 				int fpos = filter->range ? 2 : 1;
8398 
8399 				filename = match_strdup(&args[fpos]);
8400 				if (!filename) {
8401 					ret = -ENOMEM;
8402 					goto fail;
8403 				}
8404 			}
8405 
8406 			state = IF_STATE_END;
8407 			break;
8408 
8409 		default:
8410 			goto fail;
8411 		}
8412 
8413 		/*
8414 		 * Filter definition is fully parsed, validate and install it.
8415 		 * Make sure that it doesn't contradict itself or the event's
8416 		 * attribute.
8417 		 */
8418 		if (state == IF_STATE_END) {
8419 			ret = -EINVAL;
8420 			if (kernel && event->attr.exclude_kernel)
8421 				goto fail;
8422 
8423 			if (!kernel) {
8424 				if (!filename)
8425 					goto fail;
8426 
8427 				/*
8428 				 * For now, we only support file-based filters
8429 				 * in per-task events; doing so for CPU-wide
8430 				 * events requires additional context switching
8431 				 * trickery, since same object code will be
8432 				 * mapped at different virtual addresses in
8433 				 * different processes.
8434 				 */
8435 				ret = -EOPNOTSUPP;
8436 				if (!event->ctx->task)
8437 					goto fail_free_name;
8438 
8439 				/* look up the path and grab its inode */
8440 				ret = kern_path(filename, LOOKUP_FOLLOW, &path);
8441 				if (ret)
8442 					goto fail_free_name;
8443 
8444 				filter->inode = igrab(d_inode(path.dentry));
8445 				path_put(&path);
8446 				kfree(filename);
8447 				filename = NULL;
8448 
8449 				ret = -EINVAL;
8450 				if (!filter->inode ||
8451 				    !S_ISREG(filter->inode->i_mode))
8452 					/* free_filters_list() will iput() */
8453 					goto fail;
8454 
8455 				event->addr_filters.nr_file_filters++;
8456 			}
8457 
8458 			/* ready to consume more filters */
8459 			state = IF_STATE_ACTION;
8460 			filter = NULL;
8461 		}
8462 	}
8463 
8464 	if (state != IF_STATE_ACTION)
8465 		goto fail;
8466 
8467 	kfree(orig);
8468 
8469 	return 0;
8470 
8471 fail_free_name:
8472 	kfree(filename);
8473 fail:
8474 	free_filters_list(filters);
8475 	kfree(orig);
8476 
8477 	return ret;
8478 }
8479 
8480 static int
8481 perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
8482 {
8483 	LIST_HEAD(filters);
8484 	int ret;
8485 
8486 	/*
8487 	 * Since this is called in perf_ioctl() path, we're already holding
8488 	 * ctx::mutex.
8489 	 */
8490 	lockdep_assert_held(&event->ctx->mutex);
8491 
8492 	if (WARN_ON_ONCE(event->parent))
8493 		return -EINVAL;
8494 
8495 	ret = perf_event_parse_addr_filter(event, filter_str, &filters);
8496 	if (ret)
8497 		goto fail_clear_files;
8498 
8499 	ret = event->pmu->addr_filters_validate(&filters);
8500 	if (ret)
8501 		goto fail_free_filters;
8502 
8503 	/* remove existing filters, if any */
8504 	perf_addr_filters_splice(event, &filters);
8505 
8506 	/* install new filters */
8507 	perf_event_for_each_child(event, perf_event_addr_filters_apply);
8508 
8509 	return ret;
8510 
8511 fail_free_filters:
8512 	free_filters_list(&filters);
8513 
8514 fail_clear_files:
8515 	event->addr_filters.nr_file_filters = 0;
8516 
8517 	return ret;
8518 }
8519 
8520 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
8521 {
8522 	char *filter_str;
8523 	int ret = -EINVAL;
8524 
8525 	if ((event->attr.type != PERF_TYPE_TRACEPOINT ||
8526 	    !IS_ENABLED(CONFIG_EVENT_TRACING)) &&
8527 	    !has_addr_filter(event))
8528 		return -EINVAL;
8529 
8530 	filter_str = strndup_user(arg, PAGE_SIZE);
8531 	if (IS_ERR(filter_str))
8532 		return PTR_ERR(filter_str);
8533 
8534 	if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
8535 	    event->attr.type == PERF_TYPE_TRACEPOINT)
8536 		ret = ftrace_profile_set_filter(event, event->attr.config,
8537 						filter_str);
8538 	else if (has_addr_filter(event))
8539 		ret = perf_event_set_addr_filter(event, filter_str);
8540 
8541 	kfree(filter_str);
8542 	return ret;
8543 }
8544 
8545 /*
8546  * hrtimer based swevent callback
8547  */
8548 
8549 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
8550 {
8551 	enum hrtimer_restart ret = HRTIMER_RESTART;
8552 	struct perf_sample_data data;
8553 	struct pt_regs *regs;
8554 	struct perf_event *event;
8555 	u64 period;
8556 
8557 	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
8558 
8559 	if (event->state != PERF_EVENT_STATE_ACTIVE)
8560 		return HRTIMER_NORESTART;
8561 
8562 	event->pmu->read(event);
8563 
8564 	perf_sample_data_init(&data, 0, event->hw.last_period);
8565 	regs = get_irq_regs();
8566 
8567 	if (regs && !perf_exclude_event(event, regs)) {
8568 		if (!(event->attr.exclude_idle && is_idle_task(current)))
8569 			if (__perf_event_overflow(event, 1, &data, regs))
8570 				ret = HRTIMER_NORESTART;
8571 	}
8572 
8573 	period = max_t(u64, 10000, event->hw.sample_period);
8574 	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
8575 
8576 	return ret;
8577 }
8578 
8579 static void perf_swevent_start_hrtimer(struct perf_event *event)
8580 {
8581 	struct hw_perf_event *hwc = &event->hw;
8582 	s64 period;
8583 
8584 	if (!is_sampling_event(event))
8585 		return;
8586 
8587 	period = local64_read(&hwc->period_left);
8588 	if (period) {
8589 		if (period < 0)
8590 			period = 10000;
8591 
8592 		local64_set(&hwc->period_left, 0);
8593 	} else {
8594 		period = max_t(u64, 10000, hwc->sample_period);
8595 	}
8596 	hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
8597 		      HRTIMER_MODE_REL_PINNED);
8598 }
8599 
8600 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
8601 {
8602 	struct hw_perf_event *hwc = &event->hw;
8603 
8604 	if (is_sampling_event(event)) {
8605 		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
8606 		local64_set(&hwc->period_left, ktime_to_ns(remaining));
8607 
8608 		hrtimer_cancel(&hwc->hrtimer);
8609 	}
8610 }
8611 
8612 static void perf_swevent_init_hrtimer(struct perf_event *event)
8613 {
8614 	struct hw_perf_event *hwc = &event->hw;
8615 
8616 	if (!is_sampling_event(event))
8617 		return;
8618 
8619 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
8620 	hwc->hrtimer.function = perf_swevent_hrtimer;
8621 
8622 	/*
8623 	 * Since hrtimers have a fixed rate, we can do a static freq->period
8624 	 * mapping and avoid the whole period adjust feedback stuff.
8625 	 */
8626 	if (event->attr.freq) {
8627 		long freq = event->attr.sample_freq;
8628 
8629 		event->attr.sample_period = NSEC_PER_SEC / freq;
8630 		hwc->sample_period = event->attr.sample_period;
8631 		local64_set(&hwc->period_left, hwc->sample_period);
8632 		hwc->last_period = hwc->sample_period;
8633 		event->attr.freq = 0;
8634 	}
8635 }
8636 
8637 /*
8638  * Software event: cpu wall time clock
8639  */
8640 
8641 static void cpu_clock_event_update(struct perf_event *event)
8642 {
8643 	s64 prev;
8644 	u64 now;
8645 
8646 	now = local_clock();
8647 	prev = local64_xchg(&event->hw.prev_count, now);
8648 	local64_add(now - prev, &event->count);
8649 }
8650 
8651 static void cpu_clock_event_start(struct perf_event *event, int flags)
8652 {
8653 	local64_set(&event->hw.prev_count, local_clock());
8654 	perf_swevent_start_hrtimer(event);
8655 }
8656 
8657 static void cpu_clock_event_stop(struct perf_event *event, int flags)
8658 {
8659 	perf_swevent_cancel_hrtimer(event);
8660 	cpu_clock_event_update(event);
8661 }
8662 
8663 static int cpu_clock_event_add(struct perf_event *event, int flags)
8664 {
8665 	if (flags & PERF_EF_START)
8666 		cpu_clock_event_start(event, flags);
8667 	perf_event_update_userpage(event);
8668 
8669 	return 0;
8670 }
8671 
8672 static void cpu_clock_event_del(struct perf_event *event, int flags)
8673 {
8674 	cpu_clock_event_stop(event, flags);
8675 }
8676 
8677 static void cpu_clock_event_read(struct perf_event *event)
8678 {
8679 	cpu_clock_event_update(event);
8680 }
8681 
8682 static int cpu_clock_event_init(struct perf_event *event)
8683 {
8684 	if (event->attr.type != PERF_TYPE_SOFTWARE)
8685 		return -ENOENT;
8686 
8687 	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
8688 		return -ENOENT;
8689 
8690 	/*
8691 	 * no branch sampling for software events
8692 	 */
8693 	if (has_branch_stack(event))
8694 		return -EOPNOTSUPP;
8695 
8696 	perf_swevent_init_hrtimer(event);
8697 
8698 	return 0;
8699 }
8700 
8701 static struct pmu perf_cpu_clock = {
8702 	.task_ctx_nr	= perf_sw_context,
8703 
8704 	.capabilities	= PERF_PMU_CAP_NO_NMI,
8705 
8706 	.event_init	= cpu_clock_event_init,
8707 	.add		= cpu_clock_event_add,
8708 	.del		= cpu_clock_event_del,
8709 	.start		= cpu_clock_event_start,
8710 	.stop		= cpu_clock_event_stop,
8711 	.read		= cpu_clock_event_read,
8712 };
8713 
8714 /*
8715  * Software event: task time clock
8716  */
8717 
8718 static void task_clock_event_update(struct perf_event *event, u64 now)
8719 {
8720 	u64 prev;
8721 	s64 delta;
8722 
8723 	prev = local64_xchg(&event->hw.prev_count, now);
8724 	delta = now - prev;
8725 	local64_add(delta, &event->count);
8726 }
8727 
8728 static void task_clock_event_start(struct perf_event *event, int flags)
8729 {
8730 	local64_set(&event->hw.prev_count, event->ctx->time);
8731 	perf_swevent_start_hrtimer(event);
8732 }
8733 
8734 static void task_clock_event_stop(struct perf_event *event, int flags)
8735 {
8736 	perf_swevent_cancel_hrtimer(event);
8737 	task_clock_event_update(event, event->ctx->time);
8738 }
8739 
8740 static int task_clock_event_add(struct perf_event *event, int flags)
8741 {
8742 	if (flags & PERF_EF_START)
8743 		task_clock_event_start(event, flags);
8744 	perf_event_update_userpage(event);
8745 
8746 	return 0;
8747 }
8748 
8749 static void task_clock_event_del(struct perf_event *event, int flags)
8750 {
8751 	task_clock_event_stop(event, PERF_EF_UPDATE);
8752 }
8753 
8754 static void task_clock_event_read(struct perf_event *event)
8755 {
8756 	u64 now = perf_clock();
8757 	u64 delta = now - event->ctx->timestamp;
8758 	u64 time = event->ctx->time + delta;
8759 
8760 	task_clock_event_update(event, time);
8761 }
8762 
8763 static int task_clock_event_init(struct perf_event *event)
8764 {
8765 	if (event->attr.type != PERF_TYPE_SOFTWARE)
8766 		return -ENOENT;
8767 
8768 	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
8769 		return -ENOENT;
8770 
8771 	/*
8772 	 * no branch sampling for software events
8773 	 */
8774 	if (has_branch_stack(event))
8775 		return -EOPNOTSUPP;
8776 
8777 	perf_swevent_init_hrtimer(event);
8778 
8779 	return 0;
8780 }
8781 
8782 static struct pmu perf_task_clock = {
8783 	.task_ctx_nr	= perf_sw_context,
8784 
8785 	.capabilities	= PERF_PMU_CAP_NO_NMI,
8786 
8787 	.event_init	= task_clock_event_init,
8788 	.add		= task_clock_event_add,
8789 	.del		= task_clock_event_del,
8790 	.start		= task_clock_event_start,
8791 	.stop		= task_clock_event_stop,
8792 	.read		= task_clock_event_read,
8793 };
8794 
8795 static void perf_pmu_nop_void(struct pmu *pmu)
8796 {
8797 }
8798 
8799 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
8800 {
8801 }
8802 
8803 static int perf_pmu_nop_int(struct pmu *pmu)
8804 {
8805 	return 0;
8806 }
8807 
8808 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
8809 
8810 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
8811 {
8812 	__this_cpu_write(nop_txn_flags, flags);
8813 
8814 	if (flags & ~PERF_PMU_TXN_ADD)
8815 		return;
8816 
8817 	perf_pmu_disable(pmu);
8818 }
8819 
8820 static int perf_pmu_commit_txn(struct pmu *pmu)
8821 {
8822 	unsigned int flags = __this_cpu_read(nop_txn_flags);
8823 
8824 	__this_cpu_write(nop_txn_flags, 0);
8825 
8826 	if (flags & ~PERF_PMU_TXN_ADD)
8827 		return 0;
8828 
8829 	perf_pmu_enable(pmu);
8830 	return 0;
8831 }
8832 
8833 static void perf_pmu_cancel_txn(struct pmu *pmu)
8834 {
8835 	unsigned int flags =  __this_cpu_read(nop_txn_flags);
8836 
8837 	__this_cpu_write(nop_txn_flags, 0);
8838 
8839 	if (flags & ~PERF_PMU_TXN_ADD)
8840 		return;
8841 
8842 	perf_pmu_enable(pmu);
8843 }
8844 
8845 static int perf_event_idx_default(struct perf_event *event)
8846 {
8847 	return 0;
8848 }
8849 
8850 /*
8851  * Ensures all contexts with the same task_ctx_nr have the same
8852  * pmu_cpu_context too.
8853  */
8854 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
8855 {
8856 	struct pmu *pmu;
8857 
8858 	if (ctxn < 0)
8859 		return NULL;
8860 
8861 	list_for_each_entry(pmu, &pmus, entry) {
8862 		if (pmu->task_ctx_nr == ctxn)
8863 			return pmu->pmu_cpu_context;
8864 	}
8865 
8866 	return NULL;
8867 }
8868 
8869 static void free_pmu_context(struct pmu *pmu)
8870 {
8871 	mutex_lock(&pmus_lock);
8872 	free_percpu(pmu->pmu_cpu_context);
8873 	mutex_unlock(&pmus_lock);
8874 }
8875 
8876 /*
8877  * Let userspace know that this PMU supports address range filtering:
8878  */
8879 static ssize_t nr_addr_filters_show(struct device *dev,
8880 				    struct device_attribute *attr,
8881 				    char *page)
8882 {
8883 	struct pmu *pmu = dev_get_drvdata(dev);
8884 
8885 	return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
8886 }
8887 DEVICE_ATTR_RO(nr_addr_filters);
8888 
8889 static struct idr pmu_idr;
8890 
8891 static ssize_t
8892 type_show(struct device *dev, struct device_attribute *attr, char *page)
8893 {
8894 	struct pmu *pmu = dev_get_drvdata(dev);
8895 
8896 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
8897 }
8898 static DEVICE_ATTR_RO(type);
8899 
8900 static ssize_t
8901 perf_event_mux_interval_ms_show(struct device *dev,
8902 				struct device_attribute *attr,
8903 				char *page)
8904 {
8905 	struct pmu *pmu = dev_get_drvdata(dev);
8906 
8907 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
8908 }
8909 
8910 static DEFINE_MUTEX(mux_interval_mutex);
8911 
8912 static ssize_t
8913 perf_event_mux_interval_ms_store(struct device *dev,
8914 				 struct device_attribute *attr,
8915 				 const char *buf, size_t count)
8916 {
8917 	struct pmu *pmu = dev_get_drvdata(dev);
8918 	int timer, cpu, ret;
8919 
8920 	ret = kstrtoint(buf, 0, &timer);
8921 	if (ret)
8922 		return ret;
8923 
8924 	if (timer < 1)
8925 		return -EINVAL;
8926 
8927 	/* same value, noting to do */
8928 	if (timer == pmu->hrtimer_interval_ms)
8929 		return count;
8930 
8931 	mutex_lock(&mux_interval_mutex);
8932 	pmu->hrtimer_interval_ms = timer;
8933 
8934 	/* update all cpuctx for this PMU */
8935 	cpus_read_lock();
8936 	for_each_online_cpu(cpu) {
8937 		struct perf_cpu_context *cpuctx;
8938 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8939 		cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
8940 
8941 		cpu_function_call(cpu,
8942 			(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
8943 	}
8944 	cpus_read_unlock();
8945 	mutex_unlock(&mux_interval_mutex);
8946 
8947 	return count;
8948 }
8949 static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
8950 
8951 static struct attribute *pmu_dev_attrs[] = {
8952 	&dev_attr_type.attr,
8953 	&dev_attr_perf_event_mux_interval_ms.attr,
8954 	NULL,
8955 };
8956 ATTRIBUTE_GROUPS(pmu_dev);
8957 
8958 static int pmu_bus_running;
8959 static struct bus_type pmu_bus = {
8960 	.name		= "event_source",
8961 	.dev_groups	= pmu_dev_groups,
8962 };
8963 
8964 static void pmu_dev_release(struct device *dev)
8965 {
8966 	kfree(dev);
8967 }
8968 
8969 static int pmu_dev_alloc(struct pmu *pmu)
8970 {
8971 	int ret = -ENOMEM;
8972 
8973 	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
8974 	if (!pmu->dev)
8975 		goto out;
8976 
8977 	pmu->dev->groups = pmu->attr_groups;
8978 	device_initialize(pmu->dev);
8979 	ret = dev_set_name(pmu->dev, "%s", pmu->name);
8980 	if (ret)
8981 		goto free_dev;
8982 
8983 	dev_set_drvdata(pmu->dev, pmu);
8984 	pmu->dev->bus = &pmu_bus;
8985 	pmu->dev->release = pmu_dev_release;
8986 	ret = device_add(pmu->dev);
8987 	if (ret)
8988 		goto free_dev;
8989 
8990 	/* For PMUs with address filters, throw in an extra attribute: */
8991 	if (pmu->nr_addr_filters)
8992 		ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
8993 
8994 	if (ret)
8995 		goto del_dev;
8996 
8997 out:
8998 	return ret;
8999 
9000 del_dev:
9001 	device_del(pmu->dev);
9002 
9003 free_dev:
9004 	put_device(pmu->dev);
9005 	goto out;
9006 }
9007 
9008 static struct lock_class_key cpuctx_mutex;
9009 static struct lock_class_key cpuctx_lock;
9010 
9011 int perf_pmu_register(struct pmu *pmu, const char *name, int type)
9012 {
9013 	int cpu, ret;
9014 
9015 	mutex_lock(&pmus_lock);
9016 	ret = -ENOMEM;
9017 	pmu->pmu_disable_count = alloc_percpu(int);
9018 	if (!pmu->pmu_disable_count)
9019 		goto unlock;
9020 
9021 	pmu->type = -1;
9022 	if (!name)
9023 		goto skip_type;
9024 	pmu->name = name;
9025 
9026 	if (type < 0) {
9027 		type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
9028 		if (type < 0) {
9029 			ret = type;
9030 			goto free_pdc;
9031 		}
9032 	}
9033 	pmu->type = type;
9034 
9035 	if (pmu_bus_running) {
9036 		ret = pmu_dev_alloc(pmu);
9037 		if (ret)
9038 			goto free_idr;
9039 	}
9040 
9041 skip_type:
9042 	if (pmu->task_ctx_nr == perf_hw_context) {
9043 		static int hw_context_taken = 0;
9044 
9045 		/*
9046 		 * Other than systems with heterogeneous CPUs, it never makes
9047 		 * sense for two PMUs to share perf_hw_context. PMUs which are
9048 		 * uncore must use perf_invalid_context.
9049 		 */
9050 		if (WARN_ON_ONCE(hw_context_taken &&
9051 		    !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
9052 			pmu->task_ctx_nr = perf_invalid_context;
9053 
9054 		hw_context_taken = 1;
9055 	}
9056 
9057 	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
9058 	if (pmu->pmu_cpu_context)
9059 		goto got_cpu_context;
9060 
9061 	ret = -ENOMEM;
9062 	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
9063 	if (!pmu->pmu_cpu_context)
9064 		goto free_dev;
9065 
9066 	for_each_possible_cpu(cpu) {
9067 		struct perf_cpu_context *cpuctx;
9068 
9069 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
9070 		__perf_event_init_context(&cpuctx->ctx);
9071 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
9072 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
9073 		cpuctx->ctx.pmu = pmu;
9074 		cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
9075 
9076 		__perf_mux_hrtimer_init(cpuctx, cpu);
9077 	}
9078 
9079 got_cpu_context:
9080 	if (!pmu->start_txn) {
9081 		if (pmu->pmu_enable) {
9082 			/*
9083 			 * If we have pmu_enable/pmu_disable calls, install
9084 			 * transaction stubs that use that to try and batch
9085 			 * hardware accesses.
9086 			 */
9087 			pmu->start_txn  = perf_pmu_start_txn;
9088 			pmu->commit_txn = perf_pmu_commit_txn;
9089 			pmu->cancel_txn = perf_pmu_cancel_txn;
9090 		} else {
9091 			pmu->start_txn  = perf_pmu_nop_txn;
9092 			pmu->commit_txn = perf_pmu_nop_int;
9093 			pmu->cancel_txn = perf_pmu_nop_void;
9094 		}
9095 	}
9096 
9097 	if (!pmu->pmu_enable) {
9098 		pmu->pmu_enable  = perf_pmu_nop_void;
9099 		pmu->pmu_disable = perf_pmu_nop_void;
9100 	}
9101 
9102 	if (!pmu->event_idx)
9103 		pmu->event_idx = perf_event_idx_default;
9104 
9105 	list_add_rcu(&pmu->entry, &pmus);
9106 	atomic_set(&pmu->exclusive_cnt, 0);
9107 	ret = 0;
9108 unlock:
9109 	mutex_unlock(&pmus_lock);
9110 
9111 	return ret;
9112 
9113 free_dev:
9114 	device_del(pmu->dev);
9115 	put_device(pmu->dev);
9116 
9117 free_idr:
9118 	if (pmu->type >= PERF_TYPE_MAX)
9119 		idr_remove(&pmu_idr, pmu->type);
9120 
9121 free_pdc:
9122 	free_percpu(pmu->pmu_disable_count);
9123 	goto unlock;
9124 }
9125 EXPORT_SYMBOL_GPL(perf_pmu_register);
9126 
9127 void perf_pmu_unregister(struct pmu *pmu)
9128 {
9129 	int remove_device;
9130 
9131 	mutex_lock(&pmus_lock);
9132 	remove_device = pmu_bus_running;
9133 	list_del_rcu(&pmu->entry);
9134 	mutex_unlock(&pmus_lock);
9135 
9136 	/*
9137 	 * We dereference the pmu list under both SRCU and regular RCU, so
9138 	 * synchronize against both of those.
9139 	 */
9140 	synchronize_srcu(&pmus_srcu);
9141 	synchronize_rcu();
9142 
9143 	free_percpu(pmu->pmu_disable_count);
9144 	if (pmu->type >= PERF_TYPE_MAX)
9145 		idr_remove(&pmu_idr, pmu->type);
9146 	if (remove_device) {
9147 		if (pmu->nr_addr_filters)
9148 			device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
9149 		device_del(pmu->dev);
9150 		put_device(pmu->dev);
9151 	}
9152 	free_pmu_context(pmu);
9153 }
9154 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
9155 
9156 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
9157 {
9158 	struct perf_event_context *ctx = NULL;
9159 	int ret;
9160 
9161 	if (!try_module_get(pmu->module))
9162 		return -ENODEV;
9163 
9164 	if (event->group_leader != event) {
9165 		/*
9166 		 * This ctx->mutex can nest when we're called through
9167 		 * inheritance. See the perf_event_ctx_lock_nested() comment.
9168 		 */
9169 		ctx = perf_event_ctx_lock_nested(event->group_leader,
9170 						 SINGLE_DEPTH_NESTING);
9171 		BUG_ON(!ctx);
9172 	}
9173 
9174 	event->pmu = pmu;
9175 	ret = pmu->event_init(event);
9176 
9177 	if (ctx)
9178 		perf_event_ctx_unlock(event->group_leader, ctx);
9179 
9180 	if (ret)
9181 		module_put(pmu->module);
9182 
9183 	return ret;
9184 }
9185 
9186 static struct pmu *perf_init_event(struct perf_event *event)
9187 {
9188 	struct pmu *pmu;
9189 	int idx;
9190 	int ret;
9191 
9192 	idx = srcu_read_lock(&pmus_srcu);
9193 
9194 	/* Try parent's PMU first: */
9195 	if (event->parent && event->parent->pmu) {
9196 		pmu = event->parent->pmu;
9197 		ret = perf_try_init_event(pmu, event);
9198 		if (!ret)
9199 			goto unlock;
9200 	}
9201 
9202 	rcu_read_lock();
9203 	pmu = idr_find(&pmu_idr, event->attr.type);
9204 	rcu_read_unlock();
9205 	if (pmu) {
9206 		ret = perf_try_init_event(pmu, event);
9207 		if (ret)
9208 			pmu = ERR_PTR(ret);
9209 		goto unlock;
9210 	}
9211 
9212 	list_for_each_entry_rcu(pmu, &pmus, entry) {
9213 		ret = perf_try_init_event(pmu, event);
9214 		if (!ret)
9215 			goto unlock;
9216 
9217 		if (ret != -ENOENT) {
9218 			pmu = ERR_PTR(ret);
9219 			goto unlock;
9220 		}
9221 	}
9222 	pmu = ERR_PTR(-ENOENT);
9223 unlock:
9224 	srcu_read_unlock(&pmus_srcu, idx);
9225 
9226 	return pmu;
9227 }
9228 
9229 static void attach_sb_event(struct perf_event *event)
9230 {
9231 	struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
9232 
9233 	raw_spin_lock(&pel->lock);
9234 	list_add_rcu(&event->sb_list, &pel->list);
9235 	raw_spin_unlock(&pel->lock);
9236 }
9237 
9238 /*
9239  * We keep a list of all !task (and therefore per-cpu) events
9240  * that need to receive side-band records.
9241  *
9242  * This avoids having to scan all the various PMU per-cpu contexts
9243  * looking for them.
9244  */
9245 static void account_pmu_sb_event(struct perf_event *event)
9246 {
9247 	if (is_sb_event(event))
9248 		attach_sb_event(event);
9249 }
9250 
9251 static void account_event_cpu(struct perf_event *event, int cpu)
9252 {
9253 	if (event->parent)
9254 		return;
9255 
9256 	if (is_cgroup_event(event))
9257 		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
9258 }
9259 
9260 /* Freq events need the tick to stay alive (see perf_event_task_tick). */
9261 static void account_freq_event_nohz(void)
9262 {
9263 #ifdef CONFIG_NO_HZ_FULL
9264 	/* Lock so we don't race with concurrent unaccount */
9265 	spin_lock(&nr_freq_lock);
9266 	if (atomic_inc_return(&nr_freq_events) == 1)
9267 		tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
9268 	spin_unlock(&nr_freq_lock);
9269 #endif
9270 }
9271 
9272 static void account_freq_event(void)
9273 {
9274 	if (tick_nohz_full_enabled())
9275 		account_freq_event_nohz();
9276 	else
9277 		atomic_inc(&nr_freq_events);
9278 }
9279 
9280 
9281 static void account_event(struct perf_event *event)
9282 {
9283 	bool inc = false;
9284 
9285 	if (event->parent)
9286 		return;
9287 
9288 	if (event->attach_state & PERF_ATTACH_TASK)
9289 		inc = true;
9290 	if (event->attr.mmap || event->attr.mmap_data)
9291 		atomic_inc(&nr_mmap_events);
9292 	if (event->attr.comm)
9293 		atomic_inc(&nr_comm_events);
9294 	if (event->attr.namespaces)
9295 		atomic_inc(&nr_namespaces_events);
9296 	if (event->attr.task)
9297 		atomic_inc(&nr_task_events);
9298 	if (event->attr.freq)
9299 		account_freq_event();
9300 	if (event->attr.context_switch) {
9301 		atomic_inc(&nr_switch_events);
9302 		inc = true;
9303 	}
9304 	if (has_branch_stack(event))
9305 		inc = true;
9306 	if (is_cgroup_event(event))
9307 		inc = true;
9308 
9309 	if (inc) {
9310 		if (atomic_inc_not_zero(&perf_sched_count))
9311 			goto enabled;
9312 
9313 		mutex_lock(&perf_sched_mutex);
9314 		if (!atomic_read(&perf_sched_count)) {
9315 			static_branch_enable(&perf_sched_events);
9316 			/*
9317 			 * Guarantee that all CPUs observe they key change and
9318 			 * call the perf scheduling hooks before proceeding to
9319 			 * install events that need them.
9320 			 */
9321 			synchronize_sched();
9322 		}
9323 		/*
9324 		 * Now that we have waited for the sync_sched(), allow further
9325 		 * increments to by-pass the mutex.
9326 		 */
9327 		atomic_inc(&perf_sched_count);
9328 		mutex_unlock(&perf_sched_mutex);
9329 	}
9330 enabled:
9331 
9332 	account_event_cpu(event, event->cpu);
9333 
9334 	account_pmu_sb_event(event);
9335 }
9336 
9337 /*
9338  * Allocate and initialize a event structure
9339  */
9340 static struct perf_event *
9341 perf_event_alloc(struct perf_event_attr *attr, int cpu,
9342 		 struct task_struct *task,
9343 		 struct perf_event *group_leader,
9344 		 struct perf_event *parent_event,
9345 		 perf_overflow_handler_t overflow_handler,
9346 		 void *context, int cgroup_fd)
9347 {
9348 	struct pmu *pmu;
9349 	struct perf_event *event;
9350 	struct hw_perf_event *hwc;
9351 	long err = -EINVAL;
9352 
9353 	if ((unsigned)cpu >= nr_cpu_ids) {
9354 		if (!task || cpu != -1)
9355 			return ERR_PTR(-EINVAL);
9356 	}
9357 
9358 	event = kzalloc(sizeof(*event), GFP_KERNEL);
9359 	if (!event)
9360 		return ERR_PTR(-ENOMEM);
9361 
9362 	/*
9363 	 * Single events are their own group leaders, with an
9364 	 * empty sibling list:
9365 	 */
9366 	if (!group_leader)
9367 		group_leader = event;
9368 
9369 	mutex_init(&event->child_mutex);
9370 	INIT_LIST_HEAD(&event->child_list);
9371 
9372 	INIT_LIST_HEAD(&event->group_entry);
9373 	INIT_LIST_HEAD(&event->event_entry);
9374 	INIT_LIST_HEAD(&event->sibling_list);
9375 	INIT_LIST_HEAD(&event->rb_entry);
9376 	INIT_LIST_HEAD(&event->active_entry);
9377 	INIT_LIST_HEAD(&event->addr_filters.list);
9378 	INIT_HLIST_NODE(&event->hlist_entry);
9379 
9380 
9381 	init_waitqueue_head(&event->waitq);
9382 	init_irq_work(&event->pending, perf_pending_event);
9383 
9384 	mutex_init(&event->mmap_mutex);
9385 	raw_spin_lock_init(&event->addr_filters.lock);
9386 
9387 	atomic_long_set(&event->refcount, 1);
9388 	event->cpu		= cpu;
9389 	event->attr		= *attr;
9390 	event->group_leader	= group_leader;
9391 	event->pmu		= NULL;
9392 	event->oncpu		= -1;
9393 
9394 	event->parent		= parent_event;
9395 
9396 	event->ns		= get_pid_ns(task_active_pid_ns(current));
9397 	event->id		= atomic64_inc_return(&perf_event_id);
9398 
9399 	event->state		= PERF_EVENT_STATE_INACTIVE;
9400 
9401 	if (task) {
9402 		event->attach_state = PERF_ATTACH_TASK;
9403 		/*
9404 		 * XXX pmu::event_init needs to know what task to account to
9405 		 * and we cannot use the ctx information because we need the
9406 		 * pmu before we get a ctx.
9407 		 */
9408 		event->hw.target = task;
9409 	}
9410 
9411 	event->clock = &local_clock;
9412 	if (parent_event)
9413 		event->clock = parent_event->clock;
9414 
9415 	if (!overflow_handler && parent_event) {
9416 		overflow_handler = parent_event->overflow_handler;
9417 		context = parent_event->overflow_handler_context;
9418 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
9419 		if (overflow_handler == bpf_overflow_handler) {
9420 			struct bpf_prog *prog = bpf_prog_inc(parent_event->prog);
9421 
9422 			if (IS_ERR(prog)) {
9423 				err = PTR_ERR(prog);
9424 				goto err_ns;
9425 			}
9426 			event->prog = prog;
9427 			event->orig_overflow_handler =
9428 				parent_event->orig_overflow_handler;
9429 		}
9430 #endif
9431 	}
9432 
9433 	if (overflow_handler) {
9434 		event->overflow_handler	= overflow_handler;
9435 		event->overflow_handler_context = context;
9436 	} else if (is_write_backward(event)){
9437 		event->overflow_handler = perf_event_output_backward;
9438 		event->overflow_handler_context = NULL;
9439 	} else {
9440 		event->overflow_handler = perf_event_output_forward;
9441 		event->overflow_handler_context = NULL;
9442 	}
9443 
9444 	perf_event__state_init(event);
9445 
9446 	pmu = NULL;
9447 
9448 	hwc = &event->hw;
9449 	hwc->sample_period = attr->sample_period;
9450 	if (attr->freq && attr->sample_freq)
9451 		hwc->sample_period = 1;
9452 	hwc->last_period = hwc->sample_period;
9453 
9454 	local64_set(&hwc->period_left, hwc->sample_period);
9455 
9456 	/*
9457 	 * We currently do not support PERF_SAMPLE_READ on inherited events.
9458 	 * See perf_output_read().
9459 	 */
9460 	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
9461 		goto err_ns;
9462 
9463 	if (!has_branch_stack(event))
9464 		event->attr.branch_sample_type = 0;
9465 
9466 	if (cgroup_fd != -1) {
9467 		err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
9468 		if (err)
9469 			goto err_ns;
9470 	}
9471 
9472 	pmu = perf_init_event(event);
9473 	if (IS_ERR(pmu)) {
9474 		err = PTR_ERR(pmu);
9475 		goto err_ns;
9476 	}
9477 
9478 	err = exclusive_event_init(event);
9479 	if (err)
9480 		goto err_pmu;
9481 
9482 	if (has_addr_filter(event)) {
9483 		event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
9484 						   sizeof(unsigned long),
9485 						   GFP_KERNEL);
9486 		if (!event->addr_filters_offs) {
9487 			err = -ENOMEM;
9488 			goto err_per_task;
9489 		}
9490 
9491 		/* force hw sync on the address filters */
9492 		event->addr_filters_gen = 1;
9493 	}
9494 
9495 	if (!event->parent) {
9496 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
9497 			err = get_callchain_buffers(attr->sample_max_stack);
9498 			if (err)
9499 				goto err_addr_filters;
9500 		}
9501 	}
9502 
9503 	/* symmetric to unaccount_event() in _free_event() */
9504 	account_event(event);
9505 
9506 	return event;
9507 
9508 err_addr_filters:
9509 	kfree(event->addr_filters_offs);
9510 
9511 err_per_task:
9512 	exclusive_event_destroy(event);
9513 
9514 err_pmu:
9515 	if (event->destroy)
9516 		event->destroy(event);
9517 	module_put(pmu->module);
9518 err_ns:
9519 	if (is_cgroup_event(event))
9520 		perf_detach_cgroup(event);
9521 	if (event->ns)
9522 		put_pid_ns(event->ns);
9523 	kfree(event);
9524 
9525 	return ERR_PTR(err);
9526 }
9527 
9528 static int perf_copy_attr(struct perf_event_attr __user *uattr,
9529 			  struct perf_event_attr *attr)
9530 {
9531 	u32 size;
9532 	int ret;
9533 
9534 	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
9535 		return -EFAULT;
9536 
9537 	/*
9538 	 * zero the full structure, so that a short copy will be nice.
9539 	 */
9540 	memset(attr, 0, sizeof(*attr));
9541 
9542 	ret = get_user(size, &uattr->size);
9543 	if (ret)
9544 		return ret;
9545 
9546 	if (size > PAGE_SIZE)	/* silly large */
9547 		goto err_size;
9548 
9549 	if (!size)		/* abi compat */
9550 		size = PERF_ATTR_SIZE_VER0;
9551 
9552 	if (size < PERF_ATTR_SIZE_VER0)
9553 		goto err_size;
9554 
9555 	/*
9556 	 * If we're handed a bigger struct than we know of,
9557 	 * ensure all the unknown bits are 0 - i.e. new
9558 	 * user-space does not rely on any kernel feature
9559 	 * extensions we dont know about yet.
9560 	 */
9561 	if (size > sizeof(*attr)) {
9562 		unsigned char __user *addr;
9563 		unsigned char __user *end;
9564 		unsigned char val;
9565 
9566 		addr = (void __user *)uattr + sizeof(*attr);
9567 		end  = (void __user *)uattr + size;
9568 
9569 		for (; addr < end; addr++) {
9570 			ret = get_user(val, addr);
9571 			if (ret)
9572 				return ret;
9573 			if (val)
9574 				goto err_size;
9575 		}
9576 		size = sizeof(*attr);
9577 	}
9578 
9579 	ret = copy_from_user(attr, uattr, size);
9580 	if (ret)
9581 		return -EFAULT;
9582 
9583 	if (attr->__reserved_1)
9584 		return -EINVAL;
9585 
9586 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
9587 		return -EINVAL;
9588 
9589 	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
9590 		return -EINVAL;
9591 
9592 	if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
9593 		u64 mask = attr->branch_sample_type;
9594 
9595 		/* only using defined bits */
9596 		if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
9597 			return -EINVAL;
9598 
9599 		/* at least one branch bit must be set */
9600 		if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
9601 			return -EINVAL;
9602 
9603 		/* propagate priv level, when not set for branch */
9604 		if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
9605 
9606 			/* exclude_kernel checked on syscall entry */
9607 			if (!attr->exclude_kernel)
9608 				mask |= PERF_SAMPLE_BRANCH_KERNEL;
9609 
9610 			if (!attr->exclude_user)
9611 				mask |= PERF_SAMPLE_BRANCH_USER;
9612 
9613 			if (!attr->exclude_hv)
9614 				mask |= PERF_SAMPLE_BRANCH_HV;
9615 			/*
9616 			 * adjust user setting (for HW filter setup)
9617 			 */
9618 			attr->branch_sample_type = mask;
9619 		}
9620 		/* privileged levels capture (kernel, hv): check permissions */
9621 		if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
9622 		    && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9623 			return -EACCES;
9624 	}
9625 
9626 	if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
9627 		ret = perf_reg_validate(attr->sample_regs_user);
9628 		if (ret)
9629 			return ret;
9630 	}
9631 
9632 	if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
9633 		if (!arch_perf_have_user_stack_dump())
9634 			return -ENOSYS;
9635 
9636 		/*
9637 		 * We have __u32 type for the size, but so far
9638 		 * we can only use __u16 as maximum due to the
9639 		 * __u16 sample size limit.
9640 		 */
9641 		if (attr->sample_stack_user >= USHRT_MAX)
9642 			ret = -EINVAL;
9643 		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
9644 			ret = -EINVAL;
9645 	}
9646 
9647 	if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
9648 		ret = perf_reg_validate(attr->sample_regs_intr);
9649 out:
9650 	return ret;
9651 
9652 err_size:
9653 	put_user(sizeof(*attr), &uattr->size);
9654 	ret = -E2BIG;
9655 	goto out;
9656 }
9657 
9658 static int
9659 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
9660 {
9661 	struct ring_buffer *rb = NULL;
9662 	int ret = -EINVAL;
9663 
9664 	if (!output_event)
9665 		goto set;
9666 
9667 	/* don't allow circular references */
9668 	if (event == output_event)
9669 		goto out;
9670 
9671 	/*
9672 	 * Don't allow cross-cpu buffers
9673 	 */
9674 	if (output_event->cpu != event->cpu)
9675 		goto out;
9676 
9677 	/*
9678 	 * If its not a per-cpu rb, it must be the same task.
9679 	 */
9680 	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
9681 		goto out;
9682 
9683 	/*
9684 	 * Mixing clocks in the same buffer is trouble you don't need.
9685 	 */
9686 	if (output_event->clock != event->clock)
9687 		goto out;
9688 
9689 	/*
9690 	 * Either writing ring buffer from beginning or from end.
9691 	 * Mixing is not allowed.
9692 	 */
9693 	if (is_write_backward(output_event) != is_write_backward(event))
9694 		goto out;
9695 
9696 	/*
9697 	 * If both events generate aux data, they must be on the same PMU
9698 	 */
9699 	if (has_aux(event) && has_aux(output_event) &&
9700 	    event->pmu != output_event->pmu)
9701 		goto out;
9702 
9703 set:
9704 	mutex_lock(&event->mmap_mutex);
9705 	/* Can't redirect output if we've got an active mmap() */
9706 	if (atomic_read(&event->mmap_count))
9707 		goto unlock;
9708 
9709 	if (output_event) {
9710 		/* get the rb we want to redirect to */
9711 		rb = ring_buffer_get(output_event);
9712 		if (!rb)
9713 			goto unlock;
9714 	}
9715 
9716 	ring_buffer_attach(event, rb);
9717 
9718 	ret = 0;
9719 unlock:
9720 	mutex_unlock(&event->mmap_mutex);
9721 
9722 out:
9723 	return ret;
9724 }
9725 
9726 static void mutex_lock_double(struct mutex *a, struct mutex *b)
9727 {
9728 	if (b < a)
9729 		swap(a, b);
9730 
9731 	mutex_lock(a);
9732 	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
9733 }
9734 
9735 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
9736 {
9737 	bool nmi_safe = false;
9738 
9739 	switch (clk_id) {
9740 	case CLOCK_MONOTONIC:
9741 		event->clock = &ktime_get_mono_fast_ns;
9742 		nmi_safe = true;
9743 		break;
9744 
9745 	case CLOCK_MONOTONIC_RAW:
9746 		event->clock = &ktime_get_raw_fast_ns;
9747 		nmi_safe = true;
9748 		break;
9749 
9750 	case CLOCK_REALTIME:
9751 		event->clock = &ktime_get_real_ns;
9752 		break;
9753 
9754 	case CLOCK_BOOTTIME:
9755 		event->clock = &ktime_get_boot_ns;
9756 		break;
9757 
9758 	case CLOCK_TAI:
9759 		event->clock = &ktime_get_tai_ns;
9760 		break;
9761 
9762 	default:
9763 		return -EINVAL;
9764 	}
9765 
9766 	if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
9767 		return -EINVAL;
9768 
9769 	return 0;
9770 }
9771 
9772 /*
9773  * Variation on perf_event_ctx_lock_nested(), except we take two context
9774  * mutexes.
9775  */
9776 static struct perf_event_context *
9777 __perf_event_ctx_lock_double(struct perf_event *group_leader,
9778 			     struct perf_event_context *ctx)
9779 {
9780 	struct perf_event_context *gctx;
9781 
9782 again:
9783 	rcu_read_lock();
9784 	gctx = READ_ONCE(group_leader->ctx);
9785 	if (!atomic_inc_not_zero(&gctx->refcount)) {
9786 		rcu_read_unlock();
9787 		goto again;
9788 	}
9789 	rcu_read_unlock();
9790 
9791 	mutex_lock_double(&gctx->mutex, &ctx->mutex);
9792 
9793 	if (group_leader->ctx != gctx) {
9794 		mutex_unlock(&ctx->mutex);
9795 		mutex_unlock(&gctx->mutex);
9796 		put_ctx(gctx);
9797 		goto again;
9798 	}
9799 
9800 	return gctx;
9801 }
9802 
9803 /**
9804  * sys_perf_event_open - open a performance event, associate it to a task/cpu
9805  *
9806  * @attr_uptr:	event_id type attributes for monitoring/sampling
9807  * @pid:		target pid
9808  * @cpu:		target cpu
9809  * @group_fd:		group leader event fd
9810  */
9811 SYSCALL_DEFINE5(perf_event_open,
9812 		struct perf_event_attr __user *, attr_uptr,
9813 		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
9814 {
9815 	struct perf_event *group_leader = NULL, *output_event = NULL;
9816 	struct perf_event *event, *sibling;
9817 	struct perf_event_attr attr;
9818 	struct perf_event_context *ctx, *uninitialized_var(gctx);
9819 	struct file *event_file = NULL;
9820 	struct fd group = {NULL, 0};
9821 	struct task_struct *task = NULL;
9822 	struct pmu *pmu;
9823 	int event_fd;
9824 	int move_group = 0;
9825 	int err;
9826 	int f_flags = O_RDWR;
9827 	int cgroup_fd = -1;
9828 
9829 	/* for future expandability... */
9830 	if (flags & ~PERF_FLAG_ALL)
9831 		return -EINVAL;
9832 
9833 	err = perf_copy_attr(attr_uptr, &attr);
9834 	if (err)
9835 		return err;
9836 
9837 	if (!attr.exclude_kernel) {
9838 		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9839 			return -EACCES;
9840 	}
9841 
9842 	if (attr.namespaces) {
9843 		if (!capable(CAP_SYS_ADMIN))
9844 			return -EACCES;
9845 	}
9846 
9847 	if (attr.freq) {
9848 		if (attr.sample_freq > sysctl_perf_event_sample_rate)
9849 			return -EINVAL;
9850 	} else {
9851 		if (attr.sample_period & (1ULL << 63))
9852 			return -EINVAL;
9853 	}
9854 
9855 	if (!attr.sample_max_stack)
9856 		attr.sample_max_stack = sysctl_perf_event_max_stack;
9857 
9858 	/*
9859 	 * In cgroup mode, the pid argument is used to pass the fd
9860 	 * opened to the cgroup directory in cgroupfs. The cpu argument
9861 	 * designates the cpu on which to monitor threads from that
9862 	 * cgroup.
9863 	 */
9864 	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
9865 		return -EINVAL;
9866 
9867 	if (flags & PERF_FLAG_FD_CLOEXEC)
9868 		f_flags |= O_CLOEXEC;
9869 
9870 	event_fd = get_unused_fd_flags(f_flags);
9871 	if (event_fd < 0)
9872 		return event_fd;
9873 
9874 	if (group_fd != -1) {
9875 		err = perf_fget_light(group_fd, &group);
9876 		if (err)
9877 			goto err_fd;
9878 		group_leader = group.file->private_data;
9879 		if (flags & PERF_FLAG_FD_OUTPUT)
9880 			output_event = group_leader;
9881 		if (flags & PERF_FLAG_FD_NO_GROUP)
9882 			group_leader = NULL;
9883 	}
9884 
9885 	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
9886 		task = find_lively_task_by_vpid(pid);
9887 		if (IS_ERR(task)) {
9888 			err = PTR_ERR(task);
9889 			goto err_group_fd;
9890 		}
9891 	}
9892 
9893 	if (task && group_leader &&
9894 	    group_leader->attr.inherit != attr.inherit) {
9895 		err = -EINVAL;
9896 		goto err_task;
9897 	}
9898 
9899 	if (task) {
9900 		err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
9901 		if (err)
9902 			goto err_task;
9903 
9904 		/*
9905 		 * Reuse ptrace permission checks for now.
9906 		 *
9907 		 * We must hold cred_guard_mutex across this and any potential
9908 		 * perf_install_in_context() call for this new event to
9909 		 * serialize against exec() altering our credentials (and the
9910 		 * perf_event_exit_task() that could imply).
9911 		 */
9912 		err = -EACCES;
9913 		if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
9914 			goto err_cred;
9915 	}
9916 
9917 	if (flags & PERF_FLAG_PID_CGROUP)
9918 		cgroup_fd = pid;
9919 
9920 	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
9921 				 NULL, NULL, cgroup_fd);
9922 	if (IS_ERR(event)) {
9923 		err = PTR_ERR(event);
9924 		goto err_cred;
9925 	}
9926 
9927 	if (is_sampling_event(event)) {
9928 		if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
9929 			err = -EOPNOTSUPP;
9930 			goto err_alloc;
9931 		}
9932 	}
9933 
9934 	/*
9935 	 * Special case software events and allow them to be part of
9936 	 * any hardware group.
9937 	 */
9938 	pmu = event->pmu;
9939 
9940 	if (attr.use_clockid) {
9941 		err = perf_event_set_clock(event, attr.clockid);
9942 		if (err)
9943 			goto err_alloc;
9944 	}
9945 
9946 	if (pmu->task_ctx_nr == perf_sw_context)
9947 		event->event_caps |= PERF_EV_CAP_SOFTWARE;
9948 
9949 	if (group_leader &&
9950 	    (is_software_event(event) != is_software_event(group_leader))) {
9951 		if (is_software_event(event)) {
9952 			/*
9953 			 * If event and group_leader are not both a software
9954 			 * event, and event is, then group leader is not.
9955 			 *
9956 			 * Allow the addition of software events to !software
9957 			 * groups, this is safe because software events never
9958 			 * fail to schedule.
9959 			 */
9960 			pmu = group_leader->pmu;
9961 		} else if (is_software_event(group_leader) &&
9962 			   (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
9963 			/*
9964 			 * In case the group is a pure software group, and we
9965 			 * try to add a hardware event, move the whole group to
9966 			 * the hardware context.
9967 			 */
9968 			move_group = 1;
9969 		}
9970 	}
9971 
9972 	/*
9973 	 * Get the target context (task or percpu):
9974 	 */
9975 	ctx = find_get_context(pmu, task, event);
9976 	if (IS_ERR(ctx)) {
9977 		err = PTR_ERR(ctx);
9978 		goto err_alloc;
9979 	}
9980 
9981 	if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
9982 		err = -EBUSY;
9983 		goto err_context;
9984 	}
9985 
9986 	/*
9987 	 * Look up the group leader (we will attach this event to it):
9988 	 */
9989 	if (group_leader) {
9990 		err = -EINVAL;
9991 
9992 		/*
9993 		 * Do not allow a recursive hierarchy (this new sibling
9994 		 * becoming part of another group-sibling):
9995 		 */
9996 		if (group_leader->group_leader != group_leader)
9997 			goto err_context;
9998 
9999 		/* All events in a group should have the same clock */
10000 		if (group_leader->clock != event->clock)
10001 			goto err_context;
10002 
10003 		/*
10004 		 * Do not allow to attach to a group in a different
10005 		 * task or CPU context:
10006 		 */
10007 		if (move_group) {
10008 			/*
10009 			 * Make sure we're both on the same task, or both
10010 			 * per-cpu events.
10011 			 */
10012 			if (group_leader->ctx->task != ctx->task)
10013 				goto err_context;
10014 
10015 			/*
10016 			 * Make sure we're both events for the same CPU;
10017 			 * grouping events for different CPUs is broken; since
10018 			 * you can never concurrently schedule them anyhow.
10019 			 */
10020 			if (group_leader->cpu != event->cpu)
10021 				goto err_context;
10022 		} else {
10023 			if (group_leader->ctx != ctx)
10024 				goto err_context;
10025 		}
10026 
10027 		/*
10028 		 * Only a group leader can be exclusive or pinned
10029 		 */
10030 		if (attr.exclusive || attr.pinned)
10031 			goto err_context;
10032 	}
10033 
10034 	if (output_event) {
10035 		err = perf_event_set_output(event, output_event);
10036 		if (err)
10037 			goto err_context;
10038 	}
10039 
10040 	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
10041 					f_flags);
10042 	if (IS_ERR(event_file)) {
10043 		err = PTR_ERR(event_file);
10044 		event_file = NULL;
10045 		goto err_context;
10046 	}
10047 
10048 	if (move_group) {
10049 		gctx = __perf_event_ctx_lock_double(group_leader, ctx);
10050 
10051 		if (gctx->task == TASK_TOMBSTONE) {
10052 			err = -ESRCH;
10053 			goto err_locked;
10054 		}
10055 
10056 		/*
10057 		 * Check if we raced against another sys_perf_event_open() call
10058 		 * moving the software group underneath us.
10059 		 */
10060 		if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
10061 			/*
10062 			 * If someone moved the group out from under us, check
10063 			 * if this new event wound up on the same ctx, if so
10064 			 * its the regular !move_group case, otherwise fail.
10065 			 */
10066 			if (gctx != ctx) {
10067 				err = -EINVAL;
10068 				goto err_locked;
10069 			} else {
10070 				perf_event_ctx_unlock(group_leader, gctx);
10071 				move_group = 0;
10072 			}
10073 		}
10074 	} else {
10075 		mutex_lock(&ctx->mutex);
10076 	}
10077 
10078 	if (ctx->task == TASK_TOMBSTONE) {
10079 		err = -ESRCH;
10080 		goto err_locked;
10081 	}
10082 
10083 	if (!perf_event_validate_size(event)) {
10084 		err = -E2BIG;
10085 		goto err_locked;
10086 	}
10087 
10088 	if (!task) {
10089 		/*
10090 		 * Check if the @cpu we're creating an event for is online.
10091 		 *
10092 		 * We use the perf_cpu_context::ctx::mutex to serialize against
10093 		 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
10094 		 */
10095 		struct perf_cpu_context *cpuctx =
10096 			container_of(ctx, struct perf_cpu_context, ctx);
10097 
10098 		if (!cpuctx->online) {
10099 			err = -ENODEV;
10100 			goto err_locked;
10101 		}
10102 	}
10103 
10104 
10105 	/*
10106 	 * Must be under the same ctx::mutex as perf_install_in_context(),
10107 	 * because we need to serialize with concurrent event creation.
10108 	 */
10109 	if (!exclusive_event_installable(event, ctx)) {
10110 		/* exclusive and group stuff are assumed mutually exclusive */
10111 		WARN_ON_ONCE(move_group);
10112 
10113 		err = -EBUSY;
10114 		goto err_locked;
10115 	}
10116 
10117 	WARN_ON_ONCE(ctx->parent_ctx);
10118 
10119 	/*
10120 	 * This is the point on no return; we cannot fail hereafter. This is
10121 	 * where we start modifying current state.
10122 	 */
10123 
10124 	if (move_group) {
10125 		/*
10126 		 * See perf_event_ctx_lock() for comments on the details
10127 		 * of swizzling perf_event::ctx.
10128 		 */
10129 		perf_remove_from_context(group_leader, 0);
10130 		put_ctx(gctx);
10131 
10132 		list_for_each_entry(sibling, &group_leader->sibling_list,
10133 				    group_entry) {
10134 			perf_remove_from_context(sibling, 0);
10135 			put_ctx(gctx);
10136 		}
10137 
10138 		/*
10139 		 * Wait for everybody to stop referencing the events through
10140 		 * the old lists, before installing it on new lists.
10141 		 */
10142 		synchronize_rcu();
10143 
10144 		/*
10145 		 * Install the group siblings before the group leader.
10146 		 *
10147 		 * Because a group leader will try and install the entire group
10148 		 * (through the sibling list, which is still in-tact), we can
10149 		 * end up with siblings installed in the wrong context.
10150 		 *
10151 		 * By installing siblings first we NO-OP because they're not
10152 		 * reachable through the group lists.
10153 		 */
10154 		list_for_each_entry(sibling, &group_leader->sibling_list,
10155 				    group_entry) {
10156 			perf_event__state_init(sibling);
10157 			perf_install_in_context(ctx, sibling, sibling->cpu);
10158 			get_ctx(ctx);
10159 		}
10160 
10161 		/*
10162 		 * Removing from the context ends up with disabled
10163 		 * event. What we want here is event in the initial
10164 		 * startup state, ready to be add into new context.
10165 		 */
10166 		perf_event__state_init(group_leader);
10167 		perf_install_in_context(ctx, group_leader, group_leader->cpu);
10168 		get_ctx(ctx);
10169 	}
10170 
10171 	/*
10172 	 * Precalculate sample_data sizes; do while holding ctx::mutex such
10173 	 * that we're serialized against further additions and before
10174 	 * perf_install_in_context() which is the point the event is active and
10175 	 * can use these values.
10176 	 */
10177 	perf_event__header_size(event);
10178 	perf_event__id_header_size(event);
10179 
10180 	event->owner = current;
10181 
10182 	perf_install_in_context(ctx, event, event->cpu);
10183 	perf_unpin_context(ctx);
10184 
10185 	if (move_group)
10186 		perf_event_ctx_unlock(group_leader, gctx);
10187 	mutex_unlock(&ctx->mutex);
10188 
10189 	if (task) {
10190 		mutex_unlock(&task->signal->cred_guard_mutex);
10191 		put_task_struct(task);
10192 	}
10193 
10194 	mutex_lock(&current->perf_event_mutex);
10195 	list_add_tail(&event->owner_entry, &current->perf_event_list);
10196 	mutex_unlock(&current->perf_event_mutex);
10197 
10198 	/*
10199 	 * Drop the reference on the group_event after placing the
10200 	 * new event on the sibling_list. This ensures destruction
10201 	 * of the group leader will find the pointer to itself in
10202 	 * perf_group_detach().
10203 	 */
10204 	fdput(group);
10205 	fd_install(event_fd, event_file);
10206 	return event_fd;
10207 
10208 err_locked:
10209 	if (move_group)
10210 		perf_event_ctx_unlock(group_leader, gctx);
10211 	mutex_unlock(&ctx->mutex);
10212 /* err_file: */
10213 	fput(event_file);
10214 err_context:
10215 	perf_unpin_context(ctx);
10216 	put_ctx(ctx);
10217 err_alloc:
10218 	/*
10219 	 * If event_file is set, the fput() above will have called ->release()
10220 	 * and that will take care of freeing the event.
10221 	 */
10222 	if (!event_file)
10223 		free_event(event);
10224 err_cred:
10225 	if (task)
10226 		mutex_unlock(&task->signal->cred_guard_mutex);
10227 err_task:
10228 	if (task)
10229 		put_task_struct(task);
10230 err_group_fd:
10231 	fdput(group);
10232 err_fd:
10233 	put_unused_fd(event_fd);
10234 	return err;
10235 }
10236 
10237 /**
10238  * perf_event_create_kernel_counter
10239  *
10240  * @attr: attributes of the counter to create
10241  * @cpu: cpu in which the counter is bound
10242  * @task: task to profile (NULL for percpu)
10243  */
10244 struct perf_event *
10245 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
10246 				 struct task_struct *task,
10247 				 perf_overflow_handler_t overflow_handler,
10248 				 void *context)
10249 {
10250 	struct perf_event_context *ctx;
10251 	struct perf_event *event;
10252 	int err;
10253 
10254 	/*
10255 	 * Get the target context (task or percpu):
10256 	 */
10257 
10258 	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
10259 				 overflow_handler, context, -1);
10260 	if (IS_ERR(event)) {
10261 		err = PTR_ERR(event);
10262 		goto err;
10263 	}
10264 
10265 	/* Mark owner so we could distinguish it from user events. */
10266 	event->owner = TASK_TOMBSTONE;
10267 
10268 	ctx = find_get_context(event->pmu, task, event);
10269 	if (IS_ERR(ctx)) {
10270 		err = PTR_ERR(ctx);
10271 		goto err_free;
10272 	}
10273 
10274 	WARN_ON_ONCE(ctx->parent_ctx);
10275 	mutex_lock(&ctx->mutex);
10276 	if (ctx->task == TASK_TOMBSTONE) {
10277 		err = -ESRCH;
10278 		goto err_unlock;
10279 	}
10280 
10281 	if (!task) {
10282 		/*
10283 		 * Check if the @cpu we're creating an event for is online.
10284 		 *
10285 		 * We use the perf_cpu_context::ctx::mutex to serialize against
10286 		 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
10287 		 */
10288 		struct perf_cpu_context *cpuctx =
10289 			container_of(ctx, struct perf_cpu_context, ctx);
10290 		if (!cpuctx->online) {
10291 			err = -ENODEV;
10292 			goto err_unlock;
10293 		}
10294 	}
10295 
10296 	if (!exclusive_event_installable(event, ctx)) {
10297 		err = -EBUSY;
10298 		goto err_unlock;
10299 	}
10300 
10301 	perf_install_in_context(ctx, event, cpu);
10302 	perf_unpin_context(ctx);
10303 	mutex_unlock(&ctx->mutex);
10304 
10305 	return event;
10306 
10307 err_unlock:
10308 	mutex_unlock(&ctx->mutex);
10309 	perf_unpin_context(ctx);
10310 	put_ctx(ctx);
10311 err_free:
10312 	free_event(event);
10313 err:
10314 	return ERR_PTR(err);
10315 }
10316 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
10317 
10318 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
10319 {
10320 	struct perf_event_context *src_ctx;
10321 	struct perf_event_context *dst_ctx;
10322 	struct perf_event *event, *tmp;
10323 	LIST_HEAD(events);
10324 
10325 	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
10326 	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
10327 
10328 	/*
10329 	 * See perf_event_ctx_lock() for comments on the details
10330 	 * of swizzling perf_event::ctx.
10331 	 */
10332 	mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
10333 	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
10334 				 event_entry) {
10335 		perf_remove_from_context(event, 0);
10336 		unaccount_event_cpu(event, src_cpu);
10337 		put_ctx(src_ctx);
10338 		list_add(&event->migrate_entry, &events);
10339 	}
10340 
10341 	/*
10342 	 * Wait for the events to quiesce before re-instating them.
10343 	 */
10344 	synchronize_rcu();
10345 
10346 	/*
10347 	 * Re-instate events in 2 passes.
10348 	 *
10349 	 * Skip over group leaders and only install siblings on this first
10350 	 * pass, siblings will not get enabled without a leader, however a
10351 	 * leader will enable its siblings, even if those are still on the old
10352 	 * context.
10353 	 */
10354 	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
10355 		if (event->group_leader == event)
10356 			continue;
10357 
10358 		list_del(&event->migrate_entry);
10359 		if (event->state >= PERF_EVENT_STATE_OFF)
10360 			event->state = PERF_EVENT_STATE_INACTIVE;
10361 		account_event_cpu(event, dst_cpu);
10362 		perf_install_in_context(dst_ctx, event, dst_cpu);
10363 		get_ctx(dst_ctx);
10364 	}
10365 
10366 	/*
10367 	 * Once all the siblings are setup properly, install the group leaders
10368 	 * to make it go.
10369 	 */
10370 	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
10371 		list_del(&event->migrate_entry);
10372 		if (event->state >= PERF_EVENT_STATE_OFF)
10373 			event->state = PERF_EVENT_STATE_INACTIVE;
10374 		account_event_cpu(event, dst_cpu);
10375 		perf_install_in_context(dst_ctx, event, dst_cpu);
10376 		get_ctx(dst_ctx);
10377 	}
10378 	mutex_unlock(&dst_ctx->mutex);
10379 	mutex_unlock(&src_ctx->mutex);
10380 }
10381 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
10382 
10383 static void sync_child_event(struct perf_event *child_event,
10384 			       struct task_struct *child)
10385 {
10386 	struct perf_event *parent_event = child_event->parent;
10387 	u64 child_val;
10388 
10389 	if (child_event->attr.inherit_stat)
10390 		perf_event_read_event(child_event, child);
10391 
10392 	child_val = perf_event_count(child_event);
10393 
10394 	/*
10395 	 * Add back the child's count to the parent's count:
10396 	 */
10397 	atomic64_add(child_val, &parent_event->child_count);
10398 	atomic64_add(child_event->total_time_enabled,
10399 		     &parent_event->child_total_time_enabled);
10400 	atomic64_add(child_event->total_time_running,
10401 		     &parent_event->child_total_time_running);
10402 }
10403 
10404 static void
10405 perf_event_exit_event(struct perf_event *child_event,
10406 		      struct perf_event_context *child_ctx,
10407 		      struct task_struct *child)
10408 {
10409 	struct perf_event *parent_event = child_event->parent;
10410 
10411 	/*
10412 	 * Do not destroy the 'original' grouping; because of the context
10413 	 * switch optimization the original events could've ended up in a
10414 	 * random child task.
10415 	 *
10416 	 * If we were to destroy the original group, all group related
10417 	 * operations would cease to function properly after this random
10418 	 * child dies.
10419 	 *
10420 	 * Do destroy all inherited groups, we don't care about those
10421 	 * and being thorough is better.
10422 	 */
10423 	raw_spin_lock_irq(&child_ctx->lock);
10424 	WARN_ON_ONCE(child_ctx->is_active);
10425 
10426 	if (parent_event)
10427 		perf_group_detach(child_event);
10428 	list_del_event(child_event, child_ctx);
10429 	child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
10430 	raw_spin_unlock_irq(&child_ctx->lock);
10431 
10432 	/*
10433 	 * Parent events are governed by their filedesc, retain them.
10434 	 */
10435 	if (!parent_event) {
10436 		perf_event_wakeup(child_event);
10437 		return;
10438 	}
10439 	/*
10440 	 * Child events can be cleaned up.
10441 	 */
10442 
10443 	sync_child_event(child_event, child);
10444 
10445 	/*
10446 	 * Remove this event from the parent's list
10447 	 */
10448 	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
10449 	mutex_lock(&parent_event->child_mutex);
10450 	list_del_init(&child_event->child_list);
10451 	mutex_unlock(&parent_event->child_mutex);
10452 
10453 	/*
10454 	 * Kick perf_poll() for is_event_hup().
10455 	 */
10456 	perf_event_wakeup(parent_event);
10457 	free_event(child_event);
10458 	put_event(parent_event);
10459 }
10460 
10461 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
10462 {
10463 	struct perf_event_context *child_ctx, *clone_ctx = NULL;
10464 	struct perf_event *child_event, *next;
10465 
10466 	WARN_ON_ONCE(child != current);
10467 
10468 	child_ctx = perf_pin_task_context(child, ctxn);
10469 	if (!child_ctx)
10470 		return;
10471 
10472 	/*
10473 	 * In order to reduce the amount of tricky in ctx tear-down, we hold
10474 	 * ctx::mutex over the entire thing. This serializes against almost
10475 	 * everything that wants to access the ctx.
10476 	 *
10477 	 * The exception is sys_perf_event_open() /
10478 	 * perf_event_create_kernel_count() which does find_get_context()
10479 	 * without ctx::mutex (it cannot because of the move_group double mutex
10480 	 * lock thing). See the comments in perf_install_in_context().
10481 	 */
10482 	mutex_lock(&child_ctx->mutex);
10483 
10484 	/*
10485 	 * In a single ctx::lock section, de-schedule the events and detach the
10486 	 * context from the task such that we cannot ever get it scheduled back
10487 	 * in.
10488 	 */
10489 	raw_spin_lock_irq(&child_ctx->lock);
10490 	task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx, EVENT_ALL);
10491 
10492 	/*
10493 	 * Now that the context is inactive, destroy the task <-> ctx relation
10494 	 * and mark the context dead.
10495 	 */
10496 	RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
10497 	put_ctx(child_ctx); /* cannot be last */
10498 	WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
10499 	put_task_struct(current); /* cannot be last */
10500 
10501 	clone_ctx = unclone_ctx(child_ctx);
10502 	raw_spin_unlock_irq(&child_ctx->lock);
10503 
10504 	if (clone_ctx)
10505 		put_ctx(clone_ctx);
10506 
10507 	/*
10508 	 * Report the task dead after unscheduling the events so that we
10509 	 * won't get any samples after PERF_RECORD_EXIT. We can however still
10510 	 * get a few PERF_RECORD_READ events.
10511 	 */
10512 	perf_event_task(child, child_ctx, 0);
10513 
10514 	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
10515 		perf_event_exit_event(child_event, child_ctx, child);
10516 
10517 	mutex_unlock(&child_ctx->mutex);
10518 
10519 	put_ctx(child_ctx);
10520 }
10521 
10522 /*
10523  * When a child task exits, feed back event values to parent events.
10524  *
10525  * Can be called with cred_guard_mutex held when called from
10526  * install_exec_creds().
10527  */
10528 void perf_event_exit_task(struct task_struct *child)
10529 {
10530 	struct perf_event *event, *tmp;
10531 	int ctxn;
10532 
10533 	mutex_lock(&child->perf_event_mutex);
10534 	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
10535 				 owner_entry) {
10536 		list_del_init(&event->owner_entry);
10537 
10538 		/*
10539 		 * Ensure the list deletion is visible before we clear
10540 		 * the owner, closes a race against perf_release() where
10541 		 * we need to serialize on the owner->perf_event_mutex.
10542 		 */
10543 		smp_store_release(&event->owner, NULL);
10544 	}
10545 	mutex_unlock(&child->perf_event_mutex);
10546 
10547 	for_each_task_context_nr(ctxn)
10548 		perf_event_exit_task_context(child, ctxn);
10549 
10550 	/*
10551 	 * The perf_event_exit_task_context calls perf_event_task
10552 	 * with child's task_ctx, which generates EXIT events for
10553 	 * child contexts and sets child->perf_event_ctxp[] to NULL.
10554 	 * At this point we need to send EXIT events to cpu contexts.
10555 	 */
10556 	perf_event_task(child, NULL, 0);
10557 }
10558 
10559 static void perf_free_event(struct perf_event *event,
10560 			    struct perf_event_context *ctx)
10561 {
10562 	struct perf_event *parent = event->parent;
10563 
10564 	if (WARN_ON_ONCE(!parent))
10565 		return;
10566 
10567 	mutex_lock(&parent->child_mutex);
10568 	list_del_init(&event->child_list);
10569 	mutex_unlock(&parent->child_mutex);
10570 
10571 	put_event(parent);
10572 
10573 	raw_spin_lock_irq(&ctx->lock);
10574 	perf_group_detach(event);
10575 	list_del_event(event, ctx);
10576 	raw_spin_unlock_irq(&ctx->lock);
10577 	free_event(event);
10578 }
10579 
10580 /*
10581  * Free an unexposed, unused context as created by inheritance by
10582  * perf_event_init_task below, used by fork() in case of fail.
10583  *
10584  * Not all locks are strictly required, but take them anyway to be nice and
10585  * help out with the lockdep assertions.
10586  */
10587 void perf_event_free_task(struct task_struct *task)
10588 {
10589 	struct perf_event_context *ctx;
10590 	struct perf_event *event, *tmp;
10591 	int ctxn;
10592 
10593 	for_each_task_context_nr(ctxn) {
10594 		ctx = task->perf_event_ctxp[ctxn];
10595 		if (!ctx)
10596 			continue;
10597 
10598 		mutex_lock(&ctx->mutex);
10599 		raw_spin_lock_irq(&ctx->lock);
10600 		/*
10601 		 * Destroy the task <-> ctx relation and mark the context dead.
10602 		 *
10603 		 * This is important because even though the task hasn't been
10604 		 * exposed yet the context has been (through child_list).
10605 		 */
10606 		RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
10607 		WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
10608 		put_task_struct(task); /* cannot be last */
10609 		raw_spin_unlock_irq(&ctx->lock);
10610 
10611 		list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
10612 			perf_free_event(event, ctx);
10613 
10614 		mutex_unlock(&ctx->mutex);
10615 		put_ctx(ctx);
10616 	}
10617 }
10618 
10619 void perf_event_delayed_put(struct task_struct *task)
10620 {
10621 	int ctxn;
10622 
10623 	for_each_task_context_nr(ctxn)
10624 		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
10625 }
10626 
10627 struct file *perf_event_get(unsigned int fd)
10628 {
10629 	struct file *file;
10630 
10631 	file = fget_raw(fd);
10632 	if (!file)
10633 		return ERR_PTR(-EBADF);
10634 
10635 	if (file->f_op != &perf_fops) {
10636 		fput(file);
10637 		return ERR_PTR(-EBADF);
10638 	}
10639 
10640 	return file;
10641 }
10642 
10643 const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
10644 {
10645 	if (!event)
10646 		return ERR_PTR(-EINVAL);
10647 
10648 	return &event->attr;
10649 }
10650 
10651 /*
10652  * Inherit a event from parent task to child task.
10653  *
10654  * Returns:
10655  *  - valid pointer on success
10656  *  - NULL for orphaned events
10657  *  - IS_ERR() on error
10658  */
10659 static struct perf_event *
10660 inherit_event(struct perf_event *parent_event,
10661 	      struct task_struct *parent,
10662 	      struct perf_event_context *parent_ctx,
10663 	      struct task_struct *child,
10664 	      struct perf_event *group_leader,
10665 	      struct perf_event_context *child_ctx)
10666 {
10667 	enum perf_event_active_state parent_state = parent_event->state;
10668 	struct perf_event *child_event;
10669 	unsigned long flags;
10670 
10671 	/*
10672 	 * Instead of creating recursive hierarchies of events,
10673 	 * we link inherited events back to the original parent,
10674 	 * which has a filp for sure, which we use as the reference
10675 	 * count:
10676 	 */
10677 	if (parent_event->parent)
10678 		parent_event = parent_event->parent;
10679 
10680 	child_event = perf_event_alloc(&parent_event->attr,
10681 					   parent_event->cpu,
10682 					   child,
10683 					   group_leader, parent_event,
10684 					   NULL, NULL, -1);
10685 	if (IS_ERR(child_event))
10686 		return child_event;
10687 
10688 	/*
10689 	 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
10690 	 * must be under the same lock in order to serialize against
10691 	 * perf_event_release_kernel(), such that either we must observe
10692 	 * is_orphaned_event() or they will observe us on the child_list.
10693 	 */
10694 	mutex_lock(&parent_event->child_mutex);
10695 	if (is_orphaned_event(parent_event) ||
10696 	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
10697 		mutex_unlock(&parent_event->child_mutex);
10698 		free_event(child_event);
10699 		return NULL;
10700 	}
10701 
10702 	get_ctx(child_ctx);
10703 
10704 	/*
10705 	 * Make the child state follow the state of the parent event,
10706 	 * not its attr.disabled bit.  We hold the parent's mutex,
10707 	 * so we won't race with perf_event_{en, dis}able_family.
10708 	 */
10709 	if (parent_state >= PERF_EVENT_STATE_INACTIVE)
10710 		child_event->state = PERF_EVENT_STATE_INACTIVE;
10711 	else
10712 		child_event->state = PERF_EVENT_STATE_OFF;
10713 
10714 	if (parent_event->attr.freq) {
10715 		u64 sample_period = parent_event->hw.sample_period;
10716 		struct hw_perf_event *hwc = &child_event->hw;
10717 
10718 		hwc->sample_period = sample_period;
10719 		hwc->last_period   = sample_period;
10720 
10721 		local64_set(&hwc->period_left, sample_period);
10722 	}
10723 
10724 	child_event->ctx = child_ctx;
10725 	child_event->overflow_handler = parent_event->overflow_handler;
10726 	child_event->overflow_handler_context
10727 		= parent_event->overflow_handler_context;
10728 
10729 	/*
10730 	 * Precalculate sample_data sizes
10731 	 */
10732 	perf_event__header_size(child_event);
10733 	perf_event__id_header_size(child_event);
10734 
10735 	/*
10736 	 * Link it up in the child's context:
10737 	 */
10738 	raw_spin_lock_irqsave(&child_ctx->lock, flags);
10739 	add_event_to_ctx(child_event, child_ctx);
10740 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
10741 
10742 	/*
10743 	 * Link this into the parent event's child list
10744 	 */
10745 	list_add_tail(&child_event->child_list, &parent_event->child_list);
10746 	mutex_unlock(&parent_event->child_mutex);
10747 
10748 	return child_event;
10749 }
10750 
10751 /*
10752  * Inherits an event group.
10753  *
10754  * This will quietly suppress orphaned events; !inherit_event() is not an error.
10755  * This matches with perf_event_release_kernel() removing all child events.
10756  *
10757  * Returns:
10758  *  - 0 on success
10759  *  - <0 on error
10760  */
10761 static int inherit_group(struct perf_event *parent_event,
10762 	      struct task_struct *parent,
10763 	      struct perf_event_context *parent_ctx,
10764 	      struct task_struct *child,
10765 	      struct perf_event_context *child_ctx)
10766 {
10767 	struct perf_event *leader;
10768 	struct perf_event *sub;
10769 	struct perf_event *child_ctr;
10770 
10771 	leader = inherit_event(parent_event, parent, parent_ctx,
10772 				 child, NULL, child_ctx);
10773 	if (IS_ERR(leader))
10774 		return PTR_ERR(leader);
10775 	/*
10776 	 * @leader can be NULL here because of is_orphaned_event(). In this
10777 	 * case inherit_event() will create individual events, similar to what
10778 	 * perf_group_detach() would do anyway.
10779 	 */
10780 	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
10781 		child_ctr = inherit_event(sub, parent, parent_ctx,
10782 					    child, leader, child_ctx);
10783 		if (IS_ERR(child_ctr))
10784 			return PTR_ERR(child_ctr);
10785 	}
10786 	return 0;
10787 }
10788 
10789 /*
10790  * Creates the child task context and tries to inherit the event-group.
10791  *
10792  * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
10793  * inherited_all set when we 'fail' to inherit an orphaned event; this is
10794  * consistent with perf_event_release_kernel() removing all child events.
10795  *
10796  * Returns:
10797  *  - 0 on success
10798  *  - <0 on error
10799  */
10800 static int
10801 inherit_task_group(struct perf_event *event, struct task_struct *parent,
10802 		   struct perf_event_context *parent_ctx,
10803 		   struct task_struct *child, int ctxn,
10804 		   int *inherited_all)
10805 {
10806 	int ret;
10807 	struct perf_event_context *child_ctx;
10808 
10809 	if (!event->attr.inherit) {
10810 		*inherited_all = 0;
10811 		return 0;
10812 	}
10813 
10814 	child_ctx = child->perf_event_ctxp[ctxn];
10815 	if (!child_ctx) {
10816 		/*
10817 		 * This is executed from the parent task context, so
10818 		 * inherit events that have been marked for cloning.
10819 		 * First allocate and initialize a context for the
10820 		 * child.
10821 		 */
10822 		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
10823 		if (!child_ctx)
10824 			return -ENOMEM;
10825 
10826 		child->perf_event_ctxp[ctxn] = child_ctx;
10827 	}
10828 
10829 	ret = inherit_group(event, parent, parent_ctx,
10830 			    child, child_ctx);
10831 
10832 	if (ret)
10833 		*inherited_all = 0;
10834 
10835 	return ret;
10836 }
10837 
10838 /*
10839  * Initialize the perf_event context in task_struct
10840  */
10841 static int perf_event_init_context(struct task_struct *child, int ctxn)
10842 {
10843 	struct perf_event_context *child_ctx, *parent_ctx;
10844 	struct perf_event_context *cloned_ctx;
10845 	struct perf_event *event;
10846 	struct task_struct *parent = current;
10847 	int inherited_all = 1;
10848 	unsigned long flags;
10849 	int ret = 0;
10850 
10851 	if (likely(!parent->perf_event_ctxp[ctxn]))
10852 		return 0;
10853 
10854 	/*
10855 	 * If the parent's context is a clone, pin it so it won't get
10856 	 * swapped under us.
10857 	 */
10858 	parent_ctx = perf_pin_task_context(parent, ctxn);
10859 	if (!parent_ctx)
10860 		return 0;
10861 
10862 	/*
10863 	 * No need to check if parent_ctx != NULL here; since we saw
10864 	 * it non-NULL earlier, the only reason for it to become NULL
10865 	 * is if we exit, and since we're currently in the middle of
10866 	 * a fork we can't be exiting at the same time.
10867 	 */
10868 
10869 	/*
10870 	 * Lock the parent list. No need to lock the child - not PID
10871 	 * hashed yet and not running, so nobody can access it.
10872 	 */
10873 	mutex_lock(&parent_ctx->mutex);
10874 
10875 	/*
10876 	 * We dont have to disable NMIs - we are only looking at
10877 	 * the list, not manipulating it:
10878 	 */
10879 	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
10880 		ret = inherit_task_group(event, parent, parent_ctx,
10881 					 child, ctxn, &inherited_all);
10882 		if (ret)
10883 			goto out_unlock;
10884 	}
10885 
10886 	/*
10887 	 * We can't hold ctx->lock when iterating the ->flexible_group list due
10888 	 * to allocations, but we need to prevent rotation because
10889 	 * rotate_ctx() will change the list from interrupt context.
10890 	 */
10891 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10892 	parent_ctx->rotate_disable = 1;
10893 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
10894 
10895 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
10896 		ret = inherit_task_group(event, parent, parent_ctx,
10897 					 child, ctxn, &inherited_all);
10898 		if (ret)
10899 			goto out_unlock;
10900 	}
10901 
10902 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10903 	parent_ctx->rotate_disable = 0;
10904 
10905 	child_ctx = child->perf_event_ctxp[ctxn];
10906 
10907 	if (child_ctx && inherited_all) {
10908 		/*
10909 		 * Mark the child context as a clone of the parent
10910 		 * context, or of whatever the parent is a clone of.
10911 		 *
10912 		 * Note that if the parent is a clone, the holding of
10913 		 * parent_ctx->lock avoids it from being uncloned.
10914 		 */
10915 		cloned_ctx = parent_ctx->parent_ctx;
10916 		if (cloned_ctx) {
10917 			child_ctx->parent_ctx = cloned_ctx;
10918 			child_ctx->parent_gen = parent_ctx->parent_gen;
10919 		} else {
10920 			child_ctx->parent_ctx = parent_ctx;
10921 			child_ctx->parent_gen = parent_ctx->generation;
10922 		}
10923 		get_ctx(child_ctx->parent_ctx);
10924 	}
10925 
10926 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
10927 out_unlock:
10928 	mutex_unlock(&parent_ctx->mutex);
10929 
10930 	perf_unpin_context(parent_ctx);
10931 	put_ctx(parent_ctx);
10932 
10933 	return ret;
10934 }
10935 
10936 /*
10937  * Initialize the perf_event context in task_struct
10938  */
10939 int perf_event_init_task(struct task_struct *child)
10940 {
10941 	int ctxn, ret;
10942 
10943 	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
10944 	mutex_init(&child->perf_event_mutex);
10945 	INIT_LIST_HEAD(&child->perf_event_list);
10946 
10947 	for_each_task_context_nr(ctxn) {
10948 		ret = perf_event_init_context(child, ctxn);
10949 		if (ret) {
10950 			perf_event_free_task(child);
10951 			return ret;
10952 		}
10953 	}
10954 
10955 	return 0;
10956 }
10957 
10958 static void __init perf_event_init_all_cpus(void)
10959 {
10960 	struct swevent_htable *swhash;
10961 	int cpu;
10962 
10963 	zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);
10964 
10965 	for_each_possible_cpu(cpu) {
10966 		swhash = &per_cpu(swevent_htable, cpu);
10967 		mutex_init(&swhash->hlist_mutex);
10968 		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
10969 
10970 		INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
10971 		raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
10972 
10973 #ifdef CONFIG_CGROUP_PERF
10974 		INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
10975 #endif
10976 		INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
10977 	}
10978 }
10979 
10980 void perf_swevent_init_cpu(unsigned int cpu)
10981 {
10982 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
10983 
10984 	mutex_lock(&swhash->hlist_mutex);
10985 	if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
10986 		struct swevent_hlist *hlist;
10987 
10988 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
10989 		WARN_ON(!hlist);
10990 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
10991 	}
10992 	mutex_unlock(&swhash->hlist_mutex);
10993 }
10994 
10995 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
10996 static void __perf_event_exit_context(void *__info)
10997 {
10998 	struct perf_event_context *ctx = __info;
10999 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
11000 	struct perf_event *event;
11001 
11002 	raw_spin_lock(&ctx->lock);
11003 	list_for_each_entry(event, &ctx->event_list, event_entry)
11004 		__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
11005 	raw_spin_unlock(&ctx->lock);
11006 }
11007 
11008 static void perf_event_exit_cpu_context(int cpu)
11009 {
11010 	struct perf_cpu_context *cpuctx;
11011 	struct perf_event_context *ctx;
11012 	struct pmu *pmu;
11013 
11014 	mutex_lock(&pmus_lock);
11015 	list_for_each_entry(pmu, &pmus, entry) {
11016 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
11017 		ctx = &cpuctx->ctx;
11018 
11019 		mutex_lock(&ctx->mutex);
11020 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
11021 		cpuctx->online = 0;
11022 		mutex_unlock(&ctx->mutex);
11023 	}
11024 	cpumask_clear_cpu(cpu, perf_online_mask);
11025 	mutex_unlock(&pmus_lock);
11026 }
11027 #else
11028 
11029 static void perf_event_exit_cpu_context(int cpu) { }
11030 
11031 #endif
11032 
11033 int perf_event_init_cpu(unsigned int cpu)
11034 {
11035 	struct perf_cpu_context *cpuctx;
11036 	struct perf_event_context *ctx;
11037 	struct pmu *pmu;
11038 
11039 	perf_swevent_init_cpu(cpu);
11040 
11041 	mutex_lock(&pmus_lock);
11042 	cpumask_set_cpu(cpu, perf_online_mask);
11043 	list_for_each_entry(pmu, &pmus, entry) {
11044 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
11045 		ctx = &cpuctx->ctx;
11046 
11047 		mutex_lock(&ctx->mutex);
11048 		cpuctx->online = 1;
11049 		mutex_unlock(&ctx->mutex);
11050 	}
11051 	mutex_unlock(&pmus_lock);
11052 
11053 	return 0;
11054 }
11055 
11056 int perf_event_exit_cpu(unsigned int cpu)
11057 {
11058 	perf_event_exit_cpu_context(cpu);
11059 	return 0;
11060 }
11061 
11062 static int
11063 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
11064 {
11065 	int cpu;
11066 
11067 	for_each_online_cpu(cpu)
11068 		perf_event_exit_cpu(cpu);
11069 
11070 	return NOTIFY_OK;
11071 }
11072 
11073 /*
11074  * Run the perf reboot notifier at the very last possible moment so that
11075  * the generic watchdog code runs as long as possible.
11076  */
11077 static struct notifier_block perf_reboot_notifier = {
11078 	.notifier_call = perf_reboot,
11079 	.priority = INT_MIN,
11080 };
11081 
11082 void __init perf_event_init(void)
11083 {
11084 	int ret;
11085 
11086 	idr_init(&pmu_idr);
11087 
11088 	perf_event_init_all_cpus();
11089 	init_srcu_struct(&pmus_srcu);
11090 	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
11091 	perf_pmu_register(&perf_cpu_clock, NULL, -1);
11092 	perf_pmu_register(&perf_task_clock, NULL, -1);
11093 	perf_tp_register();
11094 	perf_event_init_cpu(smp_processor_id());
11095 	register_reboot_notifier(&perf_reboot_notifier);
11096 
11097 	ret = init_hw_breakpoint();
11098 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
11099 
11100 	/*
11101 	 * Build time assertion that we keep the data_head at the intended
11102 	 * location.  IOW, validation we got the __reserved[] size right.
11103 	 */
11104 	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
11105 		     != 1024);
11106 }
11107 
11108 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
11109 			      char *page)
11110 {
11111 	struct perf_pmu_events_attr *pmu_attr =
11112 		container_of(attr, struct perf_pmu_events_attr, attr);
11113 
11114 	if (pmu_attr->event_str)
11115 		return sprintf(page, "%s\n", pmu_attr->event_str);
11116 
11117 	return 0;
11118 }
11119 EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
11120 
11121 static int __init perf_event_sysfs_init(void)
11122 {
11123 	struct pmu *pmu;
11124 	int ret;
11125 
11126 	mutex_lock(&pmus_lock);
11127 
11128 	ret = bus_register(&pmu_bus);
11129 	if (ret)
11130 		goto unlock;
11131 
11132 	list_for_each_entry(pmu, &pmus, entry) {
11133 		if (!pmu->name || pmu->type < 0)
11134 			continue;
11135 
11136 		ret = pmu_dev_alloc(pmu);
11137 		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
11138 	}
11139 	pmu_bus_running = 1;
11140 	ret = 0;
11141 
11142 unlock:
11143 	mutex_unlock(&pmus_lock);
11144 
11145 	return ret;
11146 }
11147 device_initcall(perf_event_sysfs_init);
11148 
11149 #ifdef CONFIG_CGROUP_PERF
11150 static struct cgroup_subsys_state *
11151 perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
11152 {
11153 	struct perf_cgroup *jc;
11154 
11155 	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
11156 	if (!jc)
11157 		return ERR_PTR(-ENOMEM);
11158 
11159 	jc->info = alloc_percpu(struct perf_cgroup_info);
11160 	if (!jc->info) {
11161 		kfree(jc);
11162 		return ERR_PTR(-ENOMEM);
11163 	}
11164 
11165 	return &jc->css;
11166 }
11167 
11168 static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
11169 {
11170 	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
11171 
11172 	free_percpu(jc->info);
11173 	kfree(jc);
11174 }
11175 
11176 static int __perf_cgroup_move(void *info)
11177 {
11178 	struct task_struct *task = info;
11179 	rcu_read_lock();
11180 	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
11181 	rcu_read_unlock();
11182 	return 0;
11183 }
11184 
11185 static void perf_cgroup_attach(struct cgroup_taskset *tset)
11186 {
11187 	struct task_struct *task;
11188 	struct cgroup_subsys_state *css;
11189 
11190 	cgroup_taskset_for_each(task, css, tset)
11191 		task_function_call(task, __perf_cgroup_move, task);
11192 }
11193 
11194 struct cgroup_subsys perf_event_cgrp_subsys = {
11195 	.css_alloc	= perf_cgroup_css_alloc,
11196 	.css_free	= perf_cgroup_css_free,
11197 	.attach		= perf_cgroup_attach,
11198 	/*
11199 	 * Implicitly enable on dfl hierarchy so that perf events can
11200 	 * always be filtered by cgroup2 path as long as perf_event
11201 	 * controller is not mounted on a legacy hierarchy.
11202 	 */
11203 	.implicit_on_dfl = true,
11204 };
11205 #endif /* CONFIG_CGROUP_PERF */
11206