xref: /linux/kernel/events/core.c (revision 97f0b13452198290799fd6780f05fbaa74f927d3)
1 /*
2  * Performance events core code:
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  * For licensing details see kernel-base/COPYING
10  */
11 
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/idr.h>
17 #include <linux/file.h>
18 #include <linux/poll.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/tick.h>
22 #include <linux/sysfs.h>
23 #include <linux/dcache.h>
24 #include <linux/percpu.h>
25 #include <linux/ptrace.h>
26 #include <linux/reboot.h>
27 #include <linux/vmstat.h>
28 #include <linux/device.h>
29 #include <linux/export.h>
30 #include <linux/vmalloc.h>
31 #include <linux/hardirq.h>
32 #include <linux/rculist.h>
33 #include <linux/uaccess.h>
34 #include <linux/syscalls.h>
35 #include <linux/anon_inodes.h>
36 #include <linux/kernel_stat.h>
37 #include <linux/cgroup.h>
38 #include <linux/perf_event.h>
39 #include <linux/ftrace_event.h>
40 #include <linux/hw_breakpoint.h>
41 #include <linux/mm_types.h>
42 #include <linux/module.h>
43 #include <linux/mman.h>
44 #include <linux/compat.h>
45 #include <linux/bpf.h>
46 #include <linux/filter.h>
47 
48 #include "internal.h"
49 
50 #include <asm/irq_regs.h>
51 
52 static struct workqueue_struct *perf_wq;
53 
54 typedef int (*remote_function_f)(void *);
55 
56 struct remote_function_call {
57 	struct task_struct	*p;
58 	remote_function_f	func;
59 	void			*info;
60 	int			ret;
61 };
62 
63 static void remote_function(void *data)
64 {
65 	struct remote_function_call *tfc = data;
66 	struct task_struct *p = tfc->p;
67 
68 	if (p) {
69 		tfc->ret = -EAGAIN;
70 		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
71 			return;
72 	}
73 
74 	tfc->ret = tfc->func(tfc->info);
75 }
76 
77 /**
78  * task_function_call - call a function on the cpu on which a task runs
79  * @p:		the task to evaluate
80  * @func:	the function to be called
81  * @info:	the function call argument
82  *
83  * Calls the function @func when the task is currently running. This might
 84  * be on the current CPU, in which case the function is called directly.
85  *
86  * returns: @func return value, or
87  *	    -ESRCH  - when the process isn't running
88  *	    -EAGAIN - when the process moved away
89  */
90 static int
91 task_function_call(struct task_struct *p, remote_function_f func, void *info)
92 {
93 	struct remote_function_call data = {
94 		.p	= p,
95 		.func	= func,
96 		.info	= info,
97 		.ret	= -ESRCH, /* No such (running) process */
98 	};
99 
100 	if (task_curr(p))
101 		smp_call_function_single(task_cpu(p), remote_function, &data, 1);
102 
103 	return data.ret;
104 }
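/*
 * Illustrative sketch (dump_task_state() is a hypothetical helper):
 *
 *	static int dump_task_state(void *info)
 *	{
 *		struct task_struct *p = info;
 *
 *		pr_info("%s running on cpu %d\n", p->comm, smp_processor_id());
 *		return 0;
 *	}
 *
 *	err = task_function_call(p, dump_task_state, p);
 *
 * A return of -ESRCH or -EAGAIN only means @p was not (or is no longer)
 * running on the target CPU; callers typically retry or fall back to
 * locking the context.
 */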
105 
106 /**
 107  * cpu_function_call - call a function on a given cpu
 * @cpu:	the cpu on which to call @func
108  * @func:	the function to be called
109  * @info:	the function call argument
110  *
111  * Calls the function @func on the remote cpu.
112  *
113  * returns: @func return value or -ENXIO when the cpu is offline
114  */
115 static int cpu_function_call(int cpu, remote_function_f func, void *info)
116 {
117 	struct remote_function_call data = {
118 		.p	= NULL,
119 		.func	= func,
120 		.info	= info,
121 		.ret	= -ENXIO, /* No such CPU */
122 	};
123 
124 	smp_call_function_single(cpu, remote_function, &data, 1);
125 
126 	return data.ret;
127 }
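/*
 * Illustrative sketch (read_counter(), args and handle_offline_cpu() are
 * hypothetical):
 *
 *	ret = cpu_function_call(cpu, read_counter, &args);
 *	if (ret == -ENXIO)
 *		handle_offline_cpu();	// cpu went offline: retry elsewhere
 */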
128 
129 #define EVENT_OWNER_KERNEL ((void *) -1)
130 
131 static bool is_kernel_event(struct perf_event *event)
132 {
133 	return event->owner == EVENT_OWNER_KERNEL;
134 }
135 
136 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
137 		       PERF_FLAG_FD_OUTPUT  |\
138 		       PERF_FLAG_PID_CGROUP |\
139 		       PERF_FLAG_FD_CLOEXEC)
140 
141 /*
142  * branch priv levels that need permission checks
143  */
144 #define PERF_SAMPLE_BRANCH_PERM_PLM \
145 	(PERF_SAMPLE_BRANCH_KERNEL |\
146 	 PERF_SAMPLE_BRANCH_HV)
147 
148 enum event_type_t {
149 	EVENT_FLEXIBLE = 0x1,
150 	EVENT_PINNED = 0x2,
151 	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
152 };
153 
154 /*
155  * perf_sched_events : >0 events exist
156  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
157  */
158 struct static_key_deferred perf_sched_events __read_mostly;
159 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
160 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
161 
162 static atomic_t nr_mmap_events __read_mostly;
163 static atomic_t nr_comm_events __read_mostly;
164 static atomic_t nr_task_events __read_mostly;
165 static atomic_t nr_freq_events __read_mostly;
166 
167 static LIST_HEAD(pmus);
168 static DEFINE_MUTEX(pmus_lock);
169 static struct srcu_struct pmus_srcu;
170 
171 /*
172  * perf event paranoia level:
173  *  -1 - not paranoid at all
174  *   0 - disallow raw tracepoint access for unpriv
175  *   1 - disallow cpu events for unpriv
176  *   2 - disallow kernel profiling for unpriv
177  */
178 int sysctl_perf_event_paranoid __read_mostly = 1;
179 
180 /* Minimum for 512 kiB + 1 user control page */
181 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
182 
183 /*
184  * max perf event sample rate
185  */
186 #define DEFAULT_MAX_SAMPLE_RATE		100000
187 #define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
188 #define DEFAULT_CPU_TIME_MAX_PERCENT	25
189 
190 int sysctl_perf_event_sample_rate __read_mostly	= DEFAULT_MAX_SAMPLE_RATE;
191 
192 static int max_samples_per_tick __read_mostly	= DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
193 static int perf_sample_period_ns __read_mostly	= DEFAULT_SAMPLE_PERIOD_NS;
194 
195 static int perf_sample_allowed_ns __read_mostly =
196 	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
197 
 198 static void update_perf_cpu_limits(void)
199 {
200 	u64 tmp = perf_sample_period_ns;
201 
202 	tmp *= sysctl_perf_cpu_time_max_percent;
203 	do_div(tmp, 100);
204 	ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
205 }
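/*
 * For example, with the defaults above: perf_sample_period_ns is
 * NSEC_PER_SEC / 100000 = 10000ns and sysctl_perf_cpu_time_max_percent
 * is 25, so perf_sample_allowed_ns becomes 10000 * 25 / 100 = 2500ns;
 * the sampling interrupt may use at most a quarter of the time between
 * two consecutive samples.
 */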
206 
207 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
208 
209 int perf_proc_update_handler(struct ctl_table *table, int write,
210 		void __user *buffer, size_t *lenp,
211 		loff_t *ppos)
212 {
213 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
214 
215 	if (ret || !write)
216 		return ret;
217 
218 	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
219 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
220 	update_perf_cpu_limits();
221 
222 	return 0;
223 }
224 
225 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
226 
227 int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
228 				void __user *buffer, size_t *lenp,
229 				loff_t *ppos)
230 {
231 	int ret = proc_dointvec(table, write, buffer, lenp, ppos);
232 
233 	if (ret || !write)
234 		return ret;
235 
236 	update_perf_cpu_limits();
237 
238 	return 0;
239 }
240 
241 /*
242  * perf samples are done in some very critical code paths (NMIs).
243  * If they take too much CPU time, the system can lock up and not
244  * get any real work done.  This will drop the sample rate when
245  * we detect that events are taking too long.
246  */
247 #define NR_ACCUMULATED_SAMPLES 128
248 static DEFINE_PER_CPU(u64, running_sample_length);
249 
250 static void perf_duration_warn(struct irq_work *w)
251 {
252 	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
253 	u64 avg_local_sample_len;
254 	u64 local_samples_len;
255 
256 	local_samples_len = __this_cpu_read(running_sample_length);
257 	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
258 
259 	printk_ratelimited(KERN_WARNING
260 			"perf interrupt took too long (%lld > %lld), lowering "
261 			"kernel.perf_event_max_sample_rate to %d\n",
262 			avg_local_sample_len, allowed_ns >> 1,
263 			sysctl_perf_event_sample_rate);
264 }
265 
266 static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
267 
268 void perf_sample_event_took(u64 sample_len_ns)
269 {
270 	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
271 	u64 avg_local_sample_len;
272 	u64 local_samples_len;
273 
274 	if (allowed_ns == 0)
275 		return;
276 
277 	/* decay the counter by 1 average sample */
278 	local_samples_len = __this_cpu_read(running_sample_length);
279 	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
280 	local_samples_len += sample_len_ns;
281 	__this_cpu_write(running_sample_length, local_samples_len);
282 
283 	/*
 284 	 * note: this will be biased artificially low until we have
285 	 * seen NR_ACCUMULATED_SAMPLES.  Doing it this way keeps us
286 	 * from having to maintain a count.
287 	 */
288 	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
289 
290 	if (avg_local_sample_len <= allowed_ns)
291 		return;
292 
293 	if (max_samples_per_tick <= 1)
294 		return;
295 
296 	max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
297 	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
298 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
299 
300 	update_perf_cpu_limits();
301 
302 	if (!irq_work_queue(&perf_duration_work)) {
303 		early_printk("perf interrupt took too long (%lld > %lld), lowering "
304 			     "kernel.perf_event_max_sample_rate to %d\n",
305 			     avg_local_sample_len, allowed_ns >> 1,
306 			     sysctl_perf_event_sample_rate);
307 	}
308 }
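/*
 * For example: the accumulator converges towards 128 times the average
 * sample length, since each update removes 1/128th of it and adds one
 * new sample. Once the average exceeds perf_sample_allowed_ns,
 * max_samples_per_tick is halved; with HZ=1000 and the 100000/sec
 * default, 100 samples per tick become 50 and
 * kernel.perf_event_max_sample_rate drops to 50000.
 */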
309 
310 static atomic64_t perf_event_id;
311 
312 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
313 			      enum event_type_t event_type);
314 
315 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
316 			     enum event_type_t event_type,
317 			     struct task_struct *task);
318 
319 static void update_context_time(struct perf_event_context *ctx);
320 static u64 perf_event_time(struct perf_event *event);
321 
322 void __weak perf_event_print_debug(void)	{ }
323 
324 extern __weak const char *perf_pmu_name(void)
325 {
326 	return "pmu";
327 }
328 
329 static inline u64 perf_clock(void)
330 {
331 	return local_clock();
332 }
333 
334 static inline u64 perf_event_clock(struct perf_event *event)
335 {
336 	return event->clock();
337 }
338 
339 static inline struct perf_cpu_context *
340 __get_cpu_context(struct perf_event_context *ctx)
341 {
342 	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
343 }
344 
345 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
346 			  struct perf_event_context *ctx)
347 {
348 	raw_spin_lock(&cpuctx->ctx.lock);
349 	if (ctx)
350 		raw_spin_lock(&ctx->lock);
351 }
352 
353 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
354 			    struct perf_event_context *ctx)
355 {
356 	if (ctx)
357 		raw_spin_unlock(&ctx->lock);
358 	raw_spin_unlock(&cpuctx->ctx.lock);
359 }
360 
361 #ifdef CONFIG_CGROUP_PERF
362 
363 static inline bool
364 perf_cgroup_match(struct perf_event *event)
365 {
366 	struct perf_event_context *ctx = event->ctx;
367 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
368 
369 	/* @event doesn't care about cgroup */
370 	if (!event->cgrp)
371 		return true;
372 
373 	/* wants specific cgroup scope but @cpuctx isn't associated with any */
374 	if (!cpuctx->cgrp)
375 		return false;
376 
377 	/*
378 	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
379 	 * also enabled for all its descendant cgroups.  If @cpuctx's
380 	 * cgroup is a descendant of @event's (the test covers identity
381 	 * case), it's a match.
382 	 */
383 	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
384 				    event->cgrp->css.cgroup);
385 }
386 
387 static inline void perf_detach_cgroup(struct perf_event *event)
388 {
389 	css_put(&event->cgrp->css);
390 	event->cgrp = NULL;
391 }
392 
393 static inline int is_cgroup_event(struct perf_event *event)
394 {
395 	return event->cgrp != NULL;
396 }
397 
398 static inline u64 perf_cgroup_event_time(struct perf_event *event)
399 {
400 	struct perf_cgroup_info *t;
401 
402 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
403 	return t->time;
404 }
405 
406 static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
407 {
408 	struct perf_cgroup_info *info;
409 	u64 now;
410 
411 	now = perf_clock();
412 
413 	info = this_cpu_ptr(cgrp->info);
414 
415 	info->time += now - info->timestamp;
416 	info->timestamp = now;
417 }
418 
419 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
420 {
421 	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
422 	if (cgrp_out)
423 		__update_cgrp_time(cgrp_out);
424 }
425 
426 static inline void update_cgrp_time_from_event(struct perf_event *event)
427 {
428 	struct perf_cgroup *cgrp;
429 
430 	/*
431 	 * ensure we access cgroup data only when needed and
432 	 * when we know the cgroup is pinned (css_get)
433 	 */
434 	if (!is_cgroup_event(event))
435 		return;
436 
437 	cgrp = perf_cgroup_from_task(current);
438 	/*
439 	 * Do not update time when cgroup is not active
440 	 */
441 	if (cgrp == event->cgrp)
442 		__update_cgrp_time(event->cgrp);
443 }
444 
445 static inline void
446 perf_cgroup_set_timestamp(struct task_struct *task,
447 			  struct perf_event_context *ctx)
448 {
449 	struct perf_cgroup *cgrp;
450 	struct perf_cgroup_info *info;
451 
452 	/*
453 	 * ctx->lock held by caller
454 	 * ensure we do not access cgroup data
455 	 * unless we have the cgroup pinned (css_get)
456 	 */
457 	if (!task || !ctx->nr_cgroups)
458 		return;
459 
460 	cgrp = perf_cgroup_from_task(task);
461 	info = this_cpu_ptr(cgrp->info);
462 	info->timestamp = ctx->timestamp;
463 }
464 
465 #define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
466 #define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */
467 
468 /*
 469  * reschedule events based on the cgroup constraint of the task.
470  *
471  * mode SWOUT : schedule out everything
 472  * mode SWIN : schedule in events based on the cgroup of the next task
473  */
474 void perf_cgroup_switch(struct task_struct *task, int mode)
475 {
476 	struct perf_cpu_context *cpuctx;
477 	struct pmu *pmu;
478 	unsigned long flags;
479 
480 	/*
 481 	 * disable interrupts to avoid getting nr_cgroup
482 	 * changes via __perf_event_disable(). Also
483 	 * avoids preemption.
484 	 */
485 	local_irq_save(flags);
486 
487 	/*
488 	 * we reschedule only in the presence of cgroup
489 	 * constrained events.
490 	 */
491 	rcu_read_lock();
492 
493 	list_for_each_entry_rcu(pmu, &pmus, entry) {
494 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
495 		if (cpuctx->unique_pmu != pmu)
496 			continue; /* ensure we process each cpuctx once */
497 
498 		/*
499 		 * perf_cgroup_events says at least one
500 		 * context on this CPU has cgroup events.
501 		 *
502 		 * ctx->nr_cgroups reports the number of cgroup
503 		 * events for a context.
504 		 */
505 		if (cpuctx->ctx.nr_cgroups > 0) {
506 			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
507 			perf_pmu_disable(cpuctx->ctx.pmu);
508 
509 			if (mode & PERF_CGROUP_SWOUT) {
510 				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
511 				/*
512 				 * must not be done before ctxswout due
513 				 * to event_filter_match() in event_sched_out()
514 				 */
515 				cpuctx->cgrp = NULL;
516 			}
517 
518 			if (mode & PERF_CGROUP_SWIN) {
519 				WARN_ON_ONCE(cpuctx->cgrp);
520 				/*
 521 				 * set cgrp before the ctxsw in, so that
 522 				 * event_filter_match() does not have
 523 				 * to pass the task around
524 				 */
525 				cpuctx->cgrp = perf_cgroup_from_task(task);
526 				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
527 			}
528 			perf_pmu_enable(cpuctx->ctx.pmu);
529 			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
530 		}
531 	}
532 
533 	rcu_read_unlock();
534 
535 	local_irq_restore(flags);
536 }
537 
538 static inline void perf_cgroup_sched_out(struct task_struct *task,
539 					 struct task_struct *next)
540 {
541 	struct perf_cgroup *cgrp1;
542 	struct perf_cgroup *cgrp2 = NULL;
543 
544 	/*
545 	 * we come here when we know perf_cgroup_events > 0
546 	 */
547 	cgrp1 = perf_cgroup_from_task(task);
548 
549 	/*
550 	 * next is NULL when called from perf_event_enable_on_exec()
 551 	 * which then systematically causes a cgroup_switch()
552 	 */
553 	if (next)
554 		cgrp2 = perf_cgroup_from_task(next);
555 
556 	/*
557 	 * only schedule out current cgroup events if we know
558 	 * that we are switching to a different cgroup. Otherwise,
 559 	 * do not touch the cgroup events.
560 	 */
561 	if (cgrp1 != cgrp2)
562 		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
563 }
564 
565 static inline void perf_cgroup_sched_in(struct task_struct *prev,
566 					struct task_struct *task)
567 {
568 	struct perf_cgroup *cgrp1;
569 	struct perf_cgroup *cgrp2 = NULL;
570 
571 	/*
572 	 * we come here when we know perf_cgroup_events > 0
573 	 */
574 	cgrp1 = perf_cgroup_from_task(task);
575 
576 	/* prev can never be NULL */
577 	cgrp2 = perf_cgroup_from_task(prev);
578 
579 	/*
580 	 * only need to schedule in cgroup events if we are changing
 581 	 * cgroup during ctxsw. If the cgroup did not change, the
 582 	 * cgroup events were never scheduled out to begin with.
583 	 */
584 	if (cgrp1 != cgrp2)
585 		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
586 }
587 
588 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
589 				      struct perf_event_attr *attr,
590 				      struct perf_event *group_leader)
591 {
592 	struct perf_cgroup *cgrp;
593 	struct cgroup_subsys_state *css;
594 	struct fd f = fdget(fd);
595 	int ret = 0;
596 
597 	if (!f.file)
598 		return -EBADF;
599 
600 	css = css_tryget_online_from_dir(f.file->f_path.dentry,
601 					 &perf_event_cgrp_subsys);
602 	if (IS_ERR(css)) {
603 		ret = PTR_ERR(css);
604 		goto out;
605 	}
606 
607 	cgrp = container_of(css, struct perf_cgroup, css);
608 	event->cgrp = cgrp;
609 
610 	/*
611 	 * all events in a group must monitor
612 	 * the same cgroup because a task belongs
613 	 * to only one perf cgroup at a time
614 	 */
615 	if (group_leader && group_leader->cgrp != cgrp) {
616 		perf_detach_cgroup(event);
617 		ret = -EINVAL;
618 	}
619 out:
620 	fdput(f);
621 	return ret;
622 }
623 
624 static inline void
625 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
626 {
627 	struct perf_cgroup_info *t;
628 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
629 	event->shadow_ctx_time = now - t->timestamp;
630 }
631 
632 static inline void
633 perf_cgroup_defer_enabled(struct perf_event *event)
634 {
635 	/*
636 	 * when the current task's perf cgroup does not match
637 	 * the event's, we need to remember to call the
 638 	 * perf_cgroup_mark_enabled() function the first time a task with
639 	 * a matching perf cgroup is scheduled in.
640 	 */
641 	if (is_cgroup_event(event) && !perf_cgroup_match(event))
642 		event->cgrp_defer_enabled = 1;
643 }
644 
645 static inline void
646 perf_cgroup_mark_enabled(struct perf_event *event,
647 			 struct perf_event_context *ctx)
648 {
649 	struct perf_event *sub;
650 	u64 tstamp = perf_event_time(event);
651 
652 	if (!event->cgrp_defer_enabled)
653 		return;
654 
655 	event->cgrp_defer_enabled = 0;
656 
657 	event->tstamp_enabled = tstamp - event->total_time_enabled;
658 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
659 		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
660 			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
661 			sub->cgrp_defer_enabled = 0;
662 		}
663 	}
664 }
665 #else /* !CONFIG_CGROUP_PERF */
666 
667 static inline bool
668 perf_cgroup_match(struct perf_event *event)
669 {
670 	return true;
671 }
672 
673 static inline void perf_detach_cgroup(struct perf_event *event)
674 {}
675 
676 static inline int is_cgroup_event(struct perf_event *event)
677 {
678 	return 0;
679 }
680 
681 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
682 {
683 	return 0;
684 }
685 
686 static inline void update_cgrp_time_from_event(struct perf_event *event)
687 {
688 }
689 
690 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
691 {
692 }
693 
694 static inline void perf_cgroup_sched_out(struct task_struct *task,
695 					 struct task_struct *next)
696 {
697 }
698 
699 static inline void perf_cgroup_sched_in(struct task_struct *prev,
700 					struct task_struct *task)
701 {
702 }
703 
 704 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
705 				      struct perf_event_attr *attr,
706 				      struct perf_event *group_leader)
707 {
708 	return -EINVAL;
709 }
710 
711 static inline void
712 perf_cgroup_set_timestamp(struct task_struct *task,
713 			  struct perf_event_context *ctx)
714 {
715 }
716 
 717 static inline void
 718 perf_cgroup_switch(struct task_struct *task, int mode)
719 {
720 }
721 
722 static inline void
723 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
724 {
725 }
726 
727 static inline u64 perf_cgroup_event_time(struct perf_event *event)
728 {
729 	return 0;
730 }
731 
732 static inline void
733 perf_cgroup_defer_enabled(struct perf_event *event)
734 {
735 }
736 
737 static inline void
738 perf_cgroup_mark_enabled(struct perf_event *event,
739 			 struct perf_event_context *ctx)
740 {
741 }
742 #endif
743 
744 /*
745  * set default to be dependent on timer tick just
746  * like original code
747  */
748 #define PERF_CPU_HRTIMER (1000 / HZ)
749 /*
 750  * function must be called with interrupts disabled
751  */
752 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
753 {
754 	struct perf_cpu_context *cpuctx;
755 	int rotations = 0;
756 
757 	WARN_ON(!irqs_disabled());
758 
759 	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
760 	rotations = perf_rotate_context(cpuctx);
761 
762 	raw_spin_lock(&cpuctx->hrtimer_lock);
763 	if (rotations)
764 		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
765 	else
766 		cpuctx->hrtimer_active = 0;
767 	raw_spin_unlock(&cpuctx->hrtimer_lock);
768 
769 	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
770 }
771 
772 static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
773 {
774 	struct hrtimer *timer = &cpuctx->hrtimer;
775 	struct pmu *pmu = cpuctx->ctx.pmu;
776 	u64 interval;
777 
778 	/* no multiplexing needed for SW PMU */
779 	if (pmu->task_ctx_nr == perf_sw_context)
780 		return;
781 
782 	/*
783 	 * check default is sane, if not set then force to
 784 	 * check that the default is sane; if not set, force it to
 785 	 * the default interval (1/tick)
786 	interval = pmu->hrtimer_interval_ms;
787 	if (interval < 1)
788 		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
789 
790 	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
791 
792 	raw_spin_lock_init(&cpuctx->hrtimer_lock);
793 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
794 	timer->function = perf_mux_hrtimer_handler;
795 }
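/*
 * For example, with HZ=250: PERF_CPU_HRTIMER is 1000 / 250 = 4ms, so a
 * PMU that leaves hrtimer_interval_ms at 0 rotates its events once per
 * tick, matching the original tick-driven behaviour.
 */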
796 
797 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
798 {
799 	struct hrtimer *timer = &cpuctx->hrtimer;
800 	struct pmu *pmu = cpuctx->ctx.pmu;
801 	unsigned long flags;
802 
803 	/* not for SW PMU */
804 	if (pmu->task_ctx_nr == perf_sw_context)
805 		return 0;
806 
807 	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
808 	if (!cpuctx->hrtimer_active) {
809 		cpuctx->hrtimer_active = 1;
810 		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
811 		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
812 	}
813 	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
814 
815 	return 0;
816 }
817 
818 void perf_pmu_disable(struct pmu *pmu)
819 {
820 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
821 	if (!(*count)++)
822 		pmu->pmu_disable(pmu);
823 }
824 
825 void perf_pmu_enable(struct pmu *pmu)
826 {
827 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
828 	if (!--(*count))
829 		pmu->pmu_enable(pmu);
830 }
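/*
 * The per-cpu pmu_disable_count makes these calls nest properly:
 *
 *	perf_pmu_disable(pmu);	// 0 -> 1: pmu->pmu_disable() is called
 *	perf_pmu_disable(pmu);	// 1 -> 2: no-op
 *	perf_pmu_enable(pmu);	// 2 -> 1: no-op
 *	perf_pmu_enable(pmu);	// 1 -> 0: pmu->pmu_enable() is called
 */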
831 
832 static DEFINE_PER_CPU(struct list_head, active_ctx_list);
833 
834 /*
835  * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
836  * perf_event_task_tick() are fully serialized because they're strictly cpu
 837  * affine and perf_event_ctx_{activate,deactivate} are called with IRQs
838  * disabled, while perf_event_task_tick is called from IRQ context.
839  */
840 static void perf_event_ctx_activate(struct perf_event_context *ctx)
841 {
842 	struct list_head *head = this_cpu_ptr(&active_ctx_list);
843 
844 	WARN_ON(!irqs_disabled());
845 
846 	WARN_ON(!list_empty(&ctx->active_ctx_list));
847 
848 	list_add(&ctx->active_ctx_list, head);
849 }
850 
851 static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
852 {
853 	WARN_ON(!irqs_disabled());
854 
855 	WARN_ON(list_empty(&ctx->active_ctx_list));
856 
857 	list_del_init(&ctx->active_ctx_list);
858 }
859 
860 static void get_ctx(struct perf_event_context *ctx)
861 {
862 	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
863 }
864 
865 static void free_ctx(struct rcu_head *head)
866 {
867 	struct perf_event_context *ctx;
868 
869 	ctx = container_of(head, struct perf_event_context, rcu_head);
870 	kfree(ctx->task_ctx_data);
871 	kfree(ctx);
872 }
873 
874 static void put_ctx(struct perf_event_context *ctx)
875 {
876 	if (atomic_dec_and_test(&ctx->refcount)) {
877 		if (ctx->parent_ctx)
878 			put_ctx(ctx->parent_ctx);
879 		if (ctx->task)
880 			put_task_struct(ctx->task);
881 		call_rcu(&ctx->rcu_head, free_ctx);
882 	}
883 }
884 
885 /*
886  * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
887  * perf_pmu_migrate_context() we need some magic.
888  *
889  * Those places that change perf_event::ctx will hold both
890  * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
891  *
892  * Lock ordering is by mutex address. There are two other sites where
893  * perf_event_context::mutex nests and those are:
894  *
895  *  - perf_event_exit_task_context()	[ child , 0 ]
896  *      __perf_event_exit_task()
897  *        sync_child_event()
898  *          put_event()			[ parent, 1 ]
899  *
900  *  - perf_event_init_context()		[ parent, 0 ]
901  *      inherit_task_group()
902  *        inherit_group()
903  *          inherit_event()
904  *            perf_event_alloc()
905  *              perf_init_event()
906  *                perf_try_init_event()	[ child , 1 ]
907  *
908  * While it appears there is an obvious deadlock here -- the parent and child
909  * nesting levels are inverted between the two. This is in fact safe because
 910  * life-time rules separate them. That is, an exiting task cannot fork, and a
911  * spawning task cannot (yet) exit.
912  *
 913  * But remember that these are parent<->child context relations, and
914  * migration does not affect children, therefore these two orderings should not
915  * interact.
916  *
917  * The change in perf_event::ctx does not affect children (as claimed above)
918  * because the sys_perf_event_open() case will install a new event and break
919  * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
920  * concerned with cpuctx and that doesn't have children.
921  *
922  * The places that change perf_event::ctx will issue:
923  *
924  *   perf_remove_from_context();
925  *   synchronize_rcu();
926  *   perf_install_in_context();
927  *
 928  * to effect the change. The remove_from_context() + synchronize_rcu() should
929  * quiesce the event, after which we can install it in the new location. This
930  * means that only external vectors (perf_fops, prctl) can perturb the event
931  * while in transit. Therefore all such accessors should also acquire
932  * perf_event_context::mutex to serialize against this.
933  *
 934  * However, because event->ctx can change while we're waiting to acquire
935  * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
936  * function.
937  *
938  * Lock order:
939  *	task_struct::perf_event_mutex
940  *	  perf_event_context::mutex
941  *	    perf_event_context::lock
 942  *	    perf_event::child_mutex
943  *	    perf_event::mmap_mutex
944  *	    mmap_sem
945  */
946 static struct perf_event_context *
947 perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
948 {
949 	struct perf_event_context *ctx;
950 
951 again:
952 	rcu_read_lock();
953 	ctx = ACCESS_ONCE(event->ctx);
954 	if (!atomic_inc_not_zero(&ctx->refcount)) {
955 		rcu_read_unlock();
956 		goto again;
957 	}
958 	rcu_read_unlock();
959 
960 	mutex_lock_nested(&ctx->mutex, nesting);
961 	if (event->ctx != ctx) {
962 		mutex_unlock(&ctx->mutex);
963 		put_ctx(ctx);
964 		goto again;
965 	}
966 
967 	return ctx;
968 }
969 
970 static inline struct perf_event_context *
971 perf_event_ctx_lock(struct perf_event *event)
972 {
973 	return perf_event_ctx_lock_nested(event, 0);
974 }
975 
976 static void perf_event_ctx_unlock(struct perf_event *event,
977 				  struct perf_event_context *ctx)
978 {
979 	mutex_unlock(&ctx->mutex);
980 	put_ctx(ctx);
981 }
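/*
 * Typical caller pattern (sketch):
 *
 *	struct perf_event_context *ctx;
 *
 *	ctx = perf_event_ctx_lock(event);
 *	...			// event->ctx can neither change nor be freed
 *	perf_event_ctx_unlock(event, ctx);
 */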
982 
983 /*
 984  * This must be done under the ctx->lock, so as to serialize against
985  * context_equiv(), therefore we cannot call put_ctx() since that might end up
986  * calling scheduler related locks and ctx->lock nests inside those.
987  */
988 static __must_check struct perf_event_context *
989 unclone_ctx(struct perf_event_context *ctx)
990 {
991 	struct perf_event_context *parent_ctx = ctx->parent_ctx;
992 
993 	lockdep_assert_held(&ctx->lock);
994 
995 	if (parent_ctx)
996 		ctx->parent_ctx = NULL;
997 	ctx->generation++;
998 
999 	return parent_ctx;
1000 }
1001 
1002 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1003 {
1004 	/*
1005 	 * only top level events have the pid namespace they were created in
1006 	 */
1007 	if (event->parent)
1008 		event = event->parent;
1009 
1010 	return task_tgid_nr_ns(p, event->ns);
1011 }
1012 
1013 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1014 {
1015 	/*
1016 	 * only top level events have the pid namespace they were created in
1017 	 */
1018 	if (event->parent)
1019 		event = event->parent;
1020 
1021 	return task_pid_nr_ns(p, event->ns);
1022 }
1023 
1024 /*
1025  * If we inherit events we want to return the parent event id
1026  * to userspace.
1027  */
1028 static u64 primary_event_id(struct perf_event *event)
1029 {
1030 	u64 id = event->id;
1031 
1032 	if (event->parent)
1033 		id = event->parent->id;
1034 
1035 	return id;
1036 }
1037 
1038 /*
1039  * Get the perf_event_context for a task and lock it.
 1040  * This has to cope with the fact that until it is locked,
1041  * the context could get moved to another task.
1042  */
1043 static struct perf_event_context *
1044 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
1045 {
1046 	struct perf_event_context *ctx;
1047 
1048 retry:
1049 	/*
1050 	 * One of the few rules of preemptible RCU is that one cannot do
1051 	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
1052 	 * part of the read side critical section was preemptible -- see
1053 	 * rcu_read_unlock_special().
1054 	 *
1055 	 * Since ctx->lock nests under rq->lock we must ensure the entire read
1056 	 * side critical section is non-preemptible.
1057 	 */
1058 	preempt_disable();
1059 	rcu_read_lock();
1060 	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
1061 	if (ctx) {
1062 		/*
1063 		 * If this context is a clone of another, it might
1064 		 * get swapped for another underneath us by
1065 		 * perf_event_task_sched_out, though the
1066 		 * rcu_read_lock() protects us from any context
1067 		 * getting freed.  Lock the context and check if it
1068 		 * got swapped before we could get the lock, and retry
1069 		 * if so.  If we locked the right context, then it
1070 		 * can't get swapped on us any more.
1071 		 */
1072 		raw_spin_lock_irqsave(&ctx->lock, *flags);
1073 		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
1074 			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
1075 			rcu_read_unlock();
1076 			preempt_enable();
1077 			goto retry;
1078 		}
1079 
1080 		if (!atomic_inc_not_zero(&ctx->refcount)) {
1081 			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
1082 			ctx = NULL;
1083 		}
1084 	}
1085 	rcu_read_unlock();
1086 	preempt_enable();
1087 	return ctx;
1088 }
1089 
1090 /*
1091  * Get the context for a task and increment its pin_count so it
1092  * can't get swapped to another task.  This also increments its
1093  * reference count so that the context can't get freed.
1094  */
1095 static struct perf_event_context *
1096 perf_pin_task_context(struct task_struct *task, int ctxn)
1097 {
1098 	struct perf_event_context *ctx;
1099 	unsigned long flags;
1100 
1101 	ctx = perf_lock_task_context(task, ctxn, &flags);
1102 	if (ctx) {
1103 		++ctx->pin_count;
1104 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
1105 	}
1106 	return ctx;
1107 }
1108 
1109 static void perf_unpin_context(struct perf_event_context *ctx)
1110 {
1111 	unsigned long flags;
1112 
1113 	raw_spin_lock_irqsave(&ctx->lock, flags);
1114 	--ctx->pin_count;
1115 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
1116 }
1117 
1118 /*
1119  * Update the record of the current time in a context.
1120  */
1121 static void update_context_time(struct perf_event_context *ctx)
1122 {
1123 	u64 now = perf_clock();
1124 
1125 	ctx->time += now - ctx->timestamp;
1126 	ctx->timestamp = now;
1127 }
1128 
1129 static u64 perf_event_time(struct perf_event *event)
1130 {
1131 	struct perf_event_context *ctx = event->ctx;
1132 
1133 	if (is_cgroup_event(event))
1134 		return perf_cgroup_event_time(event);
1135 
1136 	return ctx ? ctx->time : 0;
1137 }
1138 
1139 /*
 1140  * Update the total_time_enabled and total_time_running fields for an event.
1141  * The caller of this function needs to hold the ctx->lock.
1142  */
1143 static void update_event_times(struct perf_event *event)
1144 {
1145 	struct perf_event_context *ctx = event->ctx;
1146 	u64 run_end;
1147 
1148 	if (event->state < PERF_EVENT_STATE_INACTIVE ||
1149 	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1150 		return;
1151 	/*
1152 	 * in cgroup mode, time_enabled represents
1153 	 * the time the event was enabled AND active
1154 	 * tasks were in the monitored cgroup. This is
1155 	 * independent of the activity of the context as
1156 	 * there may be a mix of cgroup and non-cgroup events.
1157 	 *
1158 	 * That is why we treat cgroup events differently
1159 	 * here.
1160 	 */
1161 	if (is_cgroup_event(event))
1162 		run_end = perf_cgroup_event_time(event);
1163 	else if (ctx->is_active)
1164 		run_end = ctx->time;
1165 	else
1166 		run_end = event->tstamp_stopped;
1167 
1168 	event->total_time_enabled = run_end - event->tstamp_enabled;
1169 
1170 	if (event->state == PERF_EVENT_STATE_INACTIVE)
1171 		run_end = event->tstamp_stopped;
1172 	else
1173 		run_end = perf_event_time(event);
1174 
1175 	event->total_time_running = run_end - event->tstamp_running;
1176 
1177 }
1178 
1179 /*
1180  * Update total_time_enabled and total_time_running for all events in a group.
1181  */
1182 static void update_group_times(struct perf_event *leader)
1183 {
1184 	struct perf_event *event;
1185 
1186 	update_event_times(leader);
1187 	list_for_each_entry(event, &leader->sibling_list, group_entry)
1188 		update_event_times(event);
1189 }
1190 
1191 static struct list_head *
1192 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1193 {
1194 	if (event->attr.pinned)
1195 		return &ctx->pinned_groups;
1196 	else
1197 		return &ctx->flexible_groups;
1198 }
1199 
1200 /*
 1201  * Add an event to the lists for its context.
1202  * Must be called with ctx->mutex and ctx->lock held.
1203  */
1204 static void
1205 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1206 {
1207 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1208 	event->attach_state |= PERF_ATTACH_CONTEXT;
1209 
1210 	/*
1211 	 * If we're a stand alone event or group leader, we go to the context
 1212 	 * list; group events are kept attached to the group so that
1213 	 * perf_group_detach can, at all times, locate all siblings.
1214 	 */
1215 	if (event->group_leader == event) {
1216 		struct list_head *list;
1217 
1218 		if (is_software_event(event))
1219 			event->group_flags |= PERF_GROUP_SOFTWARE;
1220 
1221 		list = ctx_group_list(event, ctx);
1222 		list_add_tail(&event->group_entry, list);
1223 	}
1224 
1225 	if (is_cgroup_event(event))
1226 		ctx->nr_cgroups++;
1227 
1228 	list_add_rcu(&event->event_entry, &ctx->event_list);
1229 	ctx->nr_events++;
1230 	if (event->attr.inherit_stat)
1231 		ctx->nr_stat++;
1232 
1233 	ctx->generation++;
1234 }
1235 
1236 /*
1237  * Initialize event state based on the perf_event_attr::disabled.
1238  */
1239 static inline void perf_event__state_init(struct perf_event *event)
1240 {
1241 	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1242 					      PERF_EVENT_STATE_INACTIVE;
1243 }
1244 
1245 /*
1246  * Called at perf_event creation and when events are attached/detached from a
1247  * group.
1248  */
1249 static void perf_event__read_size(struct perf_event *event)
1250 {
1251 	int entry = sizeof(u64); /* value */
1252 	int size = 0;
1253 	int nr = 1;
1254 
1255 	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1256 		size += sizeof(u64);
1257 
1258 	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1259 		size += sizeof(u64);
1260 
1261 	if (event->attr.read_format & PERF_FORMAT_ID)
1262 		entry += sizeof(u64);
1263 
1264 	if (event->attr.read_format & PERF_FORMAT_GROUP) {
1265 		nr += event->group_leader->nr_siblings;
1266 		size += sizeof(u64);
1267 	}
1268 
1269 	size += entry * nr;
1270 	event->read_size = size;
1271 }
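/*
 * Worked example: a group leader with two siblings and read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID | PERF_FORMAT_GROUP
 * gives entry = 8 + 8 = 16 bytes, nr = 1 + 2 = 3 and size = 8
 * (time_enabled) + 8 (nr), hence read_size = 16 + 16 * 3 = 64 bytes.
 */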
1272 
1273 static void perf_event__header_size(struct perf_event *event)
1274 {
1275 	struct perf_sample_data *data;
1276 	u64 sample_type = event->attr.sample_type;
1277 	u16 size = 0;
1278 
1279 	perf_event__read_size(event);
1280 
1281 	if (sample_type & PERF_SAMPLE_IP)
1282 		size += sizeof(data->ip);
1283 
1284 	if (sample_type & PERF_SAMPLE_ADDR)
1285 		size += sizeof(data->addr);
1286 
1287 	if (sample_type & PERF_SAMPLE_PERIOD)
1288 		size += sizeof(data->period);
1289 
1290 	if (sample_type & PERF_SAMPLE_WEIGHT)
1291 		size += sizeof(data->weight);
1292 
1293 	if (sample_type & PERF_SAMPLE_READ)
1294 		size += event->read_size;
1295 
1296 	if (sample_type & PERF_SAMPLE_DATA_SRC)
1297 		size += sizeof(data->data_src.val);
1298 
1299 	if (sample_type & PERF_SAMPLE_TRANSACTION)
1300 		size += sizeof(data->txn);
1301 
1302 	event->header_size = size;
1303 }
1304 
1305 static void perf_event__id_header_size(struct perf_event *event)
1306 {
1307 	struct perf_sample_data *data;
1308 	u64 sample_type = event->attr.sample_type;
1309 	u16 size = 0;
1310 
1311 	if (sample_type & PERF_SAMPLE_TID)
1312 		size += sizeof(data->tid_entry);
1313 
1314 	if (sample_type & PERF_SAMPLE_TIME)
1315 		size += sizeof(data->time);
1316 
1317 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
1318 		size += sizeof(data->id);
1319 
1320 	if (sample_type & PERF_SAMPLE_ID)
1321 		size += sizeof(data->id);
1322 
1323 	if (sample_type & PERF_SAMPLE_STREAM_ID)
1324 		size += sizeof(data->stream_id);
1325 
1326 	if (sample_type & PERF_SAMPLE_CPU)
1327 		size += sizeof(data->cpu_entry);
1328 
1329 	event->id_header_size = size;
1330 }
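/*
 * For example, sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_IDENTIFIER yields 8 (tid_entry) + 8 (time) + 8 (id) =
 * 24 bytes of id headers appended to every record.
 */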
1331 
1332 static void perf_group_attach(struct perf_event *event)
1333 {
1334 	struct perf_event *group_leader = event->group_leader, *pos;
1335 
1336 	/*
1337 	 * We can have double attach due to group movement in perf_event_open.
1338 	 */
1339 	if (event->attach_state & PERF_ATTACH_GROUP)
1340 		return;
1341 
1342 	event->attach_state |= PERF_ATTACH_GROUP;
1343 
1344 	if (group_leader == event)
1345 		return;
1346 
1347 	WARN_ON_ONCE(group_leader->ctx != event->ctx);
1348 
1349 	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1350 			!is_software_event(event))
1351 		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1352 
1353 	list_add_tail(&event->group_entry, &group_leader->sibling_list);
1354 	group_leader->nr_siblings++;
1355 
1356 	perf_event__header_size(group_leader);
1357 
1358 	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1359 		perf_event__header_size(pos);
1360 }
1361 
1362 /*
 1363  * Remove an event from the lists for its context.
1364  * Must be called with ctx->mutex and ctx->lock held.
1365  */
1366 static void
1367 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1368 {
1369 	struct perf_cpu_context *cpuctx;
1370 
1371 	WARN_ON_ONCE(event->ctx != ctx);
1372 	lockdep_assert_held(&ctx->lock);
1373 
1374 	/*
1375 	 * We can have double detach due to exit/hot-unplug + close.
1376 	 */
1377 	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1378 		return;
1379 
1380 	event->attach_state &= ~PERF_ATTACH_CONTEXT;
1381 
1382 	if (is_cgroup_event(event)) {
1383 		ctx->nr_cgroups--;
1384 		cpuctx = __get_cpu_context(ctx);
1385 		/*
1386 		 * if there are no more cgroup events
 1387 		 * then clear cgrp to avoid a stale pointer
1388 		 * in update_cgrp_time_from_cpuctx()
1389 		 */
1390 		if (!ctx->nr_cgroups)
1391 			cpuctx->cgrp = NULL;
1392 	}
1393 
1394 	ctx->nr_events--;
1395 	if (event->attr.inherit_stat)
1396 		ctx->nr_stat--;
1397 
1398 	list_del_rcu(&event->event_entry);
1399 
1400 	if (event->group_leader == event)
1401 		list_del_init(&event->group_entry);
1402 
1403 	update_group_times(event);
1404 
1405 	/*
1406 	 * If event was in error state, then keep it
1407 	 * that way, otherwise bogus counts will be
1408 	 * returned on read(). The only way to get out
1409 	 * of error state is by explicit re-enabling
1410 	 * of the event
1411 	 */
1412 	if (event->state > PERF_EVENT_STATE_OFF)
1413 		event->state = PERF_EVENT_STATE_OFF;
1414 
1415 	ctx->generation++;
1416 }
1417 
1418 static void perf_group_detach(struct perf_event *event)
1419 {
1420 	struct perf_event *sibling, *tmp;
1421 	struct list_head *list = NULL;
1422 
1423 	/*
1424 	 * We can have double detach due to exit/hot-unplug + close.
1425 	 */
1426 	if (!(event->attach_state & PERF_ATTACH_GROUP))
1427 		return;
1428 
1429 	event->attach_state &= ~PERF_ATTACH_GROUP;
1430 
1431 	/*
1432 	 * If this is a sibling, remove it from its group.
1433 	 */
1434 	if (event->group_leader != event) {
1435 		list_del_init(&event->group_entry);
1436 		event->group_leader->nr_siblings--;
1437 		goto out;
1438 	}
1439 
1440 	if (!list_empty(&event->group_entry))
1441 		list = &event->group_entry;
1442 
1443 	/*
1444 	 * If this was a group event with sibling events then
1445 	 * upgrade the siblings to singleton events by adding them
1446 	 * to whatever list we are on.
1447 	 */
1448 	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1449 		if (list)
1450 			list_move_tail(&sibling->group_entry, list);
1451 		sibling->group_leader = sibling;
1452 
1453 		/* Inherit group flags from the previous leader */
1454 		sibling->group_flags = event->group_flags;
1455 
1456 		WARN_ON_ONCE(sibling->ctx != event->ctx);
1457 	}
1458 
1459 out:
1460 	perf_event__header_size(event->group_leader);
1461 
1462 	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1463 		perf_event__header_size(tmp);
1464 }
1465 
1466 /*
 1467  * A user event whose owner task has exited.
1468  */
1469 static bool is_orphaned_event(struct perf_event *event)
1470 {
1471 	return event && !is_kernel_event(event) && !event->owner;
1472 }
1473 
1474 /*
 1475  * Event has a parent but the parent's task finished and it's
 1476  * alive only because of children holding a reference.
1477  */
1478 static bool is_orphaned_child(struct perf_event *event)
1479 {
1480 	return is_orphaned_event(event->parent);
1481 }
1482 
1483 static void orphans_remove_work(struct work_struct *work);
1484 
1485 static void schedule_orphans_remove(struct perf_event_context *ctx)
1486 {
1487 	if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
1488 		return;
1489 
1490 	if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
1491 		get_ctx(ctx);
1492 		ctx->orphans_remove_sched = true;
1493 	}
1494 }
1495 
1496 static int __init perf_workqueue_init(void)
1497 {
1498 	perf_wq = create_singlethread_workqueue("perf");
1499 	WARN(!perf_wq, "failed to create perf workqueue\n");
1500 	return perf_wq ? 0 : -1;
1501 }
1502 
1503 core_initcall(perf_workqueue_init);
1504 
1505 static inline int
1506 event_filter_match(struct perf_event *event)
1507 {
1508 	return (event->cpu == -1 || event->cpu == smp_processor_id())
1509 	    && perf_cgroup_match(event);
1510 }
1511 
1512 static void
1513 event_sched_out(struct perf_event *event,
1514 		  struct perf_cpu_context *cpuctx,
1515 		  struct perf_event_context *ctx)
1516 {
1517 	u64 tstamp = perf_event_time(event);
1518 	u64 delta;
1519 
1520 	WARN_ON_ONCE(event->ctx != ctx);
1521 	lockdep_assert_held(&ctx->lock);
1522 
1523 	/*
1524 	 * An event which could not be activated because of
1525 	 * filter mismatch still needs to have its timings
 1526 	 * maintained, otherwise bogus information is returned
1527 	 * via read() for time_enabled, time_running:
1528 	 */
1529 	if (event->state == PERF_EVENT_STATE_INACTIVE
1530 	    && !event_filter_match(event)) {
1531 		delta = tstamp - event->tstamp_stopped;
1532 		event->tstamp_running += delta;
1533 		event->tstamp_stopped = tstamp;
1534 	}
1535 
1536 	if (event->state != PERF_EVENT_STATE_ACTIVE)
1537 		return;
1538 
1539 	perf_pmu_disable(event->pmu);
1540 
1541 	event->state = PERF_EVENT_STATE_INACTIVE;
1542 	if (event->pending_disable) {
1543 		event->pending_disable = 0;
1544 		event->state = PERF_EVENT_STATE_OFF;
1545 	}
1546 	event->tstamp_stopped = tstamp;
1547 	event->pmu->del(event, 0);
1548 	event->oncpu = -1;
1549 
1550 	if (!is_software_event(event))
1551 		cpuctx->active_oncpu--;
1552 	if (!--ctx->nr_active)
1553 		perf_event_ctx_deactivate(ctx);
1554 	if (event->attr.freq && event->attr.sample_freq)
1555 		ctx->nr_freq--;
1556 	if (event->attr.exclusive || !cpuctx->active_oncpu)
1557 		cpuctx->exclusive = 0;
1558 
1559 	if (is_orphaned_child(event))
1560 		schedule_orphans_remove(ctx);
1561 
1562 	perf_pmu_enable(event->pmu);
1563 }
1564 
1565 static void
1566 group_sched_out(struct perf_event *group_event,
1567 		struct perf_cpu_context *cpuctx,
1568 		struct perf_event_context *ctx)
1569 {
1570 	struct perf_event *event;
1571 	int state = group_event->state;
1572 
1573 	event_sched_out(group_event, cpuctx, ctx);
1574 
1575 	/*
1576 	 * Schedule out siblings (if any):
1577 	 */
1578 	list_for_each_entry(event, &group_event->sibling_list, group_entry)
1579 		event_sched_out(event, cpuctx, ctx);
1580 
1581 	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1582 		cpuctx->exclusive = 0;
1583 }
1584 
1585 struct remove_event {
1586 	struct perf_event *event;
1587 	bool detach_group;
1588 };
1589 
1590 /*
1591  * Cross CPU call to remove a performance event
1592  *
1593  * We disable the event on the hardware level first. After that we
1594  * remove it from the context list.
1595  */
1596 static int __perf_remove_from_context(void *info)
1597 {
1598 	struct remove_event *re = info;
1599 	struct perf_event *event = re->event;
1600 	struct perf_event_context *ctx = event->ctx;
1601 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1602 
1603 	raw_spin_lock(&ctx->lock);
1604 	event_sched_out(event, cpuctx, ctx);
1605 	if (re->detach_group)
1606 		perf_group_detach(event);
1607 	list_del_event(event, ctx);
1608 	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1609 		ctx->is_active = 0;
1610 		cpuctx->task_ctx = NULL;
1611 	}
1612 	raw_spin_unlock(&ctx->lock);
1613 
1614 	return 0;
1615 }
1616 
1617 
1618 /*
1619  * Remove the event from a task's (or a CPU's) list of events.
1620  *
 1621  * CPU events are removed with an smp call. For task events we only
1622  * call when the task is on a CPU.
1623  *
1624  * If event->ctx is a cloned context, callers must make sure that
1625  * every task struct that event->ctx->task could possibly point to
1626  * remains valid.  This is OK when called from perf_release since
1627  * that only calls us on the top-level context, which can't be a clone.
1628  * When called from perf_event_exit_task, it's OK because the
1629  * context has been detached from its task.
1630  */
1631 static void perf_remove_from_context(struct perf_event *event, bool detach_group)
1632 {
1633 	struct perf_event_context *ctx = event->ctx;
1634 	struct task_struct *task = ctx->task;
1635 	struct remove_event re = {
1636 		.event = event,
1637 		.detach_group = detach_group,
1638 	};
1639 
1640 	lockdep_assert_held(&ctx->mutex);
1641 
1642 	if (!task) {
1643 		/*
1644 		 * Per cpu events are removed via an smp call. The removal can
1645 		 * fail if the CPU is currently offline, but in that case we
1646 		 * already called __perf_remove_from_context from
1647 		 * perf_event_exit_cpu.
1648 		 */
1649 		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
1650 		return;
1651 	}
1652 
1653 retry:
1654 	if (!task_function_call(task, __perf_remove_from_context, &re))
1655 		return;
1656 
1657 	raw_spin_lock_irq(&ctx->lock);
1658 	/*
1659 	 * If we failed to find a running task, but find the context active now
1660 	 * that we've acquired the ctx->lock, retry.
1661 	 */
1662 	if (ctx->is_active) {
1663 		raw_spin_unlock_irq(&ctx->lock);
1664 		/*
1665 		 * Reload the task pointer, it might have been changed by
1666 		 * a concurrent perf_event_context_sched_out().
1667 		 */
1668 		task = ctx->task;
1669 		goto retry;
1670 	}
1671 
1672 	/*
 1673 	 * Since the task isn't running, it's safe to remove the event;
 1674 	 * holding the ctx->lock ensures the task won't get scheduled in.
1675 	 */
1676 	if (detach_group)
1677 		perf_group_detach(event);
1678 	list_del_event(event, ctx);
1679 	raw_spin_unlock_irq(&ctx->lock);
1680 }
1681 
1682 /*
1683  * Cross CPU call to disable a performance event
1684  */
1685 int __perf_event_disable(void *info)
1686 {
1687 	struct perf_event *event = info;
1688 	struct perf_event_context *ctx = event->ctx;
1689 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1690 
1691 	/*
 1692 	 * If this is a per-task event, we need to check whether this
1693 	 * event's task is the current task on this cpu.
1694 	 *
1695 	 * Can trigger due to concurrent perf_event_context_sched_out()
1696 	 * flipping contexts around.
1697 	 */
1698 	if (ctx->task && cpuctx->task_ctx != ctx)
1699 		return -EINVAL;
1700 
1701 	raw_spin_lock(&ctx->lock);
1702 
1703 	/*
1704 	 * If the event is on, turn it off.
1705 	 * If it is in error state, leave it in error state.
1706 	 */
1707 	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1708 		update_context_time(ctx);
1709 		update_cgrp_time_from_event(event);
1710 		update_group_times(event);
1711 		if (event == event->group_leader)
1712 			group_sched_out(event, cpuctx, ctx);
1713 		else
1714 			event_sched_out(event, cpuctx, ctx);
1715 		event->state = PERF_EVENT_STATE_OFF;
1716 	}
1717 
1718 	raw_spin_unlock(&ctx->lock);
1719 
1720 	return 0;
1721 }
1722 
1723 /*
 1724  * Disable an event.
1725  *
1726  * If event->ctx is a cloned context, callers must make sure that
1727  * every task struct that event->ctx->task could possibly point to
 1728  * remains valid.  This condition is satisfied when called through
1729  * perf_event_for_each_child or perf_event_for_each because they
1730  * hold the top-level event's child_mutex, so any descendant that
1731  * goes to exit will block in sync_child_event.
1732  * When called from perf_pending_event it's OK because event->ctx
1733  * is the current context on this CPU and preemption is disabled,
1734  * hence we can't get into perf_event_task_sched_out for this context.
1735  */
1736 static void _perf_event_disable(struct perf_event *event)
1737 {
1738 	struct perf_event_context *ctx = event->ctx;
1739 	struct task_struct *task = ctx->task;
1740 
1741 	if (!task) {
1742 		/*
1743 		 * Disable the event on the cpu that it's on
1744 		 */
1745 		cpu_function_call(event->cpu, __perf_event_disable, event);
1746 		return;
1747 	}
1748 
1749 retry:
1750 	if (!task_function_call(task, __perf_event_disable, event))
1751 		return;
1752 
1753 	raw_spin_lock_irq(&ctx->lock);
1754 	/*
1755 	 * If the event is still active, we need to retry the cross-call.
1756 	 */
1757 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
1758 		raw_spin_unlock_irq(&ctx->lock);
1759 		/*
1760 		 * Reload the task pointer, it might have been changed by
1761 		 * a concurrent perf_event_context_sched_out().
1762 		 */
1763 		task = ctx->task;
1764 		goto retry;
1765 	}
1766 
1767 	/*
1768 	 * Since we have the lock this context can't be scheduled
1769 	 * in, so we can change the state safely.
1770 	 */
1771 	if (event->state == PERF_EVENT_STATE_INACTIVE) {
1772 		update_group_times(event);
1773 		event->state = PERF_EVENT_STATE_OFF;
1774 	}
1775 	raw_spin_unlock_irq(&ctx->lock);
1776 }
1777 
1778 /*
1779  * Strictly speaking kernel users cannot create groups and therefore this
1780  * interface does not need the perf_event_ctx_lock() magic.
1781  */
1782 void perf_event_disable(struct perf_event *event)
1783 {
1784 	struct perf_event_context *ctx;
1785 
1786 	ctx = perf_event_ctx_lock(event);
1787 	_perf_event_disable(event);
1788 	perf_event_ctx_unlock(event, ctx);
1789 }
1790 EXPORT_SYMBOL_GPL(perf_event_disable);
1791 
1792 static void perf_set_shadow_time(struct perf_event *event,
1793 				 struct perf_event_context *ctx,
1794 				 u64 tstamp)
1795 {
1796 	/*
1797 	 * use the correct time source for the time snapshot
1798 	 *
1799 	 * We could get by without this by leveraging the
1800 	 * fact that to get to this function, the caller
1801 	 * has most likely already called update_context_time()
1802 	 * and update_cgrp_time_xx() and thus both timestamp
 1803 	 * and update_cgrp_time_xx() and thus both timestamps
 1804 	 * are identical (or very close). Given that tstamp is
1805 	 *    tstamp - ctx->timestamp
1806 	 * is equivalent to
1807 	 *    tstamp - cgrp->timestamp.
1808 	 *
1809 	 * Then, in perf_output_read(), the calculation would
1810 	 * work with no changes because:
1811 	 * - event is guaranteed scheduled in
1812 	 * - no scheduled out in between
1813 	 * - thus the timestamp would be the same
1814 	 *
1815 	 * But this is a bit hairy.
1816 	 *
1817 	 * So instead, we have an explicit cgroup call to remain
 1818 	 * within the same time source all along. We believe it
1819 	 * is cleaner and simpler to understand.
1820 	 */
1821 	if (is_cgroup_event(event))
1822 		perf_cgroup_set_shadow_time(event, tstamp);
1823 	else
1824 		event->shadow_ctx_time = tstamp - ctx->timestamp;
1825 }
1826 
1827 #define MAX_INTERRUPTS (~0ULL)
1828 
1829 static void perf_log_throttle(struct perf_event *event, int enable);
1830 static void perf_log_itrace_start(struct perf_event *event);
1831 
1832 static int
1833 event_sched_in(struct perf_event *event,
1834 		 struct perf_cpu_context *cpuctx,
1835 		 struct perf_event_context *ctx)
1836 {
1837 	u64 tstamp = perf_event_time(event);
1838 	int ret = 0;
1839 
1840 	lockdep_assert_held(&ctx->lock);
1841 
1842 	if (event->state <= PERF_EVENT_STATE_OFF)
1843 		return 0;
1844 
1845 	event->state = PERF_EVENT_STATE_ACTIVE;
1846 	event->oncpu = smp_processor_id();
1847 
1848 	/*
 1849 	 * Unthrottle events: since we were scheduled out we might have missed
 1850 	 * several ticks already, and for a heavily scheduling task there is
 1851 	 * little guarantee it'll get a tick in a timely manner.
1852 	 */
1853 	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1854 		perf_log_throttle(event, 1);
1855 		event->hw.interrupts = 0;
1856 	}
1857 
1858 	/*
1859 	 * The new state must be visible before we turn it on in the hardware:
1860 	 */
1861 	smp_wmb();
1862 
1863 	perf_pmu_disable(event->pmu);
1864 
1865 	event->tstamp_running += tstamp - event->tstamp_stopped;
1866 
1867 	perf_set_shadow_time(event, ctx, tstamp);
1868 
1869 	perf_log_itrace_start(event);
1870 
1871 	if (event->pmu->add(event, PERF_EF_START)) {
1872 		event->state = PERF_EVENT_STATE_INACTIVE;
1873 		event->oncpu = -1;
1874 		ret = -EAGAIN;
1875 		goto out;
1876 	}
1877 
1878 	if (!is_software_event(event))
1879 		cpuctx->active_oncpu++;
1880 	if (!ctx->nr_active++)
1881 		perf_event_ctx_activate(ctx);
1882 	if (event->attr.freq && event->attr.sample_freq)
1883 		ctx->nr_freq++;
1884 
1885 	if (event->attr.exclusive)
1886 		cpuctx->exclusive = 1;
1887 
1888 	if (is_orphaned_child(event))
1889 		schedule_orphans_remove(ctx);
1890 
1891 out:
1892 	perf_pmu_enable(event->pmu);
1893 
1894 	return ret;
1895 }
1896 
1897 static int
1898 group_sched_in(struct perf_event *group_event,
1899 	       struct perf_cpu_context *cpuctx,
1900 	       struct perf_event_context *ctx)
1901 {
1902 	struct perf_event *event, *partial_group = NULL;
1903 	struct pmu *pmu = ctx->pmu;
1904 	u64 now = ctx->time;
1905 	bool simulate = false;
1906 
1907 	if (group_event->state == PERF_EVENT_STATE_OFF)
1908 		return 0;
1909 
1910 	pmu->start_txn(pmu);
1911 
1912 	if (event_sched_in(group_event, cpuctx, ctx)) {
1913 		pmu->cancel_txn(pmu);
1914 		perf_mux_hrtimer_restart(cpuctx);
1915 		return -EAGAIN;
1916 	}
1917 
1918 	/*
1919 	 * Schedule in siblings as one group (if any):
1920 	 */
1921 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1922 		if (event_sched_in(event, cpuctx, ctx)) {
1923 			partial_group = event;
1924 			goto group_error;
1925 		}
1926 	}
1927 
1928 	if (!pmu->commit_txn(pmu))
1929 		return 0;
1930 
1931 group_error:
1932 	/*
1933 	 * Groups can be scheduled in as one unit only, so undo any
1934 	 * partial group before returning:
1935 	 * The events up to the failed event are scheduled out normally,
1936 	 * tstamp_stopped will be updated.
1937 	 *
1938 	 * The failed events and the remaining siblings need to have
1939 	 * their timings updated as if they had gone through event_sched_in()
1940 	 * and event_sched_out(). This is required to get consistent timings
1941 	 * across the group. This also takes care of the case where the group
1942 	 * could never be scheduled by ensuring tstamp_stopped is set to mark
1943 	 * the time the event was actually stopped, such that time delta
1944 	 * calculation in update_event_times() is correct.
1945 	 */
1946 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1947 		if (event == partial_group)
1948 			simulate = true;
1949 
1950 		if (simulate) {
1951 			event->tstamp_running += now - event->tstamp_stopped;
1952 			event->tstamp_stopped = now;
1953 		} else {
1954 			event_sched_out(event, cpuctx, ctx);
1955 		}
1956 	}
1957 	event_sched_out(group_event, cpuctx, ctx);
1958 
1959 	pmu->cancel_txn(pmu);
1960 
1961 	perf_mux_hrtimer_restart(cpuctx);
1962 
1963 	return -EAGAIN;
1964 }
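
/*
 * Sketch of the transaction protocol used above (annotation, not part
 * of the original source):
 *
 *	pmu->start_txn(pmu);		// open a group scheduling transaction
 *	event_sched_in(leader, ...);	// pmu->add(leader, PERF_EF_START)
 *	event_sched_in(sibling, ...);	// ... one per group member
 *	if (!pmu->commit_txn(pmu))	// PMU accepted the whole group
 *		return 0;
 *	// otherwise: fix up timings, event_sched_out() what went in,
 *	pmu->cancel_txn(pmu);		// and roll the transaction back
 *
 * PMUs without real transaction support fall back to no-op txn
 * callbacks and simply rely on each individual ->add() succeeding or
 * failing.
 */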
1965 
1966 /*
1967  * Work out whether we can put this event group on the CPU now.
1968  */
1969 static int group_can_go_on(struct perf_event *event,
1970 			   struct perf_cpu_context *cpuctx,
1971 			   int can_add_hw)
1972 {
1973 	/*
1974 	 * Groups consisting entirely of software events can always go on.
1975 	 */
1976 	if (event->group_flags & PERF_GROUP_SOFTWARE)
1977 		return 1;
1978 	/*
1979 	 * If an exclusive group is already on, no other hardware
1980 	 * events can go on.
1981 	 */
1982 	if (cpuctx->exclusive)
1983 		return 0;
1984 	/*
1985 	 * If this group is exclusive and there are already
1986 	 * events on the CPU, it can't go on.
1987 	 */
1988 	if (event->attr.exclusive && cpuctx->active_oncpu)
1989 		return 0;
1990 	/*
1991 	 * Otherwise, try to add it if all previous groups were able
1992 	 * to go on.
1993 	 */
1994 	return can_add_hw;
1995 }
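
/*
 * Decision summary for the checks above (annotation, not part of the
 * original source):
 *
 *	all-software group		-> always goes on
 *	cpuctx->exclusive already set	-> nothing else goes on
 *	exclusive group, hw active	-> does not go on
 *	otherwise			-> inherits can_add_hw
 */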
1996 
1997 static void add_event_to_ctx(struct perf_event *event,
1998 			       struct perf_event_context *ctx)
1999 {
2000 	u64 tstamp = perf_event_time(event);
2001 
2002 	list_add_event(event, ctx);
2003 	perf_group_attach(event);
2004 	event->tstamp_enabled = tstamp;
2005 	event->tstamp_running = tstamp;
2006 	event->tstamp_stopped = tstamp;
2007 }
2008 
2009 static void task_ctx_sched_out(struct perf_event_context *ctx);
2010 static void
2011 ctx_sched_in(struct perf_event_context *ctx,
2012 	     struct perf_cpu_context *cpuctx,
2013 	     enum event_type_t event_type,
2014 	     struct task_struct *task);
2015 
2016 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2017 				struct perf_event_context *ctx,
2018 				struct task_struct *task)
2019 {
2020 	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2021 	if (ctx)
2022 		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2023 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2024 	if (ctx)
2025 		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2026 }
2027 
2028 /*
2029  * Cross CPU call to install and enable a performance event
2030  *
2031  * Must be called with ctx->mutex held
2032  */
2033 static int __perf_install_in_context(void *info)
2034 {
2035 	struct perf_event *event = info;
2036 	struct perf_event_context *ctx = event->ctx;
2037 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2038 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
2039 	struct task_struct *task = current;
2040 
2041 	perf_ctx_lock(cpuctx, task_ctx);
2042 	perf_pmu_disable(cpuctx->ctx.pmu);
2043 
2044 	/*
2045 	 * If there was an active task_ctx, schedule it out.
2046 	 */
2047 	if (task_ctx)
2048 		task_ctx_sched_out(task_ctx);
2049 
2050 	/*
2051 	 * If the context we're installing events in is not the
2052 	 * active task_ctx, flip them.
2053 	 */
2054 	if (ctx->task && task_ctx != ctx) {
2055 		if (task_ctx)
2056 			raw_spin_unlock(&task_ctx->lock);
2057 		raw_spin_lock(&ctx->lock);
2058 		task_ctx = ctx;
2059 	}
2060 
2061 	if (task_ctx) {
2062 		cpuctx->task_ctx = task_ctx;
2063 		task = task_ctx->task;
2064 	}
2065 
2066 	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
2067 
2068 	update_context_time(ctx);
2069 	/*
2070 	 * update cgrp time only if current cgrp
2071 	 * matches event->cgrp. Must be done before
2072 	 * calling add_event_to_ctx()
2073 	 */
2074 	update_cgrp_time_from_event(event);
2075 
2076 	add_event_to_ctx(event, ctx);
2077 
2078 	/*
2079 	 * Schedule everything back in
2080 	 */
2081 	perf_event_sched_in(cpuctx, task_ctx, task);
2082 
2083 	perf_pmu_enable(cpuctx->ctx.pmu);
2084 	perf_ctx_unlock(cpuctx, task_ctx);
2085 
2086 	return 0;
2087 }
2088 
2089 /*
2090  * Attach a performance event to a context
2091  *
2092  * First we add the event to the list with the hardware enable bit
2093  * in event->hw_config cleared.
2094  *
2095  * If the event is attached to a task which is on a CPU we use a smp
2096  * call to enable it in the task context. The task might have been
2097  * scheduled away, but we check this in the smp call again.
2098  */
2099 static void
2100 perf_install_in_context(struct perf_event_context *ctx,
2101 			struct perf_event *event,
2102 			int cpu)
2103 {
2104 	struct task_struct *task = ctx->task;
2105 
2106 	lockdep_assert_held(&ctx->mutex);
2107 
2108 	event->ctx = ctx;
2109 	if (event->cpu != -1)
2110 		event->cpu = cpu;
2111 
2112 	if (!task) {
2113 		/*
2114 		 * Per cpu events are installed via an smp call and
2115 		 * the install is always successful.
2116 		 */
2117 		cpu_function_call(cpu, __perf_install_in_context, event);
2118 		return;
2119 	}
2120 
2121 retry:
2122 	if (!task_function_call(task, __perf_install_in_context, event))
2123 		return;
2124 
2125 	raw_spin_lock_irq(&ctx->lock);
2126 	/*
2127 	 * If we failed to find a running task, but find the context active now
2128 	 * that we've acquired the ctx->lock, retry.
2129 	 */
2130 	if (ctx->is_active) {
2131 		raw_spin_unlock_irq(&ctx->lock);
2132 		/*
2133 		 * Reload the task pointer, it might have been changed by
2134 		 * a concurrent perf_event_context_sched_out().
2135 		 */
2136 		task = ctx->task;
2137 		goto retry;
2138 	}
2139 
2140 	/*
2141 	 * Since the task isn't running, it's safe to add the event; our
2142 	 * holding the ctx->lock ensures the task won't get scheduled in.
2143 	 */
2144 	add_event_to_ctx(event, ctx);
2145 	raw_spin_unlock_irq(&ctx->lock);
2146 }
2147 
2148 /*
2149  * Put an event into inactive state and update time fields.
2150  * Enabling the leader of a group effectively enables all
2151  * the group members that aren't explicitly disabled, so we
2152  * have to update their ->tstamp_enabled also.
2153  * Note: this works for group members as well as group leaders
2154  * since the non-leader members' sibling_lists will be empty.
2155  */
2156 static void __perf_event_mark_enabled(struct perf_event *event)
2157 {
2158 	struct perf_event *sub;
2159 	u64 tstamp = perf_event_time(event);
2160 
2161 	event->state = PERF_EVENT_STATE_INACTIVE;
2162 	event->tstamp_enabled = tstamp - event->total_time_enabled;
2163 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
2164 		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
2165 			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
2166 	}
2167 }
2168 
2169 /*
2170  * Cross CPU call to enable a performance event
2171  */
2172 static int __perf_event_enable(void *info)
2173 {
2174 	struct perf_event *event = info;
2175 	struct perf_event_context *ctx = event->ctx;
2176 	struct perf_event *leader = event->group_leader;
2177 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2178 	int err;
2179 
2180 	/*
2181 	 * There's a time window between 'ctx->is_active' check
2182 	 * in the perf_event_enable() function and this place having:
2183 	 *   - IRQs on
2184 	 *   - ctx->lock unlocked
2185 	 *
2186 	 * where the task could be killed and 'ctx' deactivated
2187 	 * by perf_event_exit_task.
2188 	 */
2189 	if (!ctx->is_active)
2190 		return -EINVAL;
2191 
2192 	raw_spin_lock(&ctx->lock);
2193 	update_context_time(ctx);
2194 
2195 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
2196 		goto unlock;
2197 
2198 	/*
2199 	 * set current task's cgroup time reference point
2200 	 */
2201 	perf_cgroup_set_timestamp(current, ctx);
2202 
2203 	__perf_event_mark_enabled(event);
2204 
2205 	if (!event_filter_match(event)) {
2206 		if (is_cgroup_event(event))
2207 			perf_cgroup_defer_enabled(event);
2208 		goto unlock;
2209 	}
2210 
2211 	/*
2212 	 * If the event is in a group and isn't the group leader,
2213 	 * then don't put it on unless the group is on.
2214 	 */
2215 	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
2216 		goto unlock;
2217 
2218 	if (!group_can_go_on(event, cpuctx, 1)) {
2219 		err = -EEXIST;
2220 	} else {
2221 		if (event == leader)
2222 			err = group_sched_in(event, cpuctx, ctx);
2223 		else
2224 			err = event_sched_in(event, cpuctx, ctx);
2225 	}
2226 
2227 	if (err) {
2228 		/*
2229 		 * If this event can't go on and it's part of a
2230 		 * group, then the whole group has to come off.
2231 		 */
2232 		if (leader != event) {
2233 			group_sched_out(leader, cpuctx, ctx);
2234 			perf_mux_hrtimer_restart(cpuctx);
2235 		}
2236 		if (leader->attr.pinned) {
2237 			update_group_times(leader);
2238 			leader->state = PERF_EVENT_STATE_ERROR;
2239 		}
2240 	}
2241 
2242 unlock:
2243 	raw_spin_unlock(&ctx->lock);
2244 
2245 	return 0;
2246 }
2247 
2248 /*
2249  * Enable an event.
2250  *
2251  * If event->ctx is a cloned context, callers must make sure that
2252  * every task struct that event->ctx->task could possibly point to
2253  * remains valid.  This condition is satisfied when called through
2254  * perf_event_for_each_child or perf_event_for_each as described
2255  * for perf_event_disable.
2256  */
2257 static void _perf_event_enable(struct perf_event *event)
2258 {
2259 	struct perf_event_context *ctx = event->ctx;
2260 	struct task_struct *task = ctx->task;
2261 
2262 	if (!task) {
2263 		/*
2264 		 * Enable the event on the cpu that it's on
2265 		 */
2266 		cpu_function_call(event->cpu, __perf_event_enable, event);
2267 		return;
2268 	}
2269 
2270 	raw_spin_lock_irq(&ctx->lock);
2271 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
2272 		goto out;
2273 
2274 	/*
2275 	 * If the event is in error state, clear that first.
2276 	 * That way, if we see the event in error state below, we
2277 	 * know that it has gone back into error state, as distinct
2278 	 * from the task having been scheduled away before the
2279 	 * cross-call arrived.
2280 	 */
2281 	if (event->state == PERF_EVENT_STATE_ERROR)
2282 		event->state = PERF_EVENT_STATE_OFF;
2283 
2284 retry:
2285 	if (!ctx->is_active) {
2286 		__perf_event_mark_enabled(event);
2287 		goto out;
2288 	}
2289 
2290 	raw_spin_unlock_irq(&ctx->lock);
2291 
2292 	if (!task_function_call(task, __perf_event_enable, event))
2293 		return;
2294 
2295 	raw_spin_lock_irq(&ctx->lock);
2296 
2297 	/*
2298 	 * If the context is active and the event is still off,
2299 	 * we need to retry the cross-call.
2300 	 */
2301 	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
2302 		/*
2303 		 * task could have been flipped by a concurrent
2304 		 * perf_event_context_sched_out()
2305 		 */
2306 		task = ctx->task;
2307 		goto retry;
2308 	}
2309 
2310 out:
2311 	raw_spin_unlock_irq(&ctx->lock);
2312 }
2313 
2314 /*
2315  * See perf_event_disable();
2316  */
2317 void perf_event_enable(struct perf_event *event)
2318 {
2319 	struct perf_event_context *ctx;
2320 
2321 	ctx = perf_event_ctx_lock(event);
2322 	_perf_event_enable(event);
2323 	perf_event_ctx_unlock(event, ctx);
2324 }
2325 EXPORT_SYMBOL_GPL(perf_event_enable);
2326 
2327 static int _perf_event_refresh(struct perf_event *event, int refresh)
2328 {
2329 	/*
2330 	 * not supported on inherited events
2331 	 */
2332 	if (event->attr.inherit || !is_sampling_event(event))
2333 		return -EINVAL;
2334 
2335 	atomic_add(refresh, &event->event_limit);
2336 	_perf_event_enable(event);
2337 
2338 	return 0;
2339 }
2340 
2341 /*
2342  * See perf_event_disable()
2343  */
2344 int perf_event_refresh(struct perf_event *event, int refresh)
2345 {
2346 	struct perf_event_context *ctx;
2347 	int ret;
2348 
2349 	ctx = perf_event_ctx_lock(event);
2350 	ret = _perf_event_refresh(event, refresh);
2351 	perf_event_ctx_unlock(event, ctx);
2352 
2353 	return ret;
2354 }
2355 EXPORT_SYMBOL_GPL(perf_event_refresh);
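
/*
 * Usage note (annotation, not part of the original source): refresh
 * arms a sampling event for a bounded number of overflows. For
 * example, perf_event_refresh(event, 1) enables the event for a single
 * overflow; once the overflow handler decrements event_limit to zero
 * it disables the event again and signals POLL_HUP, see
 * __perf_event_overflow().
 */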
2356 
2357 static void ctx_sched_out(struct perf_event_context *ctx,
2358 			  struct perf_cpu_context *cpuctx,
2359 			  enum event_type_t event_type)
2360 {
2361 	struct perf_event *event;
2362 	int is_active = ctx->is_active;
2363 
2364 	ctx->is_active &= ~event_type;
2365 	if (likely(!ctx->nr_events))
2366 		return;
2367 
2368 	update_context_time(ctx);
2369 	update_cgrp_time_from_cpuctx(cpuctx);
2370 	if (!ctx->nr_active)
2371 		return;
2372 
2373 	perf_pmu_disable(ctx->pmu);
2374 	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
2375 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2376 			group_sched_out(event, cpuctx, ctx);
2377 	}
2378 
2379 	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
2380 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
2381 			group_sched_out(event, cpuctx, ctx);
2382 	}
2383 	perf_pmu_enable(ctx->pmu);
2384 }
2385 
2386 /*
2387  * Test whether two contexts are equivalent, i.e. whether they have both been
2388  * cloned from the same version of the same context.
2389  *
2390  * Equivalence is measured using a generation number in the context that is
2391  * incremented on each modification to it; see unclone_ctx(), list_add_event()
2392  * and list_del_event().
2393  */
2394 static int context_equiv(struct perf_event_context *ctx1,
2395 			 struct perf_event_context *ctx2)
2396 {
2397 	lockdep_assert_held(&ctx1->lock);
2398 	lockdep_assert_held(&ctx2->lock);
2399 
2400 	/* Pinning disables the swap optimization */
2401 	if (ctx1->pin_count || ctx2->pin_count)
2402 		return 0;
2403 
2404 	/* If ctx1 is the parent of ctx2 */
2405 	if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2406 		return 1;
2407 
2408 	/* If ctx2 is the parent of ctx1 */
2409 	if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2410 		return 1;
2411 
2412 	/*
2413 	 * If ctx1 and ctx2 have the same parent, we flatten the parent
2414 	 * hierarchy, see perf_event_init_context().
2415 	 */
2416 	if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2417 			ctx1->parent_gen == ctx2->parent_gen)
2418 		return 1;
2419 
2420 	/* Unmatched */
2421 	return 0;
2422 }
2423 
2424 static void __perf_event_sync_stat(struct perf_event *event,
2425 				     struct perf_event *next_event)
2426 {
2427 	u64 value;
2428 
2429 	if (!event->attr.inherit_stat)
2430 		return;
2431 
2432 	/*
2433 	 * Update the event value. We cannot use perf_event_read()
2434 	 * because we're in the middle of a context switch and have IRQs
2435 	 * disabled, which upsets smp_call_function_single(); however,
2436 	 * we know the event must be on the current CPU, therefore we
2437 	 * don't need it.
2438 	 */
2439 	switch (event->state) {
2440 	case PERF_EVENT_STATE_ACTIVE:
2441 		event->pmu->read(event);
2442 		/* fall-through */
2443 
2444 	case PERF_EVENT_STATE_INACTIVE:
2445 		update_event_times(event);
2446 		break;
2447 
2448 	default:
2449 		break;
2450 	}
2451 
2452 	/*
2453 	 * In order to keep per-task stats reliable we need to flip the event
2454 	 * values when we flip the contexts.
2455 	 */
2456 	value = local64_read(&next_event->count);
2457 	value = local64_xchg(&event->count, value);
2458 	local64_set(&next_event->count, value);
2459 
2460 	swap(event->total_time_enabled, next_event->total_time_enabled);
2461 	swap(event->total_time_running, next_event->total_time_running);
2462 
2463 	/*
2464 	 * Since we swizzled the values, update the user visible data too.
2465 	 */
2466 	perf_event_update_userpage(event);
2467 	perf_event_update_userpage(next_event);
2468 }
2469 
2470 static void perf_event_sync_stat(struct perf_event_context *ctx,
2471 				   struct perf_event_context *next_ctx)
2472 {
2473 	struct perf_event *event, *next_event;
2474 
2475 	if (!ctx->nr_stat)
2476 		return;
2477 
2478 	update_context_time(ctx);
2479 
2480 	event = list_first_entry(&ctx->event_list,
2481 				   struct perf_event, event_entry);
2482 
2483 	next_event = list_first_entry(&next_ctx->event_list,
2484 					struct perf_event, event_entry);
2485 
2486 	while (&event->event_entry != &ctx->event_list &&
2487 	       &next_event->event_entry != &next_ctx->event_list) {
2488 
2489 		__perf_event_sync_stat(event, next_event);
2490 
2491 		event = list_next_entry(event, event_entry);
2492 		next_event = list_next_entry(next_event, event_entry);
2493 	}
2494 }
2495 
2496 static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2497 					 struct task_struct *next)
2498 {
2499 	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
2500 	struct perf_event_context *next_ctx;
2501 	struct perf_event_context *parent, *next_parent;
2502 	struct perf_cpu_context *cpuctx;
2503 	int do_switch = 1;
2504 
2505 	if (likely(!ctx))
2506 		return;
2507 
2508 	cpuctx = __get_cpu_context(ctx);
2509 	if (!cpuctx->task_ctx)
2510 		return;
2511 
2512 	rcu_read_lock();
2513 	next_ctx = next->perf_event_ctxp[ctxn];
2514 	if (!next_ctx)
2515 		goto unlock;
2516 
2517 	parent = rcu_dereference(ctx->parent_ctx);
2518 	next_parent = rcu_dereference(next_ctx->parent_ctx);
2519 
2520 	/* If neither context has a parent context, they cannot be clones. */
2521 	if (!parent && !next_parent)
2522 		goto unlock;
2523 
2524 	if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
2525 		/*
2526 		 * Looks like the two contexts are clones, so we might be
2527 		 * able to optimize the context switch.  We lock both
2528 		 * contexts and check that they are clones under the
2529 		 * lock (including re-checking that neither has been
2530 		 * uncloned in the meantime).  It doesn't matter which
2531 		 * order we take the locks because no other cpu could
2532 		 * be trying to lock both of these tasks.
2533 		 */
2534 		raw_spin_lock(&ctx->lock);
2535 		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2536 		if (context_equiv(ctx, next_ctx)) {
2537 			/*
2538 			 * XXX do we need a memory barrier of sorts
2539 			 * wrt. rcu_dereference() of perf_event_ctxp
2540 			 */
2541 			task->perf_event_ctxp[ctxn] = next_ctx;
2542 			next->perf_event_ctxp[ctxn] = ctx;
2543 			ctx->task = next;
2544 			next_ctx->task = task;
2545 
2546 			swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
2547 
2548 			do_switch = 0;
2549 
2550 			perf_event_sync_stat(ctx, next_ctx);
2551 		}
2552 		raw_spin_unlock(&next_ctx->lock);
2553 		raw_spin_unlock(&ctx->lock);
2554 	}
2555 unlock:
2556 	rcu_read_unlock();
2557 
2558 	if (do_switch) {
2559 		raw_spin_lock(&ctx->lock);
2560 		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2561 		cpuctx->task_ctx = NULL;
2562 		raw_spin_unlock(&ctx->lock);
2563 	}
2564 }
2565 
2566 void perf_sched_cb_dec(struct pmu *pmu)
2567 {
2568 	this_cpu_dec(perf_sched_cb_usages);
2569 }
2570 
2571 void perf_sched_cb_inc(struct pmu *pmu)
2572 {
2573 	this_cpu_inc(perf_sched_cb_usages);
2574 }
2575 
2576 /*
2577  * This function provides the context switch callback to the lower code
2578  * layer. It is invoked ONLY when the context switch callback is enabled.
2579  */
2580 static void perf_pmu_sched_task(struct task_struct *prev,
2581 				struct task_struct *next,
2582 				bool sched_in)
2583 {
2584 	struct perf_cpu_context *cpuctx;
2585 	struct pmu *pmu;
2586 	unsigned long flags;
2587 
2588 	if (prev == next)
2589 		return;
2590 
2591 	local_irq_save(flags);
2592 
2593 	rcu_read_lock();
2594 
2595 	list_for_each_entry_rcu(pmu, &pmus, entry) {
2596 		if (pmu->sched_task) {
2597 			cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2598 
2599 			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2600 
2601 			perf_pmu_disable(pmu);
2602 
2603 			pmu->sched_task(cpuctx->task_ctx, sched_in);
2604 
2605 			perf_pmu_enable(pmu);
2606 
2607 			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2608 		}
2609 	}
2610 
2611 	rcu_read_unlock();
2612 
2613 	local_irq_restore(flags);
2614 }
2615 
2616 #define for_each_task_context_nr(ctxn)					\
2617 	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2618 
2619 /*
2620  * Called from scheduler to remove the events of the current task,
2621  * with interrupts disabled.
2622  *
2623  * We stop each event and update the event value in event->count.
2624  *
2625  * This does not protect us against NMI, but disable()
2626  * sets the disabled bit in the control field of event _before_
2627 	 * accessing the event control register. If an NMI hits, then it will
2628  * not restart the event.
2629  */
2630 void __perf_event_task_sched_out(struct task_struct *task,
2631 				 struct task_struct *next)
2632 {
2633 	int ctxn;
2634 
2635 	if (__this_cpu_read(perf_sched_cb_usages))
2636 		perf_pmu_sched_task(task, next, false);
2637 
2638 	for_each_task_context_nr(ctxn)
2639 		perf_event_context_sched_out(task, ctxn, next);
2640 
2641 	/*
2642 	 * if cgroup events exist on this CPU, then we need
2643 	 * to check if we have to switch out PMU state.
2644 	 * cgroup events are in system-wide mode only.
2645 	 */
2646 	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
2647 		perf_cgroup_sched_out(task, next);
2648 }
2649 
2650 static void task_ctx_sched_out(struct perf_event_context *ctx)
2651 {
2652 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2653 
2654 	if (!cpuctx->task_ctx)
2655 		return;
2656 
2657 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2658 		return;
2659 
2660 	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2661 	cpuctx->task_ctx = NULL;
2662 }
2663 
2664 /*
2665  * Called with IRQs disabled
2666  */
2667 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2668 			      enum event_type_t event_type)
2669 {
2670 	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2671 }
2672 
2673 static void
2674 ctx_pinned_sched_in(struct perf_event_context *ctx,
2675 		    struct perf_cpu_context *cpuctx)
2676 {
2677 	struct perf_event *event;
2678 
2679 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2680 		if (event->state <= PERF_EVENT_STATE_OFF)
2681 			continue;
2682 		if (!event_filter_match(event))
2683 			continue;
2684 
2685 		/* may need to reset tstamp_enabled */
2686 		if (is_cgroup_event(event))
2687 			perf_cgroup_mark_enabled(event, ctx);
2688 
2689 		if (group_can_go_on(event, cpuctx, 1))
2690 			group_sched_in(event, cpuctx, ctx);
2691 
2692 		/*
2693 		 * If this pinned group hasn't been scheduled,
2694 		 * put it in error state.
2695 		 */
2696 		if (event->state == PERF_EVENT_STATE_INACTIVE) {
2697 			update_group_times(event);
2698 			event->state = PERF_EVENT_STATE_ERROR;
2699 		}
2700 	}
2701 }
2702 
2703 static void
2704 ctx_flexible_sched_in(struct perf_event_context *ctx,
2705 		      struct perf_cpu_context *cpuctx)
2706 {
2707 	struct perf_event *event;
2708 	int can_add_hw = 1;
2709 
2710 	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2711 		/* Ignore events in OFF or ERROR state */
2712 		if (event->state <= PERF_EVENT_STATE_OFF)
2713 			continue;
2714 		/*
2715 		 * Listen to the 'cpu' scheduling filter constraint
2716 		 * of events:
2717 		 */
2718 		if (!event_filter_match(event))
2719 			continue;
2720 
2721 		/* may need to reset tstamp_enabled */
2722 		if (is_cgroup_event(event))
2723 			perf_cgroup_mark_enabled(event, ctx);
2724 
2725 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
2726 			if (group_sched_in(event, cpuctx, ctx))
2727 				can_add_hw = 0;
2728 		}
2729 	}
2730 }
2731 
2732 static void
2733 ctx_sched_in(struct perf_event_context *ctx,
2734 	     struct perf_cpu_context *cpuctx,
2735 	     enum event_type_t event_type,
2736 	     struct task_struct *task)
2737 {
2738 	u64 now;
2739 	int is_active = ctx->is_active;
2740 
2741 	ctx->is_active |= event_type;
2742 	if (likely(!ctx->nr_events))
2743 		return;
2744 
2745 	now = perf_clock();
2746 	ctx->timestamp = now;
2747 	perf_cgroup_set_timestamp(task, ctx);
2748 	/*
2749 	 * First go through the list and put on any pinned groups
2750 	 * in order to give them the best chance of going on.
2751 	 */
2752 	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
2753 		ctx_pinned_sched_in(ctx, cpuctx);
2754 
2755 	/* Then walk through the lower prio flexible groups */
2756 	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
2757 		ctx_flexible_sched_in(ctx, cpuctx);
2758 }
2759 
2760 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2761 			     enum event_type_t event_type,
2762 			     struct task_struct *task)
2763 {
2764 	struct perf_event_context *ctx = &cpuctx->ctx;
2765 
2766 	ctx_sched_in(ctx, cpuctx, event_type, task);
2767 }
2768 
2769 static void perf_event_context_sched_in(struct perf_event_context *ctx,
2770 					struct task_struct *task)
2771 {
2772 	struct perf_cpu_context *cpuctx;
2773 
2774 	cpuctx = __get_cpu_context(ctx);
2775 	if (cpuctx->task_ctx == ctx)
2776 		return;
2777 
2778 	perf_ctx_lock(cpuctx, ctx);
2779 	perf_pmu_disable(ctx->pmu);
2780 	/*
2781 	 * We want to keep the following priority order:
2782 	 * cpu pinned (that don't need to move), task pinned,
2783 	 * cpu flexible, task flexible.
2784 	 */
2785 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2786 
2787 	if (ctx->nr_events)
2788 		cpuctx->task_ctx = ctx;
2789 
2790 	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2791 
2792 	perf_pmu_enable(ctx->pmu);
2793 	perf_ctx_unlock(cpuctx, ctx);
2794 }
2795 
2796 /*
2797  * Called from scheduler to add the events of the current task
2798  * with interrupts disabled.
2799  *
2800  * We restore the event value and then enable it.
2801  *
2802  * This does not protect us against NMI, but enable()
2803  * sets the enabled bit in the control field of event _before_
2804 	 * accessing the event control register. If an NMI hits, then it will
2805  * keep the event running.
2806  */
2807 void __perf_event_task_sched_in(struct task_struct *prev,
2808 				struct task_struct *task)
2809 {
2810 	struct perf_event_context *ctx;
2811 	int ctxn;
2812 
2813 	for_each_task_context_nr(ctxn) {
2814 		ctx = task->perf_event_ctxp[ctxn];
2815 		if (likely(!ctx))
2816 			continue;
2817 
2818 		perf_event_context_sched_in(ctx, task);
2819 	}
2820 	/*
2821 	 * if cgroup events exist on this CPU, then we need
2822 	 * to check if we have to switch in PMU state.
2823 	 * cgroup events are in system-wide mode only.
2824 	 */
2825 	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
2826 		perf_cgroup_sched_in(prev, task);
2827 
2828 	if (__this_cpu_read(perf_sched_cb_usages))
2829 		perf_pmu_sched_task(prev, task, true);
2830 }
2831 
2832 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2833 {
2834 	u64 frequency = event->attr.sample_freq;
2835 	u64 sec = NSEC_PER_SEC;
2836 	u64 divisor, dividend;
2837 
2838 	int count_fls, nsec_fls, frequency_fls, sec_fls;
2839 
2840 	count_fls = fls64(count);
2841 	nsec_fls = fls64(nsec);
2842 	frequency_fls = fls64(frequency);
2843 	sec_fls = 30;
2844 
2845 	/*
2846 	 * We got @count in @nsec, with a target of sample_freq HZ
2847 	 * the target period becomes:
2848 	 *
2849 	 *             @count * 10^9
2850 	 * period = -------------------
2851 	 *          @nsec * sample_freq
2852 	 *
2853 	 */
2854 
2855 	/*
2856 	 * Reduce accuracy by one bit such that @a and @b converge
2857 	 * to a similar magnitude.
2858 	 */
2859 #define REDUCE_FLS(a, b)		\
2860 do {					\
2861 	if (a##_fls > b##_fls) {	\
2862 		a >>= 1;		\
2863 		a##_fls--;		\
2864 	} else {			\
2865 		b >>= 1;		\
2866 		b##_fls--;		\
2867 	}				\
2868 } while (0)
2869 
2870 	/*
2871 	 * Reduce accuracy until either term fits in a u64, then proceed with
2872 	 * the other, so that finally we can do a u64/u64 division.
2873 	 */
2874 	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2875 		REDUCE_FLS(nsec, frequency);
2876 		REDUCE_FLS(sec, count);
2877 	}
2878 
2879 	if (count_fls + sec_fls > 64) {
2880 		divisor = nsec * frequency;
2881 
2882 		while (count_fls + sec_fls > 64) {
2883 			REDUCE_FLS(count, sec);
2884 			divisor >>= 1;
2885 		}
2886 
2887 		dividend = count * sec;
2888 	} else {
2889 		dividend = count * sec;
2890 
2891 		while (nsec_fls + frequency_fls > 64) {
2892 			REDUCE_FLS(nsec, frequency);
2893 			dividend >>= 1;
2894 		}
2895 
2896 		divisor = nsec * frequency;
2897 	}
2898 
2899 	if (!divisor)
2900 		return dividend;
2901 
2902 	return div64_u64(dividend, divisor);
2903 }
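
/*
 * Worked example (annotation, not part of the original source): with
 * attr.sample_freq = 4000 Hz and count = 1,000,000 events observed in
 * nsec = 10,000,000 ns, the target period becomes
 *
 *	1,000,000 * 10^9 / (10,000,000 * 4000) = 25,000 events/sample,
 *
 * i.e. 40 samples per 10ms window, or the requested 4000 samples/sec.
 * The REDUCE_FLS() dance above only kicks in when the intermediate
 * products would overflow 64 bits; for values this small it is a no-op.
 */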
2904 
2905 static DEFINE_PER_CPU(int, perf_throttled_count);
2906 static DEFINE_PER_CPU(u64, perf_throttled_seq);
2907 
2908 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
2909 {
2910 	struct hw_perf_event *hwc = &event->hw;
2911 	s64 period, sample_period;
2912 	s64 delta;
2913 
2914 	period = perf_calculate_period(event, nsec, count);
2915 
2916 	delta = (s64)(period - hwc->sample_period);
2917 	delta = (delta + 7) / 8; /* low pass filter */
2918 
2919 	sample_period = hwc->sample_period + delta;
2920 
2921 	if (!sample_period)
2922 		sample_period = 1;
2923 
2924 	hwc->sample_period = sample_period;
2925 
2926 	if (local64_read(&hwc->period_left) > 8*sample_period) {
2927 		if (disable)
2928 			event->pmu->stop(event, PERF_EF_UPDATE);
2929 
2930 		local64_set(&hwc->period_left, 0);
2931 
2932 		if (disable)
2933 			event->pmu->start(event, PERF_EF_RELOAD);
2934 	}
2935 }
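
/*
 * Worked example of the low-pass filter above (annotation, not part of
 * the original source): with hwc->sample_period = 20,000 and a freshly
 * computed target period of 28,000, delta = 8,000 and the filtered
 * step is (8,000 + 7) / 8 = 1,000, yielding a new sample_period of
 * 21,000. The period thus moves only 1/8th of the way towards the
 * target per adjustment, damping the reaction to bursty event rates.
 */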
2936 
2937 /*
2938  * combine freq adjustment with unthrottling to avoid two passes over the
2939  * events. At the same time, make sure, having freq events does not change
2940  * events. At the same time, make sure that having freq events does not change
2941  */
2942 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2943 					   int needs_unthr)
2944 {
2945 	struct perf_event *event;
2946 	struct hw_perf_event *hwc;
2947 	u64 now, period = TICK_NSEC;
2948 	s64 delta;
2949 
2950 	/*
2951 	 * only need to iterate over all events iff:
2952 	 * - the context has events in frequency mode (needs freq adjust)
2953 	 * - there are events to unthrottle on this cpu
2954 	 */
2955 	if (!(ctx->nr_freq || needs_unthr))
2956 		return;
2957 
2958 	raw_spin_lock(&ctx->lock);
2959 	perf_pmu_disable(ctx->pmu);
2960 
2961 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2962 		if (event->state != PERF_EVENT_STATE_ACTIVE)
2963 			continue;
2964 
2965 		if (!event_filter_match(event))
2966 			continue;
2967 
2968 		perf_pmu_disable(event->pmu);
2969 
2970 		hwc = &event->hw;
2971 
2972 		if (hwc->interrupts == MAX_INTERRUPTS) {
2973 			hwc->interrupts = 0;
2974 			perf_log_throttle(event, 1);
2975 			event->pmu->start(event, 0);
2976 		}
2977 
2978 		if (!event->attr.freq || !event->attr.sample_freq)
2979 			goto next;
2980 
2981 		/*
2982 		 * stop the event and update event->count
2983 		 */
2984 		event->pmu->stop(event, PERF_EF_UPDATE);
2985 
2986 		now = local64_read(&event->count);
2987 		delta = now - hwc->freq_count_stamp;
2988 		hwc->freq_count_stamp = now;
2989 
2990 		/*
2991 		 * Restart the event; reload only if the value has
2992 		 * changed.
2993 		 * We have stopped the event already, so tell
2994 		 * perf_adjust_period() to avoid stopping it
2995 		 * twice.
2996 		 */
2997 		if (delta > 0)
2998 			perf_adjust_period(event, period, delta, false);
2999 
3000 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
3001 	next:
3002 		perf_pmu_enable(event->pmu);
3003 	}
3004 
3005 	perf_pmu_enable(ctx->pmu);
3006 	raw_spin_unlock(&ctx->lock);
3007 }
3008 
3009 /*
3010  * Round-robin a context's events:
3011  */
3012 static void rotate_ctx(struct perf_event_context *ctx)
3013 {
3014 	/*
3015 	 * Rotate the first entry of the non-pinned groups to the end. Rotation
3016 	 * might be disabled by the inheritance code.
3017 	 */
3018 	if (!ctx->rotate_disable)
3019 		list_rotate_left(&ctx->flexible_groups);
3020 }
3021 
3022 static int perf_rotate_context(struct perf_cpu_context *cpuctx)
3023 {
3024 	struct perf_event_context *ctx = NULL;
3025 	int rotate = 0;
3026 
3027 	if (cpuctx->ctx.nr_events) {
3028 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
3029 			rotate = 1;
3030 	}
3031 
3032 	ctx = cpuctx->task_ctx;
3033 	if (ctx && ctx->nr_events) {
3034 		if (ctx->nr_events != ctx->nr_active)
3035 			rotate = 1;
3036 	}
3037 
3038 	if (!rotate)
3039 		goto done;
3040 
3041 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3042 	perf_pmu_disable(cpuctx->ctx.pmu);
3043 
3044 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3045 	if (ctx)
3046 		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
3047 
3048 	rotate_ctx(&cpuctx->ctx);
3049 	if (ctx)
3050 		rotate_ctx(ctx);
3051 
3052 	perf_event_sched_in(cpuctx, ctx, current);
3053 
3054 	perf_pmu_enable(cpuctx->ctx.pmu);
3055 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3056 done:
3057 
3058 	return rotate;
3059 }
3060 
3061 #ifdef CONFIG_NO_HZ_FULL
3062 bool perf_event_can_stop_tick(void)
3063 {
3064 	if (atomic_read(&nr_freq_events) ||
3065 	    __this_cpu_read(perf_throttled_count))
3066 		return false;
3067 	else
3068 		return true;
3069 }
3070 #endif
3071 
3072 void perf_event_task_tick(void)
3073 {
3074 	struct list_head *head = this_cpu_ptr(&active_ctx_list);
3075 	struct perf_event_context *ctx, *tmp;
3076 	int throttled;
3077 
3078 	WARN_ON(!irqs_disabled());
3079 
3080 	__this_cpu_inc(perf_throttled_seq);
3081 	throttled = __this_cpu_xchg(perf_throttled_count, 0);
3082 
3083 	list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
3084 		perf_adjust_freq_unthr_context(ctx, throttled);
3085 }
3086 
3087 static int event_enable_on_exec(struct perf_event *event,
3088 				struct perf_event_context *ctx)
3089 {
3090 	if (!event->attr.enable_on_exec)
3091 		return 0;
3092 
3093 	event->attr.enable_on_exec = 0;
3094 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
3095 		return 0;
3096 
3097 	__perf_event_mark_enabled(event);
3098 
3099 	return 1;
3100 }
3101 
3102 /*
3103  * Enable all of a task's events that have been marked enable-on-exec.
3104  * This expects task == current.
3105  */
3106 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
3107 {
3108 	struct perf_event_context *clone_ctx = NULL;
3109 	struct perf_event *event;
3110 	unsigned long flags;
3111 	int enabled = 0;
3112 	int ret;
3113 
3114 	local_irq_save(flags);
3115 	if (!ctx || !ctx->nr_events)
3116 		goto out;
3117 
3118 	/*
3119 	 * We must switch out cgroup events to avoid a conflict
3120 	 * when invoking perf_event_context_sched_in() later on
3121 	 * in this function. Otherwise we end up trying to
3122 	 * switch in cgroup events that are already scheduled
3123 	 * in.
3124 	 */
3125 	perf_cgroup_sched_out(current, NULL);
3126 
3127 	raw_spin_lock(&ctx->lock);
3128 	task_ctx_sched_out(ctx);
3129 
3130 	list_for_each_entry(event, &ctx->event_list, event_entry) {
3131 		ret = event_enable_on_exec(event, ctx);
3132 		if (ret)
3133 			enabled = 1;
3134 	}
3135 
3136 	/*
3137 	 * Unclone this context if we enabled any event.
3138 	 */
3139 	if (enabled)
3140 		clone_ctx = unclone_ctx(ctx);
3141 
3142 	raw_spin_unlock(&ctx->lock);
3143 
3144 	/*
3145 	 * This also switches in cgroup events, if any:
3146 	 */
3147 	perf_event_context_sched_in(ctx, ctx->task);
3148 out:
3149 	local_irq_restore(flags);
3150 
3151 	if (clone_ctx)
3152 		put_ctx(clone_ctx);
3153 }
3154 
3155 void perf_event_exec(void)
3156 {
3157 	struct perf_event_context *ctx;
3158 	int ctxn;
3159 
3160 	rcu_read_lock();
3161 	for_each_task_context_nr(ctxn) {
3162 		ctx = current->perf_event_ctxp[ctxn];
3163 		if (!ctx)
3164 			continue;
3165 
3166 		perf_event_enable_on_exec(ctx);
3167 	}
3168 	rcu_read_unlock();
3169 }
3170 
3171 /*
3172  * Cross CPU call to read the hardware event
3173  */
3174 static void __perf_event_read(void *info)
3175 {
3176 	struct perf_event *event = info;
3177 	struct perf_event_context *ctx = event->ctx;
3178 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
3179 
3180 	/*
3181 	 * If this is a task context, we need to check whether it is
3182 	 * the current task context of this cpu.  If not, it has been
3183 	 * scheduled out before the smp call arrived.  In that case
3184 	 * event->count would have been updated to a recent sample
3185 	 * when the event was scheduled out.
3186 	 */
3187 	if (ctx->task && cpuctx->task_ctx != ctx)
3188 		return;
3189 
3190 	raw_spin_lock(&ctx->lock);
3191 	if (ctx->is_active) {
3192 		update_context_time(ctx);
3193 		update_cgrp_time_from_event(event);
3194 	}
3195 	update_event_times(event);
3196 	if (event->state == PERF_EVENT_STATE_ACTIVE)
3197 		event->pmu->read(event);
3198 	raw_spin_unlock(&ctx->lock);
3199 }
3200 
3201 static inline u64 perf_event_count(struct perf_event *event)
3202 {
3203 	if (event->pmu->count)
3204 		return event->pmu->count(event);
3205 
3206 	return __perf_event_count(event);
3207 }
3208 
3209 static u64 perf_event_read(struct perf_event *event)
3210 {
3211 	/*
3212 	 * If the event is enabled and currently active on a CPU, update the
3213 	 * value in the event structure:
3214 	 */
3215 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
3216 		smp_call_function_single(event->oncpu,
3217 					 __perf_event_read, event, 1);
3218 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
3219 		struct perf_event_context *ctx = event->ctx;
3220 		unsigned long flags;
3221 
3222 		raw_spin_lock_irqsave(&ctx->lock, flags);
3223 		/*
3224 		 * We may read while the context is not active
3225 		 * (e.g., the thread is blocked); in that case
3226 		 * we cannot update the context time.
3227 		 */
3228 		if (ctx->is_active) {
3229 			update_context_time(ctx);
3230 			update_cgrp_time_from_event(event);
3231 		}
3232 		update_event_times(event);
3233 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
3234 	}
3235 
3236 	return perf_event_count(event);
3237 }
3238 
3239 /*
3240  * Initialize the perf_event context in a task_struct:
3241  */
3242 static void __perf_event_init_context(struct perf_event_context *ctx)
3243 {
3244 	raw_spin_lock_init(&ctx->lock);
3245 	mutex_init(&ctx->mutex);
3246 	INIT_LIST_HEAD(&ctx->active_ctx_list);
3247 	INIT_LIST_HEAD(&ctx->pinned_groups);
3248 	INIT_LIST_HEAD(&ctx->flexible_groups);
3249 	INIT_LIST_HEAD(&ctx->event_list);
3250 	atomic_set(&ctx->refcount, 1);
3251 	INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work);
3252 }
3253 
3254 static struct perf_event_context *
3255 alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3256 {
3257 	struct perf_event_context *ctx;
3258 
3259 	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3260 	if (!ctx)
3261 		return NULL;
3262 
3263 	__perf_event_init_context(ctx);
3264 	if (task) {
3265 		ctx->task = task;
3266 		get_task_struct(task);
3267 	}
3268 	ctx->pmu = pmu;
3269 
3270 	return ctx;
3271 }
3272 
3273 static struct task_struct *
3274 find_lively_task_by_vpid(pid_t vpid)
3275 {
3276 	struct task_struct *task;
3277 	int err;
3278 
3279 	rcu_read_lock();
3280 	if (!vpid)
3281 		task = current;
3282 	else
3283 		task = find_task_by_vpid(vpid);
3284 	if (task)
3285 		get_task_struct(task);
3286 	rcu_read_unlock();
3287 
3288 	if (!task)
3289 		return ERR_PTR(-ESRCH);
3290 
3291 	/* Reuse ptrace permission checks for now. */
3292 	err = -EACCES;
3293 	if (!ptrace_may_access(task, PTRACE_MODE_READ))
3294 		goto errout;
3295 
3296 	return task;
3297 errout:
3298 	put_task_struct(task);
3299 	return ERR_PTR(err);
3300 
3301 }
3302 
3303 /*
3304  * Returns a matching context with refcount and pincount.
3305  */
3306 static struct perf_event_context *
3307 find_get_context(struct pmu *pmu, struct task_struct *task,
3308 		struct perf_event *event)
3309 {
3310 	struct perf_event_context *ctx, *clone_ctx = NULL;
3311 	struct perf_cpu_context *cpuctx;
3312 	void *task_ctx_data = NULL;
3313 	unsigned long flags;
3314 	int ctxn, err;
3315 	int cpu = event->cpu;
3316 
3317 	if (!task) {
3318 		/* Must be root to operate on a CPU event: */
3319 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3320 			return ERR_PTR(-EACCES);
3321 
3322 		/*
3323 		 * We could be clever and allow attaching an event to an
3324 		 * offline CPU and activate it when the CPU comes up, but
3325 		 * that's for later.
3326 		 */
3327 		if (!cpu_online(cpu))
3328 			return ERR_PTR(-ENODEV);
3329 
3330 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
3331 		ctx = &cpuctx->ctx;
3332 		get_ctx(ctx);
3333 		++ctx->pin_count;
3334 
3335 		return ctx;
3336 	}
3337 
3338 	err = -EINVAL;
3339 	ctxn = pmu->task_ctx_nr;
3340 	if (ctxn < 0)
3341 		goto errout;
3342 
3343 	if (event->attach_state & PERF_ATTACH_TASK_DATA) {
3344 		task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
3345 		if (!task_ctx_data) {
3346 			err = -ENOMEM;
3347 			goto errout;
3348 		}
3349 	}
3350 
3351 retry:
3352 	ctx = perf_lock_task_context(task, ctxn, &flags);
3353 	if (ctx) {
3354 		clone_ctx = unclone_ctx(ctx);
3355 		++ctx->pin_count;
3356 
3357 		if (task_ctx_data && !ctx->task_ctx_data) {
3358 			ctx->task_ctx_data = task_ctx_data;
3359 			task_ctx_data = NULL;
3360 		}
3361 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
3362 
3363 		if (clone_ctx)
3364 			put_ctx(clone_ctx);
3365 	} else {
3366 		ctx = alloc_perf_context(pmu, task);
3367 		err = -ENOMEM;
3368 		if (!ctx)
3369 			goto errout;
3370 
3371 		if (task_ctx_data) {
3372 			ctx->task_ctx_data = task_ctx_data;
3373 			task_ctx_data = NULL;
3374 		}
3375 
3376 		err = 0;
3377 		mutex_lock(&task->perf_event_mutex);
3378 		/*
3379 		 * If it has already passed perf_event_exit_task(),
3380 		 * we must see PF_EXITING; it takes this mutex too.
3381 		 */
3382 		if (task->flags & PF_EXITING)
3383 			err = -ESRCH;
3384 		else if (task->perf_event_ctxp[ctxn])
3385 			err = -EAGAIN;
3386 		else {
3387 			get_ctx(ctx);
3388 			++ctx->pin_count;
3389 			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
3390 		}
3391 		mutex_unlock(&task->perf_event_mutex);
3392 
3393 		if (unlikely(err)) {
3394 			put_ctx(ctx);
3395 
3396 			if (err == -EAGAIN)
3397 				goto retry;
3398 			goto errout;
3399 		}
3400 	}
3401 
3402 	kfree(task_ctx_data);
3403 	return ctx;
3404 
3405 errout:
3406 	kfree(task_ctx_data);
3407 	return ERR_PTR(err);
3408 }
3409 
3410 static void perf_event_free_filter(struct perf_event *event);
3411 static void perf_event_free_bpf_prog(struct perf_event *event);
3412 
3413 static void free_event_rcu(struct rcu_head *head)
3414 {
3415 	struct perf_event *event;
3416 
3417 	event = container_of(head, struct perf_event, rcu_head);
3418 	if (event->ns)
3419 		put_pid_ns(event->ns);
3420 	perf_event_free_filter(event);
3421 	kfree(event);
3422 }
3423 
3424 static void ring_buffer_attach(struct perf_event *event,
3425 			       struct ring_buffer *rb);
3426 
3427 static void unaccount_event_cpu(struct perf_event *event, int cpu)
3428 {
3429 	if (event->parent)
3430 		return;
3431 
3432 	if (is_cgroup_event(event))
3433 		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3434 }
3435 
3436 static void unaccount_event(struct perf_event *event)
3437 {
3438 	if (event->parent)
3439 		return;
3440 
3441 	if (event->attach_state & PERF_ATTACH_TASK)
3442 		static_key_slow_dec_deferred(&perf_sched_events);
3443 	if (event->attr.mmap || event->attr.mmap_data)
3444 		atomic_dec(&nr_mmap_events);
3445 	if (event->attr.comm)
3446 		atomic_dec(&nr_comm_events);
3447 	if (event->attr.task)
3448 		atomic_dec(&nr_task_events);
3449 	if (event->attr.freq)
3450 		atomic_dec(&nr_freq_events);
3451 	if (is_cgroup_event(event))
3452 		static_key_slow_dec_deferred(&perf_sched_events);
3453 	if (has_branch_stack(event))
3454 		static_key_slow_dec_deferred(&perf_sched_events);
3455 
3456 	unaccount_event_cpu(event, event->cpu);
3457 }
3458 
3459 /*
3460  * The following implement mutual exclusion of events on "exclusive" pmus
3461  * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
3462  * at a time, so we disallow creating events that might conflict, namely:
3463  *
3464  *  1) cpu-wide events in the presence of per-task events,
3465  *  2) per-task events in the presence of cpu-wide events,
3466  *  3) two matching events on the same context.
3467  *
3468  * The former two cases are handled in the allocation path (perf_event_alloc(),
3469  * __free_event()), the latter -- before the first perf_install_in_context().
3470  */
3471 static int exclusive_event_init(struct perf_event *event)
3472 {
3473 	struct pmu *pmu = event->pmu;
3474 
3475 	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3476 		return 0;
3477 
3478 	/*
3479 	 * Prevent co-existence of per-task and cpu-wide events on the
3480 	 * same exclusive pmu.
3481 	 *
3482 	 * Negative pmu::exclusive_cnt means there are cpu-wide
3483 	 * events on this "exclusive" pmu, positive means there are
3484 	 * per-task events.
3485 	 *
3486 	 * Since this is called in perf_event_alloc() path, event::ctx
3487 	 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
3488 	 * to mean "per-task event", because unlike other attach states it
3489 	 * never gets cleared.
3490 	 */
3491 	if (event->attach_state & PERF_ATTACH_TASK) {
3492 		if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
3493 			return -EBUSY;
3494 	} else {
3495 		if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
3496 			return -EBUSY;
3497 	}
3498 
3499 	return 0;
3500 }
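
/*
 * Illustrative pmu->exclusive_cnt states (annotation, not part of the
 * original source):
 *
 *	 0	no events; either kind may attach
 *	+2	two per-task events; a cpu-wide event gets -EBUSY
 *	-1	one cpu-wide event; a per-task event gets -EBUSY
 */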
3501 
3502 static void exclusive_event_destroy(struct perf_event *event)
3503 {
3504 	struct pmu *pmu = event->pmu;
3505 
3506 	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3507 		return;
3508 
3509 	/* see comment in exclusive_event_init() */
3510 	if (event->attach_state & PERF_ATTACH_TASK)
3511 		atomic_dec(&pmu->exclusive_cnt);
3512 	else
3513 		atomic_inc(&pmu->exclusive_cnt);
3514 }
3515 
3516 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
3517 {
3518 	if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) &&
3519 	    (e1->cpu == e2->cpu ||
3520 	     e1->cpu == -1 ||
3521 	     e2->cpu == -1))
3522 		return true;
3523 	return false;
3524 }
3525 
3526 /* Called under the same ctx::mutex as perf_install_in_context() */
3527 static bool exclusive_event_installable(struct perf_event *event,
3528 					struct perf_event_context *ctx)
3529 {
3530 	struct perf_event *iter_event;
3531 	struct pmu *pmu = event->pmu;
3532 
3533 	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3534 		return true;
3535 
3536 	list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
3537 		if (exclusive_event_match(iter_event, event))
3538 			return false;
3539 	}
3540 
3541 	return true;
3542 }
3543 
3544 static void __free_event(struct perf_event *event)
3545 {
3546 	if (!event->parent) {
3547 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3548 			put_callchain_buffers();
3549 	}
3550 
3551 	perf_event_free_bpf_prog(event);
3552 
3553 	if (event->destroy)
3554 		event->destroy(event);
3555 
3556 	if (event->ctx)
3557 		put_ctx(event->ctx);
3558 
3559 	if (event->pmu) {
3560 		exclusive_event_destroy(event);
3561 		module_put(event->pmu->module);
3562 	}
3563 
3564 	call_rcu(&event->rcu_head, free_event_rcu);
3565 }
3566 
3567 static void _free_event(struct perf_event *event)
3568 {
3569 	irq_work_sync(&event->pending);
3570 
3571 	unaccount_event(event);
3572 
3573 	if (event->rb) {
3574 		/*
3575 		 * Can happen when we close an event with re-directed output.
3576 		 *
3577 		 * Since we have a 0 refcount, perf_mmap_close() will skip
3578 		 * over us; possibly making our ring_buffer_put() the last.
3579 		 */
3580 		mutex_lock(&event->mmap_mutex);
3581 		ring_buffer_attach(event, NULL);
3582 		mutex_unlock(&event->mmap_mutex);
3583 	}
3584 
3585 	if (is_cgroup_event(event))
3586 		perf_detach_cgroup(event);
3587 
3588 	__free_event(event);
3589 }
3590 
3591 /*
3592  * Used to free events which have a known refcount of 1, such as in error
3593  * paths where the event isn't exposed yet, and for inherited events.
3594  */
3595 static void free_event(struct perf_event *event)
3596 {
3597 	if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
3598 				"unexpected event refcount: %ld; ptr=%p\n",
3599 				atomic_long_read(&event->refcount), event)) {
3600 		/* leak to avoid use-after-free */
3601 		return;
3602 	}
3603 
3604 	_free_event(event);
3605 }
3606 
3607 /*
3608  * Remove user event from the owner task.
3609  */
3610 static void perf_remove_from_owner(struct perf_event *event)
3611 {
3612 	struct task_struct *owner;
3613 
3614 	rcu_read_lock();
3615 	owner = ACCESS_ONCE(event->owner);
3616 	/*
3617 	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
3618 	 * !owner it means the list deletion is complete and we can indeed
3619 	 * free this event, otherwise we need to serialize on
3620 	 * owner->perf_event_mutex.
3621 	 */
3622 	smp_read_barrier_depends();
3623 	if (owner) {
3624 		/*
3625 		 * Since delayed_put_task_struct() also drops the last
3626 		 * task reference we can safely take a new reference
3627 		 * while holding the rcu_read_lock().
3628 		 */
3629 		get_task_struct(owner);
3630 	}
3631 	rcu_read_unlock();
3632 
3633 	if (owner) {
3634 		/*
3635 		 * If we're here through perf_event_exit_task() we're already
3636 		 * holding ctx->mutex which would be an inversion wrt. the
3637 		 * normal lock order.
3638 		 *
3639 		 * However we can safely take this lock because it's the child
3640 		 * ctx->mutex.
3641 		 */
3642 		mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
3643 
3644 		/*
3645 		 * We have to re-check the event->owner field, if it is cleared
3646 		 * we raced with perf_event_exit_task(), acquiring the mutex
3647 		 * ensured they're done, and we can proceed with freeing the
3648 		 * event.
3649 		 */
3650 		if (event->owner)
3651 			list_del_init(&event->owner_entry);
3652 		mutex_unlock(&owner->perf_event_mutex);
3653 		put_task_struct(owner);
3654 	}
3655 }
3656 
3657 static void put_event(struct perf_event *event)
3658 {
3659 	struct perf_event_context *ctx;
3660 
3661 	if (!atomic_long_dec_and_test(&event->refcount))
3662 		return;
3663 
3664 	if (!is_kernel_event(event))
3665 		perf_remove_from_owner(event);
3666 
3667 	/*
3668 	 * There are two ways this annotation is useful:
3669 	 *
3670 	 *  1) there is a lock recursion from perf_event_exit_task
3671 	 *     see the comment there.
3672 	 *
3673 	 *  2) there is a lock-inversion with mmap_sem through
3674 	 *     perf_event_read_group(), which takes faults while
3675 	 *     holding ctx->mutex, however this is called after
3676 	 *     the last filedesc died, so there is no possibility
3677 	 *     to trigger the AB-BA case.
3678 	 */
3679 	ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
3680 	WARN_ON_ONCE(ctx->parent_ctx);
3681 	perf_remove_from_context(event, true);
3682 	perf_event_ctx_unlock(event, ctx);
3683 
3684 	_free_event(event);
3685 }
3686 
3687 int perf_event_release_kernel(struct perf_event *event)
3688 {
3689 	put_event(event);
3690 	return 0;
3691 }
3692 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
3693 
3694 /*
3695  * Called when the last reference to the file is gone.
3696  */
3697 static int perf_release(struct inode *inode, struct file *file)
3698 {
3699 	put_event(file->private_data);
3700 	return 0;
3701 }
3702 
3703 /*
3704  * Remove all orphaned events from the context.
3705  */
3706 static void orphans_remove_work(struct work_struct *work)
3707 {
3708 	struct perf_event_context *ctx;
3709 	struct perf_event *event, *tmp;
3710 
3711 	ctx = container_of(work, struct perf_event_context,
3712 			   orphans_remove.work);
3713 
3714 	mutex_lock(&ctx->mutex);
3715 	list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
3716 		struct perf_event *parent_event = event->parent;
3717 
3718 		if (!is_orphaned_child(event))
3719 			continue;
3720 
3721 		perf_remove_from_context(event, true);
3722 
3723 		mutex_lock(&parent_event->child_mutex);
3724 		list_del_init(&event->child_list);
3725 		mutex_unlock(&parent_event->child_mutex);
3726 
3727 		free_event(event);
3728 		put_event(parent_event);
3729 	}
3730 
3731 	raw_spin_lock_irq(&ctx->lock);
3732 	ctx->orphans_remove_sched = false;
3733 	raw_spin_unlock_irq(&ctx->lock);
3734 	mutex_unlock(&ctx->mutex);
3735 
3736 	put_ctx(ctx);
3737 }
3738 
3739 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
3740 {
3741 	struct perf_event *child;
3742 	u64 total = 0;
3743 
3744 	*enabled = 0;
3745 	*running = 0;
3746 
3747 	mutex_lock(&event->child_mutex);
3748 	total += perf_event_read(event);
3749 	*enabled += event->total_time_enabled +
3750 			atomic64_read(&event->child_total_time_enabled);
3751 	*running += event->total_time_running +
3752 			atomic64_read(&event->child_total_time_running);
3753 
3754 	list_for_each_entry(child, &event->child_list, child_list) {
3755 		total += perf_event_read(child);
3756 		*enabled += child->total_time_enabled;
3757 		*running += child->total_time_running;
3758 	}
3759 	mutex_unlock(&event->child_mutex);
3760 
3761 	return total;
3762 }
3763 EXPORT_SYMBOL_GPL(perf_event_read_value);
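
/*
 * Usage sketch (annotation, not part of the original source): a
 * kernel-side consumer, e.g. one that created its event with
 * perf_event_create_kernel_counter(), would typically scale the count
 * by the enabled/running ratio to compensate for multiplexing. The
 * helper below is hypothetical and compiled out.
 */
#if 0
static u64 example_read_scaled(struct perf_event *event)
{
	u64 enabled, running;
	u64 count = perf_event_read_value(event, &enabled, &running);

	/* Extrapolate if the event did not run the whole time: */
	if (running && running < enabled)
		count = div64_u64(count * enabled, running);

	return count;
}
#endif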
3764 
3765 static int perf_event_read_group(struct perf_event *event,
3766 				   u64 read_format, char __user *buf)
3767 {
3768 	struct perf_event *leader = event->group_leader, *sub;
3769 	struct perf_event_context *ctx = leader->ctx;
3770 	int n = 0, size = 0, ret;
3771 	u64 count, enabled, running;
3772 	u64 values[5];
3773 
3774 	lockdep_assert_held(&ctx->mutex);
3775 
3776 	count = perf_event_read_value(leader, &enabled, &running);
3777 
3778 	values[n++] = 1 + leader->nr_siblings;
3779 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3780 		values[n++] = enabled;
3781 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3782 		values[n++] = running;
3783 	values[n++] = count;
3784 	if (read_format & PERF_FORMAT_ID)
3785 		values[n++] = primary_event_id(leader);
3786 
3787 	size = n * sizeof(u64);
3788 
3789 	if (copy_to_user(buf, values, size))
3790 		return -EFAULT;
3791 
3792 	ret = size;
3793 
3794 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3795 		n = 0;
3796 
3797 		values[n++] = perf_event_read_value(sub, &enabled, &running);
3798 		if (read_format & PERF_FORMAT_ID)
3799 			values[n++] = primary_event_id(sub);
3800 
3801 		size = n * sizeof(u64);
3802 
3803 		if (copy_to_user(buf + ret, values, size))
3804 			return -EFAULT;
3806 
3807 		ret += size;
3808 	}
3809 
3810 	return ret;
3811 }
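
/*
 * Resulting read() layout for PERF_FORMAT_GROUP (annotation, not part
 * of the original source), matching the code above:
 *
 *	u64 nr;				1 + leader->nr_siblings
 *	u64 time_enabled;		if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	u64 time_running;		if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	{ u64 value; [u64 id;] }	for the leader, then each sibling
 */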
3812 
3813 static int perf_event_read_one(struct perf_event *event,
3814 				 u64 read_format, char __user *buf)
3815 {
3816 	u64 enabled, running;
3817 	u64 values[4];
3818 	int n = 0;
3819 
3820 	values[n++] = perf_event_read_value(event, &enabled, &running);
3821 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3822 		values[n++] = enabled;
3823 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3824 		values[n++] = running;
3825 	if (read_format & PERF_FORMAT_ID)
3826 		values[n++] = primary_event_id(event);
3827 
3828 	if (copy_to_user(buf, values, n * sizeof(u64)))
3829 		return -EFAULT;
3830 
3831 	return n * sizeof(u64);
3832 }
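
/*
 * Resulting read() layout for a non-group event (annotation, not part
 * of the original source); each field is present only when the
 * matching read_format bit is set:
 *
 *	u64 value;
 *	u64 time_enabled;	PERF_FORMAT_TOTAL_TIME_ENABLED
 *	u64 time_running;	PERF_FORMAT_TOTAL_TIME_RUNNING
 *	u64 id;			PERF_FORMAT_ID
 */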
3833 
3834 static bool is_event_hup(struct perf_event *event)
3835 {
3836 	bool no_children;
3837 
3838 	if (event->state != PERF_EVENT_STATE_EXIT)
3839 		return false;
3840 
3841 	mutex_lock(&event->child_mutex);
3842 	no_children = list_empty(&event->child_list);
3843 	mutex_unlock(&event->child_mutex);
3844 	return no_children;
3845 }
3846 
3847 /*
3848  * Read the performance event - simple non-blocking version for now
3849  */
3850 static ssize_t
3851 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
3852 {
3853 	u64 read_format = event->attr.read_format;
3854 	int ret;
3855 
3856 	/*
3857 	 * Return end-of-file for a read on an event that is in
3858 	 * error state (i.e. because it was pinned but it couldn't be
3859 	 * scheduled on to the CPU at some point).
3860 	 */
3861 	if (event->state == PERF_EVENT_STATE_ERROR)
3862 		return 0;
3863 
3864 	if (count < event->read_size)
3865 		return -ENOSPC;
3866 
3867 	WARN_ON_ONCE(event->ctx->parent_ctx);
3868 	if (read_format & PERF_FORMAT_GROUP)
3869 		ret = perf_event_read_group(event, read_format, buf);
3870 	else
3871 		ret = perf_event_read_one(event, read_format, buf);
3872 
3873 	return ret;
3874 }
3875 
3876 static ssize_t
3877 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3878 {
3879 	struct perf_event *event = file->private_data;
3880 	struct perf_event_context *ctx;
3881 	int ret;
3882 
3883 	ctx = perf_event_ctx_lock(event);
3884 	ret = perf_read_hw(event, buf, count);
3885 	perf_event_ctx_unlock(event, ctx);
3886 
3887 	return ret;
3888 }
3889 
3890 static unsigned int perf_poll(struct file *file, poll_table *wait)
3891 {
3892 	struct perf_event *event = file->private_data;
3893 	struct ring_buffer *rb;
3894 	unsigned int events = POLLHUP;
3895 
3896 	poll_wait(file, &event->waitq, wait);
3897 
3898 	if (is_event_hup(event))
3899 		return events;
3900 
3901 	/*
3902 	 * Pin the event->rb by taking event->mmap_mutex; otherwise
3903 	 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
3904 	 */
3905 	mutex_lock(&event->mmap_mutex);
3906 	rb = event->rb;
3907 	if (rb)
3908 		events = atomic_xchg(&rb->poll, 0);
3909 	mutex_unlock(&event->mmap_mutex);
3910 	return events;
3911 }
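
/*
 * A minimal userspace sketch of waiting on a perf fd as served by
 * perf_poll() above; "perf_fd" is assumed to come from
 * perf_event_open(). POLLHUP is reported once the event has exited
 * and no child events remain:
 *
 *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLHUP))
 *		;	/* event is done; drain the buffer and close */
 */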
3912 
3913 static void _perf_event_reset(struct perf_event *event)
3914 {
3915 	(void)perf_event_read(event);
3916 	local64_set(&event->count, 0);
3917 	perf_event_update_userpage(event);
3918 }
3919 
3920 /*
3921  * Holding the top-level event's child_mutex means that any
3922  * descendant process that has inherited this event will block
3923  * in sync_child_event if it goes to exit, thus satisfying the
3924  * task existence requirements of perf_event_enable/disable.
3925  */
3926 static void perf_event_for_each_child(struct perf_event *event,
3927 					void (*func)(struct perf_event *))
3928 {
3929 	struct perf_event *child;
3930 
3931 	WARN_ON_ONCE(event->ctx->parent_ctx);
3932 
3933 	mutex_lock(&event->child_mutex);
3934 	func(event);
3935 	list_for_each_entry(child, &event->child_list, child_list)
3936 		func(child);
3937 	mutex_unlock(&event->child_mutex);
3938 }
3939 
3940 static void perf_event_for_each(struct perf_event *event,
3941 				  void (*func)(struct perf_event *))
3942 {
3943 	struct perf_event_context *ctx = event->ctx;
3944 	struct perf_event *sibling;
3945 
3946 	lockdep_assert_held(&ctx->mutex);
3947 
3948 	event = event->group_leader;
3949 
3950 	perf_event_for_each_child(event, func);
3951 	list_for_each_entry(sibling, &event->sibling_list, group_entry)
3952 		perf_event_for_each_child(sibling, func);
3953 }
3954 
3955 static int perf_event_period(struct perf_event *event, u64 __user *arg)
3956 {
3957 	struct perf_event_context *ctx = event->ctx;
3958 	int ret = 0, active;
3959 	u64 value;
3960 
3961 	if (!is_sampling_event(event))
3962 		return -EINVAL;
3963 
3964 	if (copy_from_user(&value, arg, sizeof(value)))
3965 		return -EFAULT;
3966 
3967 	if (!value)
3968 		return -EINVAL;
3969 
3970 	raw_spin_lock_irq(&ctx->lock);
3971 	if (event->attr.freq) {
3972 		if (value > sysctl_perf_event_sample_rate) {
3973 			ret = -EINVAL;
3974 			goto unlock;
3975 		}
3976 
3977 		event->attr.sample_freq = value;
3978 	} else {
3979 		event->attr.sample_period = value;
3980 		event->hw.sample_period = value;
3981 	}
3982 
3983 	active = (event->state == PERF_EVENT_STATE_ACTIVE);
3984 	if (active) {
3985 		perf_pmu_disable(ctx->pmu);
3986 		event->pmu->stop(event, PERF_EF_UPDATE);
3987 	}
3988 
3989 	local64_set(&event->hw.period_left, 0);
3990 
3991 	if (active) {
3992 		event->pmu->start(event, PERF_EF_RELOAD);
3993 		perf_pmu_enable(ctx->pmu);
3994 	}
3995 
3996 unlock:
3997 	raw_spin_unlock_irq(&ctx->lock);
3998 
3999 	return ret;
4000 }
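
/*
 * Userspace-side sketch (illustrative only) of updating the period at
 * runtime through the handler above; "perf_fd" is assumed to be a
 * sampling event fd:
 *
 *	u64 period = 100000;
 *
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period))
 *		err(1, "PERF_EVENT_IOC_PERIOD");
 */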
4001 
4002 static const struct file_operations perf_fops;
4003 
4004 static inline int perf_fget_light(int fd, struct fd *p)
4005 {
4006 	struct fd f = fdget(fd);
4007 	if (!f.file)
4008 		return -EBADF;
4009 
4010 	if (f.file->f_op != &perf_fops) {
4011 		fdput(f);
4012 		return -EBADF;
4013 	}
4014 	*p = f;
4015 	return 0;
4016 }
4017 
4018 static int perf_event_set_output(struct perf_event *event,
4019 				 struct perf_event *output_event);
4020 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
4021 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
4022 
4023 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
4024 {
4025 	void (*func)(struct perf_event *);
4026 	u32 flags = arg;
4027 
4028 	switch (cmd) {
4029 	case PERF_EVENT_IOC_ENABLE:
4030 		func = _perf_event_enable;
4031 		break;
4032 	case PERF_EVENT_IOC_DISABLE:
4033 		func = _perf_event_disable;
4034 		break;
4035 	case PERF_EVENT_IOC_RESET:
4036 		func = _perf_event_reset;
4037 		break;
4038 
4039 	case PERF_EVENT_IOC_REFRESH:
4040 		return _perf_event_refresh(event, arg);
4041 
4042 	case PERF_EVENT_IOC_PERIOD:
4043 		return perf_event_period(event, (u64 __user *)arg);
4044 
4045 	case PERF_EVENT_IOC_ID:
4046 	{
4047 		u64 id = primary_event_id(event);
4048 
4049 		if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4050 			return -EFAULT;
4051 		return 0;
4052 	}
4053 
4054 	case PERF_EVENT_IOC_SET_OUTPUT:
4055 	{
4056 		int ret;
4057 		if (arg != -1) {
4058 			struct perf_event *output_event;
4059 			struct fd output;
4060 			ret = perf_fget_light(arg, &output);
4061 			if (ret)
4062 				return ret;
4063 			output_event = output.file->private_data;
4064 			ret = perf_event_set_output(event, output_event);
4065 			fdput(output);
4066 		} else {
4067 			ret = perf_event_set_output(event, NULL);
4068 		}
4069 		return ret;
4070 	}
4071 
4072 	case PERF_EVENT_IOC_SET_FILTER:
4073 		return perf_event_set_filter(event, (void __user *)arg);
4074 
4075 	case PERF_EVENT_IOC_SET_BPF:
4076 		return perf_event_set_bpf_prog(event, arg);
4077 
4078 	default:
4079 		return -ENOTTY;
4080 	}
4081 
4082 	if (flags & PERF_IOC_FLAG_GROUP)
4083 		perf_event_for_each(event, func);
4084 	else
4085 		perf_event_for_each_child(event, func);
4086 
4087 	return 0;
4088 }
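
/*
 * A small userspace sketch of the ioctls dispatched above; passing
 * PERF_IOC_FLAG_GROUP applies the operation to the whole group instead
 * of just this event and its children ("group_fd" is assumed to be the
 * group leader's fd):
 *
 *	ioctl(group_fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *	ioctl(group_fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *	...					// run the workload
 *	ioctl(group_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */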
4089 
4090 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4091 {
4092 	struct perf_event *event = file->private_data;
4093 	struct perf_event_context *ctx;
4094 	long ret;
4095 
4096 	ctx = perf_event_ctx_lock(event);
4097 	ret = _perf_ioctl(event, cmd, arg);
4098 	perf_event_ctx_unlock(event, ctx);
4099 
4100 	return ret;
4101 }
4102 
4103 #ifdef CONFIG_COMPAT
4104 static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4105 				unsigned long arg)
4106 {
4107 	switch (_IOC_NR(cmd)) {
4108 	case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4109 	case _IOC_NR(PERF_EVENT_IOC_ID):
4110 		/* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
4111 		if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4112 			cmd &= ~IOCSIZE_MASK;
4113 			cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4114 		}
4115 		break;
4116 	}
4117 	return perf_ioctl(file, cmd, arg);
4118 }
4119 #else
4120 # define perf_compat_ioctl NULL
4121 #endif
4122 
4123 int perf_event_task_enable(void)
4124 {
4125 	struct perf_event_context *ctx;
4126 	struct perf_event *event;
4127 
4128 	mutex_lock(&current->perf_event_mutex);
4129 	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4130 		ctx = perf_event_ctx_lock(event);
4131 		perf_event_for_each_child(event, _perf_event_enable);
4132 		perf_event_ctx_unlock(event, ctx);
4133 	}
4134 	mutex_unlock(&current->perf_event_mutex);
4135 
4136 	return 0;
4137 }
4138 
4139 int perf_event_task_disable(void)
4140 {
4141 	struct perf_event_context *ctx;
4142 	struct perf_event *event;
4143 
4144 	mutex_lock(&current->perf_event_mutex);
4145 	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4146 		ctx = perf_event_ctx_lock(event);
4147 		perf_event_for_each_child(event, _perf_event_disable);
4148 		perf_event_ctx_unlock(event, ctx);
4149 	}
4150 	mutex_unlock(&current->perf_event_mutex);
4151 
4152 	return 0;
4153 }
4154 
4155 static int perf_event_index(struct perf_event *event)
4156 {
4157 	if (event->hw.state & PERF_HES_STOPPED)
4158 		return 0;
4159 
4160 	if (event->state != PERF_EVENT_STATE_ACTIVE)
4161 		return 0;
4162 
4163 	return event->pmu->event_idx(event);
4164 }
4165 
4166 static void calc_timer_values(struct perf_event *event,
4167 				u64 *now,
4168 				u64 *enabled,
4169 				u64 *running)
4170 {
4171 	u64 ctx_time;
4172 
4173 	*now = perf_clock();
4174 	ctx_time = event->shadow_ctx_time + *now;
4175 	*enabled = ctx_time - event->tstamp_enabled;
4176 	*running = ctx_time - event->tstamp_running;
4177 }
4178 
4179 static void perf_event_init_userpage(struct perf_event *event)
4180 {
4181 	struct perf_event_mmap_page *userpg;
4182 	struct ring_buffer *rb;
4183 
4184 	rcu_read_lock();
4185 	rb = rcu_dereference(event->rb);
4186 	if (!rb)
4187 		goto unlock;
4188 
4189 	userpg = rb->user_page;
4190 
4191 	/* Allow new userspace to detect that bit 0 is deprecated */
4192 	userpg->cap_bit0_is_deprecated = 1;
4193 	userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
4194 	userpg->data_offset = PAGE_SIZE;
4195 	userpg->data_size = perf_data_size(rb);
4196 
4197 unlock:
4198 	rcu_read_unlock();
4199 }
4200 
4201 void __weak arch_perf_update_userpage(
4202 	struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
4203 {
4204 }
4205 
4206 /*
4207  * Callers need to ensure there can be no nesting of this function, otherwise
4208  * the seqlock logic goes bad. We cannot serialize this because the arch
4209  * code calls this from NMI context.
4210  */
4211 void perf_event_update_userpage(struct perf_event *event)
4212 {
4213 	struct perf_event_mmap_page *userpg;
4214 	struct ring_buffer *rb;
4215 	u64 enabled, running, now;
4216 
4217 	rcu_read_lock();
4218 	rb = rcu_dereference(event->rb);
4219 	if (!rb)
4220 		goto unlock;
4221 
4222 	/*
4223 	 * Compute total_time_enabled and total_time_running
4224 	 * based on the snapshot values taken when the event
4225 	 * was last scheduled in.
4226 	 *
4227 	 * We cannot simply call update_context_time()
4228 	 * because of locking issues, as we can be called in
4229 	 * NMI context.
4230 	 */
4231 	calc_timer_values(event, &now, &enabled, &running);
4232 
4233 	userpg = rb->user_page;
4234 	/*
4235 	 * Disable preemption so as to not let the corresponding user-space
4236 	 * spin too long if we get preempted.
4237 	 */
4238 	preempt_disable();
4239 	++userpg->lock;
4240 	barrier();
4241 	userpg->index = perf_event_index(event);
4242 	userpg->offset = perf_event_count(event);
4243 	if (userpg->index)
4244 		userpg->offset -= local64_read(&event->hw.prev_count);
4245 
4246 	userpg->time_enabled = enabled +
4247 			atomic64_read(&event->child_total_time_enabled);
4248 
4249 	userpg->time_running = running +
4250 			atomic64_read(&event->child_total_time_running);
4251 
4252 	arch_perf_update_userpage(event, userpg, now);
4253 
4254 	barrier();
4255 	++userpg->lock;
4256 	preempt_enable();
4257 unlock:
4258 	rcu_read_unlock();
4259 }
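
/*
 * The matching userspace read side of the seqlock above, as documented
 * in the uapi perf_event.h (a sketch; "pc" is the mapped
 * perf_event_mmap_page and rdpmc() an arch-specific counter read,
 * usable only when index is non-zero):
 *
 *	u32 seq, idx;
 *	u64 count;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (idx)
 *			count += rdpmc(idx - 1);
 *
 *		barrier();
 *	} while (pc->lock != seq);
 */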
4260 
4261 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4262 {
4263 	struct perf_event *event = vma->vm_file->private_data;
4264 	struct ring_buffer *rb;
4265 	int ret = VM_FAULT_SIGBUS;
4266 
4267 	if (vmf->flags & FAULT_FLAG_MKWRITE) {
4268 		if (vmf->pgoff == 0)
4269 			ret = 0;
4270 		return ret;
4271 	}
4272 
4273 	rcu_read_lock();
4274 	rb = rcu_dereference(event->rb);
4275 	if (!rb)
4276 		goto unlock;
4277 
4278 	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
4279 		goto unlock;
4280 
4281 	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
4282 	if (!vmf->page)
4283 		goto unlock;
4284 
4285 	get_page(vmf->page);
4286 	vmf->page->mapping = vma->vm_file->f_mapping;
4287 	vmf->page->index   = vmf->pgoff;
4288 
4289 	ret = 0;
4290 unlock:
4291 	rcu_read_unlock();
4292 
4293 	return ret;
4294 }
4295 
4296 static void ring_buffer_attach(struct perf_event *event,
4297 			       struct ring_buffer *rb)
4298 {
4299 	struct ring_buffer *old_rb = NULL;
4300 	unsigned long flags;
4301 
4302 	if (event->rb) {
4303 		/*
4304 		 * Should be impossible, we set this when removing
4305 		 * event->rb_entry and wait/clear when adding event->rb_entry.
4306 		 */
4307 		WARN_ON_ONCE(event->rcu_pending);
4308 
4309 		old_rb = event->rb;
4310 		spin_lock_irqsave(&old_rb->event_lock, flags);
4311 		list_del_rcu(&event->rb_entry);
4312 		spin_unlock_irqrestore(&old_rb->event_lock, flags);
4313 
4314 		event->rcu_batches = get_state_synchronize_rcu();
4315 		event->rcu_pending = 1;
4316 	}
4317 
4318 	if (rb) {
4319 		if (event->rcu_pending) {
4320 			cond_synchronize_rcu(event->rcu_batches);
4321 			event->rcu_pending = 0;
4322 		}
4323 
4324 		spin_lock_irqsave(&rb->event_lock, flags);
4325 		list_add_rcu(&event->rb_entry, &rb->event_list);
4326 		spin_unlock_irqrestore(&rb->event_lock, flags);
4327 	}
4328 
4329 	rcu_assign_pointer(event->rb, rb);
4330 
4331 	if (old_rb) {
4332 		ring_buffer_put(old_rb);
4333 		/*
4334 		 * Since we detached before setting the new rb (so that we
4335 		 * could attach the new rb), we could have missed a wakeup.
4336 		 * Provide it now.
4337 		 */
4338 		wake_up_all(&event->waitq);
4339 	}
4340 }
4341 
4342 static void ring_buffer_wakeup(struct perf_event *event)
4343 {
4344 	struct ring_buffer *rb;
4345 
4346 	rcu_read_lock();
4347 	rb = rcu_dereference(event->rb);
4348 	if (rb) {
4349 		list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
4350 			wake_up_all(&event->waitq);
4351 	}
4352 	rcu_read_unlock();
4353 }
4354 
4355 static void rb_free_rcu(struct rcu_head *rcu_head)
4356 {
4357 	struct ring_buffer *rb;
4358 
4359 	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
4360 	rb_free(rb);
4361 }
4362 
4363 struct ring_buffer *ring_buffer_get(struct perf_event *event)
4364 {
4365 	struct ring_buffer *rb;
4366 
4367 	rcu_read_lock();
4368 	rb = rcu_dereference(event->rb);
4369 	if (rb) {
4370 		if (!atomic_inc_not_zero(&rb->refcount))
4371 			rb = NULL;
4372 	}
4373 	rcu_read_unlock();
4374 
4375 	return rb;
4376 }
4377 
4378 void ring_buffer_put(struct ring_buffer *rb)
4379 {
4380 	if (!atomic_dec_and_test(&rb->refcount))
4381 		return;
4382 
4383 	WARN_ON_ONCE(!list_empty(&rb->event_list));
4384 
4385 	call_rcu(&rb->rcu_head, rb_free_rcu);
4386 }
4387 
4388 static void perf_mmap_open(struct vm_area_struct *vma)
4389 {
4390 	struct perf_event *event = vma->vm_file->private_data;
4391 
4392 	atomic_inc(&event->mmap_count);
4393 	atomic_inc(&event->rb->mmap_count);
4394 
4395 	if (vma->vm_pgoff)
4396 		atomic_inc(&event->rb->aux_mmap_count);
4397 
4398 	if (event->pmu->event_mapped)
4399 		event->pmu->event_mapped(event);
4400 }
4401 
4402 /*
4403  * A buffer can be mmap()ed multiple times; either directly through the same
4404  * event, or through other events by use of perf_event_set_output().
4405  *
4406  * In order to undo the VM accounting done by perf_mmap() we need to destroy
4407  * the buffer here, where we still have a VM context. This means we need
4408  * to detach all events redirecting to us.
4409  */
4410 static void perf_mmap_close(struct vm_area_struct *vma)
4411 {
4412 	struct perf_event *event = vma->vm_file->private_data;
4413 
4414 	struct ring_buffer *rb = ring_buffer_get(event);
4415 	struct user_struct *mmap_user = rb->mmap_user;
4416 	int mmap_locked = rb->mmap_locked;
4417 	unsigned long size = perf_data_size(rb);
4418 
4419 	if (event->pmu->event_unmapped)
4420 		event->pmu->event_unmapped(event);
4421 
4422 	/*
4423 	 * rb->aux_mmap_count will always drop before rb->mmap_count and
4424 	 * event->mmap_count, so it is ok to use event->mmap_mutex to
4425 	 * serialize with perf_mmap here.
4426 	 */
4427 	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
4428 	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
4429 		atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
4430 		vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
4431 
4432 		rb_free_aux(rb);
4433 		mutex_unlock(&event->mmap_mutex);
4434 	}
4435 
4436 	atomic_dec(&rb->mmap_count);
4437 
4438 	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
4439 		goto out_put;
4440 
4441 	ring_buffer_attach(event, NULL);
4442 	mutex_unlock(&event->mmap_mutex);
4443 
4444 	/* If there are still other mmap()s of this buffer, we're done. */
4445 	if (atomic_read(&rb->mmap_count))
4446 		goto out_put;
4447 
4448 	/*
4449 	 * No other mmap()s, detach from all other events that might redirect
4450 	 * into the now unreachable buffer. Somewhat complicated by the
4451 	 * fact that rb::event_lock otherwise nests inside mmap_mutex.
4452 	 */
4453 again:
4454 	rcu_read_lock();
4455 	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
4456 		if (!atomic_long_inc_not_zero(&event->refcount)) {
4457 			/*
4458 			 * This event is en-route to free_event() which will
4459 			 * detach it and remove it from the list.
4460 			 */
4461 			continue;
4462 		}
4463 		rcu_read_unlock();
4464 
4465 		mutex_lock(&event->mmap_mutex);
4466 		/*
4467 		 * Check we didn't race with perf_event_set_output() which can
4468 		 * swizzle the rb from under us while we were waiting to
4469 		 * acquire mmap_mutex.
4470 		 *
4471 		 * If we find a different rb, ignore this event; the next
4472 		 * iteration will no longer find it on the list. We still
4473 		 * have to restart the iteration to make sure we're not
4474 		 * now iterating the wrong list.
4475 		 */
4476 		if (event->rb == rb)
4477 			ring_buffer_attach(event, NULL);
4478 
4479 		mutex_unlock(&event->mmap_mutex);
4480 		put_event(event);
4481 
4482 		/*
4483 		 * Restart the iteration; either we're on the wrong list or
4484 		 * we destroyed its integrity by doing a deletion.
4485 		 */
4486 		goto again;
4487 	}
4488 	rcu_read_unlock();
4489 
4490 	/*
4491 	 * There could still be a few 0-ref events on the list; they'll
4492 	 * get cleaned up by free_event() -- they'll also still have their
4493 	 * ref on the rb and will free it whenever they are done with it.
4494 	 *
4495 	 * Aside from that, this buffer is 'fully' detached and unmapped,
4496 	 * undo the VM accounting.
4497 	 */
4498 
4499 	atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
4500 	vma->vm_mm->pinned_vm -= mmap_locked;
4501 	free_uid(mmap_user);
4502 
4503 out_put:
4504 	ring_buffer_put(rb); /* could be last */
4505 }
4506 
4507 static const struct vm_operations_struct perf_mmap_vmops = {
4508 	.open		= perf_mmap_open,
4509 	.close		= perf_mmap_close, /* non-mergeable */
4510 	.fault		= perf_mmap_fault,
4511 	.page_mkwrite	= perf_mmap_fault,
4512 };
4513 
4514 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
4515 {
4516 	struct perf_event *event = file->private_data;
4517 	unsigned long user_locked, user_lock_limit;
4518 	struct user_struct *user = current_user();
4519 	unsigned long locked, lock_limit;
4520 	struct ring_buffer *rb = NULL;
4521 	unsigned long vma_size;
4522 	unsigned long nr_pages;
4523 	long user_extra = 0, extra = 0;
4524 	int ret = 0, flags = 0;
4525 
4526 	/*
4527 	 * Don't allow mmap() of inherited per-task counters. This would
4528 	 * create a performance issue due to all children writing to the
4529 	 * same rb.
4530 	 */
4531 	if (event->cpu == -1 && event->attr.inherit)
4532 		return -EINVAL;
4533 
4534 	if (!(vma->vm_flags & VM_SHARED))
4535 		return -EINVAL;
4536 
4537 	vma_size = vma->vm_end - vma->vm_start;
4538 
4539 	if (vma->vm_pgoff == 0) {
4540 		nr_pages = (vma_size / PAGE_SIZE) - 1;
4541 	} else {
4542 		/*
4543 		 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
4544 		 * mapped; all subsequent mappings should have the same size
4545 		 * and offset. It must be above the normal perf buffer.
4546 		 */
4547 		u64 aux_offset, aux_size;
4548 
4549 		if (!event->rb)
4550 			return -EINVAL;
4551 
4552 		nr_pages = vma_size / PAGE_SIZE;
4553 
4554 		mutex_lock(&event->mmap_mutex);
4555 		ret = -EINVAL;
4556 
4557 		rb = event->rb;
4558 		if (!rb)
4559 			goto aux_unlock;
4560 
4561 		aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
4562 		aux_size = ACCESS_ONCE(rb->user_page->aux_size);
4563 
4564 		if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
4565 			goto aux_unlock;
4566 
4567 		if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
4568 			goto aux_unlock;
4569 
4570 		/* already mapped with a different offset */
4571 		if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
4572 			goto aux_unlock;
4573 
4574 		if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
4575 			goto aux_unlock;
4576 
4577 		/* already mapped with a different size */
4578 		if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
4579 			goto aux_unlock;
4580 
4581 		if (!is_power_of_2(nr_pages))
4582 			goto aux_unlock;
4583 
4584 		if (!atomic_inc_not_zero(&rb->mmap_count))
4585 			goto aux_unlock;
4586 
4587 		if (rb_has_aux(rb)) {
4588 			atomic_inc(&rb->aux_mmap_count);
4589 			ret = 0;
4590 			goto unlock;
4591 		}
4592 
4593 		atomic_set(&rb->aux_mmap_count, 1);
4594 		user_extra = nr_pages;
4595 
4596 		goto accounting;
4597 	}
4598 
4599 	/*
4600 	 * If we have rb pages, ensure they're a power-of-two number, so we
4601 	 * can do bitmasks instead of modulo.
4602 	 */
4603 	if (nr_pages != 0 && !is_power_of_2(nr_pages))
4604 		return -EINVAL;
4605 
4606 	if (vma_size != PAGE_SIZE * (1 + nr_pages))
4607 		return -EINVAL;
4608 
4609 	WARN_ON_ONCE(event->ctx->parent_ctx);
4610 again:
4611 	mutex_lock(&event->mmap_mutex);
4612 	if (event->rb) {
4613 		if (event->rb->nr_pages != nr_pages) {
4614 			ret = -EINVAL;
4615 			goto unlock;
4616 		}
4617 
4618 		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
4619 			/*
4620 			 * Raced against perf_mmap_close() through
4621 			 * perf_event_set_output(). Try again, hope for better
4622 			 * luck.
4623 			 */
4624 			mutex_unlock(&event->mmap_mutex);
4625 			goto again;
4626 		}
4627 
4628 		goto unlock;
4629 	}
4630 
4631 	user_extra = nr_pages + 1;
4632 
4633 accounting:
4634 	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
4635 
4636 	/*
4637 	 * Increase the limit linearly with more CPUs:
4638 	 */
4639 	user_lock_limit *= num_online_cpus();
4640 
4641 	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
4642 
4643 	if (user_locked > user_lock_limit)
4644 		extra = user_locked - user_lock_limit;
4645 
4646 	lock_limit = rlimit(RLIMIT_MEMLOCK);
4647 	lock_limit >>= PAGE_SHIFT;
4648 	locked = vma->vm_mm->pinned_vm + extra;
4649 
4650 	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
4651 		!capable(CAP_IPC_LOCK)) {
4652 		ret = -EPERM;
4653 		goto unlock;
4654 	}
4655 
4656 	WARN_ON(!rb && event->rb);
4657 
4658 	if (vma->vm_flags & VM_WRITE)
4659 		flags |= RING_BUFFER_WRITABLE;
4660 
4661 	if (!rb) {
4662 		rb = rb_alloc(nr_pages,
4663 			      event->attr.watermark ? event->attr.wakeup_watermark : 0,
4664 			      event->cpu, flags);
4665 
4666 		if (!rb) {
4667 			ret = -ENOMEM;
4668 			goto unlock;
4669 		}
4670 
4671 		atomic_set(&rb->mmap_count, 1);
4672 		rb->mmap_user = get_current_user();
4673 		rb->mmap_locked = extra;
4674 
4675 		ring_buffer_attach(event, rb);
4676 
4677 		perf_event_init_userpage(event);
4678 		perf_event_update_userpage(event);
4679 	} else {
4680 		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
4681 				   event->attr.aux_watermark, flags);
4682 		if (!ret)
4683 			rb->aux_mmap_locked = extra;
4684 	}
4685 
4686 unlock:
4687 	if (!ret) {
4688 		atomic_long_add(user_extra, &user->locked_vm);
4689 		vma->vm_mm->pinned_vm += extra;
4690 
4691 		atomic_inc(&event->mmap_count);
4692 	} else if (rb) {
4693 		atomic_dec(&rb->mmap_count);
4694 	}
4695 aux_unlock:
4696 	mutex_unlock(&event->mmap_mutex);
4697 
4698 	/*
4699 	 * Since pinned accounting is per vm we cannot allow fork() to copy our
4700 	 * vma.
4701 	 */
4702 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
4703 	vma->vm_ops = &perf_mmap_vmops;
4704 
4705 	if (event->pmu->event_mapped)
4706 		event->pmu->event_mapped(event);
4707 
4708 	return ret;
4709 }
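
/*
 * Userspace-side sketch of the two mappings handled above (illustrative
 * sizes): the data area is one metadata page plus 2^n data pages at
 * offset 0; the AUX area is mapped at the offset/size first advertised
 * in the metadata page:
 *
 *	size_t psz = sysconf(_SC_PAGESIZE);
 *	struct perf_event_mmap_page *pc;
 *	void *aux;
 *
 *	pc = mmap(NULL, psz * (1 + 8), PROT_READ | PROT_WRITE,
 *		  MAP_SHARED, perf_fd, 0);
 *
 *	pc->aux_offset = psz * (1 + 8);
 *	pc->aux_size   = psz * 16;
 *	aux = mmap(NULL, pc->aux_size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, perf_fd, pc->aux_offset);
 */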
4710 
4711 static int perf_fasync(int fd, struct file *filp, int on)
4712 {
4713 	struct inode *inode = file_inode(filp);
4714 	struct perf_event *event = filp->private_data;
4715 	int retval;
4716 
4717 	mutex_lock(&inode->i_mutex);
4718 	retval = fasync_helper(fd, filp, on, &event->fasync);
4719 	mutex_unlock(&inode->i_mutex);
4720 
4721 	if (retval < 0)
4722 		return retval;
4723 
4724 	return 0;
4725 }
4726 
4727 static const struct file_operations perf_fops = {
4728 	.llseek			= no_llseek,
4729 	.release		= perf_release,
4730 	.read			= perf_read,
4731 	.poll			= perf_poll,
4732 	.unlocked_ioctl		= perf_ioctl,
4733 	.compat_ioctl		= perf_compat_ioctl,
4734 	.mmap			= perf_mmap,
4735 	.fasync			= perf_fasync,
4736 };
4737 
4738 /*
4739  * Perf event wakeup
4740  *
4741  * If there's data, ensure we set the poll() state and publish everything
4742  * to user-space before waking everybody up.
4743  */
4744 
4745 void perf_event_wakeup(struct perf_event *event)
4746 {
4747 	ring_buffer_wakeup(event);
4748 
4749 	if (event->pending_kill) {
4750 		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
4751 		event->pending_kill = 0;
4752 	}
4753 }
4754 
4755 static void perf_pending_event(struct irq_work *entry)
4756 {
4757 	struct perf_event *event = container_of(entry,
4758 			struct perf_event, pending);
4759 	int rctx;
4760 
4761 	rctx = perf_swevent_get_recursion_context();
4762 	/*
4763 	 * If we 'fail' here, that's OK, it means recursion is already disabled
4764 	 * and we won't recurse 'further'.
4765 	 */
4766 
4767 	if (event->pending_disable) {
4768 		event->pending_disable = 0;
4769 		__perf_event_disable(event);
4770 	}
4771 
4772 	if (event->pending_wakeup) {
4773 		event->pending_wakeup = 0;
4774 		perf_event_wakeup(event);
4775 	}
4776 
4777 	if (rctx >= 0)
4778 		perf_swevent_put_recursion_context(rctx);
4779 }
4780 
4781 /*
4782  * We assume that only KVM supports these callbacks.
4783  * Later on, we might change this to a list if another
4784  * virtualization implementation also needs them.
4785  */
4786 struct perf_guest_info_callbacks *perf_guest_cbs;
4787 
4788 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4789 {
4790 	perf_guest_cbs = cbs;
4791 	return 0;
4792 }
4793 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
4794 
4795 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4796 {
4797 	perf_guest_cbs = NULL;
4798 	return 0;
4799 }
4800 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
4801 
4802 static void
4803 perf_output_sample_regs(struct perf_output_handle *handle,
4804 			struct pt_regs *regs, u64 mask)
4805 {
4806 	int bit;
4807 
4808 	for_each_set_bit(bit, (const unsigned long *) &mask,
4809 			 sizeof(mask) * BITS_PER_BYTE) {
4810 		u64 val;
4811 
4812 		val = perf_reg_value(regs, bit);
4813 		perf_output_put(handle, val);
4814 	}
4815 }
4816 
4817 static void perf_sample_regs_user(struct perf_regs *regs_user,
4818 				  struct pt_regs *regs,
4819 				  struct pt_regs *regs_user_copy)
4820 {
4821 	if (user_mode(regs)) {
4822 		regs_user->abi = perf_reg_abi(current);
4823 		regs_user->regs = regs;
4824 	} else if (current->mm) {
4825 		perf_get_regs_user(regs_user, regs, regs_user_copy);
4826 	} else {
4827 		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
4828 		regs_user->regs = NULL;
4829 	}
4830 }
4831 
4832 static void perf_sample_regs_intr(struct perf_regs *regs_intr,
4833 				  struct pt_regs *regs)
4834 {
4835 	regs_intr->regs = regs;
4836 	regs_intr->abi  = perf_reg_abi(current);
4837 }
4838 
4839 
4840 /*
4841  * Get remaining task size from user stack pointer.
4842  *
4843  * It'd be better to take the stack vma map and limit this more
4844  * precisely, but there's no way to get it safely under interrupt,
4845  * so use TASK_SIZE as the limit.
4846  */
4847 static u64 perf_ustack_task_size(struct pt_regs *regs)
4848 {
4849 	unsigned long addr = perf_user_stack_pointer(regs);
4850 
4851 	if (!addr || addr >= TASK_SIZE)
4852 		return 0;
4853 
4854 	return TASK_SIZE - addr;
4855 }
4856 
4857 static u16
4858 perf_sample_ustack_size(u16 stack_size, u16 header_size,
4859 			struct pt_regs *regs)
4860 {
4861 	u64 task_size;
4862 
4863 	/* No regs, no stack pointer, no dump. */
4864 	if (!regs)
4865 		return 0;
4866 
4867 	/*
4868 	 * Check whether the requested stack size fits into:
4869 	 * - TASK_SIZE
4870 	 *   If it doesn't, we limit the size to TASK_SIZE.
4871 	 *
4872 	 * - the remaining sample size
4873 	 *   If it doesn't, we shrink the stack size to fit
4874 	 *   into the remaining sample size.
4875 	 */
4876 
4877 	task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
4878 	stack_size = min(stack_size, (u16) task_size);
4879 
4880 	/* Current header size plus static size and dynamic size. */
4881 	header_size += 2 * sizeof(u64);
4882 
4883 	/* Does header size plus stack dump size still fit in the u16 sample size? */
4884 	if ((u16) (header_size + stack_size) < header_size) {
4885 		/*
4886 		 * If we overflow the maximum size for the sample,
4887 		 * we customize the stack dump size to fit in.
4888 		 */
4889 		stack_size = USHRT_MAX - header_size - sizeof(u64);
4890 		stack_size = round_up(stack_size, sizeof(u64));
4891 	}
4892 
4893 	return stack_size;
4894 }
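
/*
 * Worked example with illustrative numbers: for header_size == 65000
 * (65016 once the static and dynamic size fields are added) and a
 * requested stack_size of 1024, the u16 sum wraps, so stack_size is
 * clamped to USHRT_MAX - 65016 - 8 = 511 and rounded up to 512.
 */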
4895 
4896 static void
4897 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
4898 			  struct pt_regs *regs)
4899 {
4900 	/* Case of a kernel thread, nothing to dump */
4901 	if (!regs) {
4902 		u64 size = 0;
4903 		perf_output_put(handle, size);
4904 	} else {
4905 		unsigned long sp;
4906 		unsigned int rem;
4907 		u64 dyn_size;
4908 
4909 		/*
4910 		 * We dump:
4911 		 * static size
4912 		 *   - the size requested by the user, or the best one we can
4913 		 *     fit into the sample max size
4914 		 * data
4915 		 *   - user stack dump data
4916 		 * dynamic size
4917 		 *   - the actual dumped size
4918 		 */
4919 
4920 		/* Static size. */
4921 		perf_output_put(handle, dump_size);
4922 
4923 		/* Data. */
4924 		sp = perf_user_stack_pointer(regs);
4925 		rem = __output_copy_user(handle, (void *) sp, dump_size);
4926 		dyn_size = dump_size - rem;
4927 
4928 		perf_output_skip(handle, rem);
4929 
4930 		/* Dynamic size. */
4931 		perf_output_put(handle, dyn_size);
4932 	}
4933 }
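
/*
 * The resulting on-buffer layout for PERF_SAMPLE_STACK_USER (a sketch
 * matching the uapi documentation):
 *
 *	{ u64	size;		// static size that was reserved
 *	  char	data[size];	// user stack dump
 *	  u64	dyn_size;	// bytes actually dumped
 *	}
 *
 * where only a zero "size" is emitted for kernel threads (no regs).
 */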
4934 
4935 static void __perf_event_header__init_id(struct perf_event_header *header,
4936 					 struct perf_sample_data *data,
4937 					 struct perf_event *event)
4938 {
4939 	u64 sample_type = event->attr.sample_type;
4940 
4941 	data->type = sample_type;
4942 	header->size += event->id_header_size;
4943 
4944 	if (sample_type & PERF_SAMPLE_TID) {
4945 		/* namespace issues */
4946 		data->tid_entry.pid = perf_event_pid(event, current);
4947 		data->tid_entry.tid = perf_event_tid(event, current);
4948 	}
4949 
4950 	if (sample_type & PERF_SAMPLE_TIME)
4951 		data->time = perf_event_clock(event);
4952 
4953 	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
4954 		data->id = primary_event_id(event);
4955 
4956 	if (sample_type & PERF_SAMPLE_STREAM_ID)
4957 		data->stream_id = event->id;
4958 
4959 	if (sample_type & PERF_SAMPLE_CPU) {
4960 		data->cpu_entry.cpu	 = raw_smp_processor_id();
4961 		data->cpu_entry.reserved = 0;
4962 	}
4963 }
4964 
4965 void perf_event_header__init_id(struct perf_event_header *header,
4966 				struct perf_sample_data *data,
4967 				struct perf_event *event)
4968 {
4969 	if (event->attr.sample_id_all)
4970 		__perf_event_header__init_id(header, data, event);
4971 }
4972 
4973 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
4974 					   struct perf_sample_data *data)
4975 {
4976 	u64 sample_type = data->type;
4977 
4978 	if (sample_type & PERF_SAMPLE_TID)
4979 		perf_output_put(handle, data->tid_entry);
4980 
4981 	if (sample_type & PERF_SAMPLE_TIME)
4982 		perf_output_put(handle, data->time);
4983 
4984 	if (sample_type & PERF_SAMPLE_ID)
4985 		perf_output_put(handle, data->id);
4986 
4987 	if (sample_type & PERF_SAMPLE_STREAM_ID)
4988 		perf_output_put(handle, data->stream_id);
4989 
4990 	if (sample_type & PERF_SAMPLE_CPU)
4991 		perf_output_put(handle, data->cpu_entry);
4992 
4993 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
4994 		perf_output_put(handle, data->id);
4995 }
4996 
4997 void perf_event__output_id_sample(struct perf_event *event,
4998 				  struct perf_output_handle *handle,
4999 				  struct perf_sample_data *sample)
5000 {
5001 	if (event->attr.sample_id_all)
5002 		__perf_event__output_id_sample(handle, sample);
5003 }
5004 
5005 static void perf_output_read_one(struct perf_output_handle *handle,
5006 				 struct perf_event *event,
5007 				 u64 enabled, u64 running)
5008 {
5009 	u64 read_format = event->attr.read_format;
5010 	u64 values[4];
5011 	int n = 0;
5012 
5013 	values[n++] = perf_event_count(event);
5014 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
5015 		values[n++] = enabled +
5016 			atomic64_read(&event->child_total_time_enabled);
5017 	}
5018 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
5019 		values[n++] = running +
5020 			atomic64_read(&event->child_total_time_running);
5021 	}
5022 	if (read_format & PERF_FORMAT_ID)
5023 		values[n++] = primary_event_id(event);
5024 
5025 	__output_copy(handle, values, n * sizeof(u64));
5026 }
5027 
5028 /*
5029  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
5030  */
5031 static void perf_output_read_group(struct perf_output_handle *handle,
5032 			    struct perf_event *event,
5033 			    u64 enabled, u64 running)
5034 {
5035 	struct perf_event *leader = event->group_leader, *sub;
5036 	u64 read_format = event->attr.read_format;
5037 	u64 values[5];
5038 	int n = 0;
5039 
5040 	values[n++] = 1 + leader->nr_siblings;
5041 
5042 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
5043 		values[n++] = enabled;
5044 
5045 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
5046 		values[n++] = running;
5047 
5048 	if (leader != event)
5049 		leader->pmu->read(leader);
5050 
5051 	values[n++] = perf_event_count(leader);
5052 	if (read_format & PERF_FORMAT_ID)
5053 		values[n++] = primary_event_id(leader);
5054 
5055 	__output_copy(handle, values, n * sizeof(u64));
5056 
5057 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
5058 		n = 0;
5059 
5060 		if ((sub != event) &&
5061 		    (sub->state == PERF_EVENT_STATE_ACTIVE))
5062 			sub->pmu->read(sub);
5063 
5064 		values[n++] = perf_event_count(sub);
5065 		if (read_format & PERF_FORMAT_ID)
5066 			values[n++] = primary_event_id(sub);
5067 
5068 		__output_copy(handle, values, n * sizeof(u64));
5069 	}
5070 }
5071 
5072 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
5073 				 PERF_FORMAT_TOTAL_TIME_RUNNING)
5074 
5075 static void perf_output_read(struct perf_output_handle *handle,
5076 			     struct perf_event *event)
5077 {
5078 	u64 enabled = 0, running = 0, now;
5079 	u64 read_format = event->attr.read_format;
5080 
5081 	/*
5082 	 * Compute total_time_enabled and total_time_running
5083 	 * based on the snapshot values taken when the event
5084 	 * was last scheduled in.
5085 	 *
5086 	 * We cannot simply call update_context_time()
5087 	 * because of locking issues, as we are called in
5088 	 * NMI context.
5089 	 */
5090 	if (read_format & PERF_FORMAT_TOTAL_TIMES)
5091 		calc_timer_values(event, &now, &enabled, &running);
5092 
5093 	if (event->attr.read_format & PERF_FORMAT_GROUP)
5094 		perf_output_read_group(handle, event, enabled, running);
5095 	else
5096 		perf_output_read_one(handle, event, enabled, running);
5097 }
5098 
5099 void perf_output_sample(struct perf_output_handle *handle,
5100 			struct perf_event_header *header,
5101 			struct perf_sample_data *data,
5102 			struct perf_event *event)
5103 {
5104 	u64 sample_type = data->type;
5105 
5106 	perf_output_put(handle, *header);
5107 
5108 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
5109 		perf_output_put(handle, data->id);
5110 
5111 	if (sample_type & PERF_SAMPLE_IP)
5112 		perf_output_put(handle, data->ip);
5113 
5114 	if (sample_type & PERF_SAMPLE_TID)
5115 		perf_output_put(handle, data->tid_entry);
5116 
5117 	if (sample_type & PERF_SAMPLE_TIME)
5118 		perf_output_put(handle, data->time);
5119 
5120 	if (sample_type & PERF_SAMPLE_ADDR)
5121 		perf_output_put(handle, data->addr);
5122 
5123 	if (sample_type & PERF_SAMPLE_ID)
5124 		perf_output_put(handle, data->id);
5125 
5126 	if (sample_type & PERF_SAMPLE_STREAM_ID)
5127 		perf_output_put(handle, data->stream_id);
5128 
5129 	if (sample_type & PERF_SAMPLE_CPU)
5130 		perf_output_put(handle, data->cpu_entry);
5131 
5132 	if (sample_type & PERF_SAMPLE_PERIOD)
5133 		perf_output_put(handle, data->period);
5134 
5135 	if (sample_type & PERF_SAMPLE_READ)
5136 		perf_output_read(handle, event);
5137 
5138 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5139 		if (data->callchain) {
5140 		if (data->callchain) {
5141 			int size = 1 + data->callchain->nr;
5145 			size *= sizeof(u64);
5146 
5147 			__output_copy(handle, data->callchain, size);
5148 		} else {
5149 			u64 nr = 0;
5150 			perf_output_put(handle, nr);
5151 		}
5152 	}
5153 
5154 	if (sample_type & PERF_SAMPLE_RAW) {
5155 		if (data->raw) {
5156 			perf_output_put(handle, data->raw->size);
5157 			__output_copy(handle, data->raw->data,
5158 					   data->raw->size);
5159 		} else {
5160 			struct {
5161 				u32	size;
5162 				u32	data;
5163 			} raw = {
5164 				.size = sizeof(u32),
5165 				.data = 0,
5166 			};
5167 			perf_output_put(handle, raw);
5168 		}
5169 	}
5170 
5171 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5172 		if (data->br_stack) {
5173 			size_t size;
5174 
5175 			size = data->br_stack->nr
5176 			     * sizeof(struct perf_branch_entry);
5177 
5178 			perf_output_put(handle, data->br_stack->nr);
5179 			perf_output_copy(handle, data->br_stack->entries, size);
5180 		} else {
5181 			/*
5182 			 * We always store at least the value of nr.
5183 			 */
5184 			u64 nr = 0;
5185 			perf_output_put(handle, nr);
5186 		}
5187 	}
5188 
5189 	if (sample_type & PERF_SAMPLE_REGS_USER) {
5190 		u64 abi = data->regs_user.abi;
5191 
5192 		/*
5193 		 * If there are no regs to dump, signal it through the
5194 		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5195 		 */
5196 		perf_output_put(handle, abi);
5197 
5198 		if (abi) {
5199 			u64 mask = event->attr.sample_regs_user;
5200 			perf_output_sample_regs(handle,
5201 						data->regs_user.regs,
5202 						mask);
5203 		}
5204 	}
5205 
5206 	if (sample_type & PERF_SAMPLE_STACK_USER) {
5207 		perf_output_sample_ustack(handle,
5208 					  data->stack_user_size,
5209 					  data->regs_user.regs);
5210 	}
5211 
5212 	if (sample_type & PERF_SAMPLE_WEIGHT)
5213 		perf_output_put(handle, data->weight);
5214 
5215 	if (sample_type & PERF_SAMPLE_DATA_SRC)
5216 		perf_output_put(handle, data->data_src.val);
5217 
5218 	if (sample_type & PERF_SAMPLE_TRANSACTION)
5219 		perf_output_put(handle, data->txn);
5220 
5221 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
5222 		u64 abi = data->regs_intr.abi;
5223 		/*
5224 		 * If there are no regs to dump, signal it through the
5225 		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5226 		 */
5227 		perf_output_put(handle, abi);
5228 
5229 		if (abi) {
5230 			u64 mask = event->attr.sample_regs_intr;
5231 
5232 			perf_output_sample_regs(handle,
5233 						data->regs_intr.regs,
5234 						mask);
5235 		}
5236 	}
5237 
5238 	if (!event->attr.watermark) {
5239 		int wakeup_events = event->attr.wakeup_events;
5240 
5241 		if (wakeup_events) {
5242 			struct ring_buffer *rb = handle->rb;
5243 			int events = local_inc_return(&rb->events);
5244 
5245 			if (events >= wakeup_events) {
5246 				local_sub(wakeup_events, &rb->events);
5247 				local_inc(&rb->wakeup);
5248 			}
5249 		}
5250 	}
5251 }
5252 
5253 void perf_prepare_sample(struct perf_event_header *header,
5254 			 struct perf_sample_data *data,
5255 			 struct perf_event *event,
5256 			 struct pt_regs *regs)
5257 {
5258 	u64 sample_type = event->attr.sample_type;
5259 
5260 	header->type = PERF_RECORD_SAMPLE;
5261 	header->size = sizeof(*header) + event->header_size;
5262 
5263 	header->misc = 0;
5264 	header->misc |= perf_misc_flags(regs);
5265 
5266 	__perf_event_header__init_id(header, data, event);
5267 
5268 	if (sample_type & PERF_SAMPLE_IP)
5269 		data->ip = perf_instruction_pointer(regs);
5270 
5271 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5272 		int size = 1;
5273 
5274 		data->callchain = perf_callchain(event, regs);
5275 
5276 		if (data->callchain)
5277 			size += data->callchain->nr;
5278 
5279 		header->size += size * sizeof(u64);
5280 	}
5281 
5282 	if (sample_type & PERF_SAMPLE_RAW) {
5283 		int size = sizeof(u32);
5284 
5285 		if (data->raw)
5286 			size += data->raw->size;
5287 		else
5288 			size += sizeof(u32);
5289 
5290 		WARN_ON_ONCE(size & (sizeof(u64)-1));
5291 		header->size += size;
5292 	}
5293 
5294 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5295 		int size = sizeof(u64); /* nr */
5296 		if (data->br_stack) {
5297 			size += data->br_stack->nr
5298 			      * sizeof(struct perf_branch_entry);
5299 		}
5300 		header->size += size;
5301 	}
5302 
5303 	if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
5304 		perf_sample_regs_user(&data->regs_user, regs,
5305 				      &data->regs_user_copy);
5306 
5307 	if (sample_type & PERF_SAMPLE_REGS_USER) {
5308 		/* regs dump ABI info */
5309 		int size = sizeof(u64);
5310 
5311 		if (data->regs_user.regs) {
5312 			u64 mask = event->attr.sample_regs_user;
5313 			size += hweight64(mask) * sizeof(u64);
5314 		}
5315 
5316 		header->size += size;
5317 	}
5318 
5319 	if (sample_type & PERF_SAMPLE_STACK_USER) {
5320 		/*
5321 		 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
5322 		 * processed as the last one, or an additional check must be
5323 		 * added when a new sample type is introduced, because we
5324 		 * could eat up the rest of the sample size.
5325 		 */
5326 		u16 stack_size = event->attr.sample_stack_user;
5327 		u16 size = sizeof(u64);
5328 
5329 		stack_size = perf_sample_ustack_size(stack_size, header->size,
5330 						     data->regs_user.regs);
5331 
5332 		/*
5333 		 * If there is something to dump, add space for the dump
5334 		 * itself and for the field that tells the dynamic size,
5335 		 * which is how many have been actually dumped.
5336 		 */
5337 		if (stack_size)
5338 			size += sizeof(u64) + stack_size;
5339 
5340 		data->stack_user_size = stack_size;
5341 		header->size += size;
5342 	}
5343 
5344 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
5345 		/* regs dump ABI info */
5346 		int size = sizeof(u64);
5347 
5348 		perf_sample_regs_intr(&data->regs_intr, regs);
5349 
5350 		if (data->regs_intr.regs) {
5351 			u64 mask = event->attr.sample_regs_intr;
5352 
5353 			size += hweight64(mask) * sizeof(u64);
5354 		}
5355 
5356 		header->size += size;
5357 	}
5358 }
5359 
5360 void perf_event_output(struct perf_event *event,
5361 			struct perf_sample_data *data,
5362 			struct pt_regs *regs)
5363 {
5364 	struct perf_output_handle handle;
5365 	struct perf_event_header header;
5366 
5367 	/* protect the callchain buffers */
5368 	rcu_read_lock();
5369 
5370 	perf_prepare_sample(&header, data, event, regs);
5371 
5372 	if (perf_output_begin(&handle, event, header.size))
5373 		goto exit;
5374 
5375 	perf_output_sample(&handle, &header, data, event);
5376 
5377 	perf_output_end(&handle);
5378 
5379 exit:
5380 	rcu_read_unlock();
5381 }
5382 
5383 /*
5384  * read event_id
5385  */
5386 
5387 struct perf_read_event {
5388 	struct perf_event_header	header;
5389 
5390 	u32				pid;
5391 	u32				tid;
5392 };
5393 
5394 static void
5395 perf_event_read_event(struct perf_event *event,
5396 			struct task_struct *task)
5397 {
5398 	struct perf_output_handle handle;
5399 	struct perf_sample_data sample;
5400 	struct perf_read_event read_event = {
5401 		.header = {
5402 			.type = PERF_RECORD_READ,
5403 			.misc = 0,
5404 			.size = sizeof(read_event) + event->read_size,
5405 		},
5406 		.pid = perf_event_pid(event, task),
5407 		.tid = perf_event_tid(event, task),
5408 	};
5409 	int ret;
5410 
5411 	perf_event_header__init_id(&read_event.header, &sample, event);
5412 	ret = perf_output_begin(&handle, event, read_event.header.size);
5413 	if (ret)
5414 		return;
5415 
5416 	perf_output_put(&handle, read_event);
5417 	perf_output_read(&handle, event);
5418 	perf_event__output_id_sample(event, &handle, &sample);
5419 
5420 	perf_output_end(&handle);
5421 }
5422 
5423 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
5424 
5425 static void
5426 perf_event_aux_ctx(struct perf_event_context *ctx,
5427 		   perf_event_aux_output_cb output,
5428 		   void *data)
5429 {
5430 	struct perf_event *event;
5431 
5432 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5433 		if (event->state < PERF_EVENT_STATE_INACTIVE)
5434 			continue;
5435 		if (!event_filter_match(event))
5436 			continue;
5437 		output(event, data);
5438 	}
5439 }
5440 
5441 static void
5442 perf_event_aux(perf_event_aux_output_cb output, void *data,
5443 	       struct perf_event_context *task_ctx)
5444 {
5445 	struct perf_cpu_context *cpuctx;
5446 	struct perf_event_context *ctx;
5447 	struct pmu *pmu;
5448 	int ctxn;
5449 
5450 	rcu_read_lock();
5451 	list_for_each_entry_rcu(pmu, &pmus, entry) {
5452 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
5453 		if (cpuctx->unique_pmu != pmu)
5454 			goto next;
5455 		perf_event_aux_ctx(&cpuctx->ctx, output, data);
5456 		if (task_ctx)
5457 			goto next;
5458 		ctxn = pmu->task_ctx_nr;
5459 		if (ctxn < 0)
5460 			goto next;
5461 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
5462 		if (ctx)
5463 			perf_event_aux_ctx(ctx, output, data);
5464 next:
5465 		put_cpu_ptr(pmu->pmu_cpu_context);
5466 	}
5467 
5468 	if (task_ctx) {
5469 		preempt_disable();
5470 		perf_event_aux_ctx(task_ctx, output, data);
5471 		preempt_enable();
5472 	}
5473 	rcu_read_unlock();
5474 }
5475 
5476 /*
5477  * task tracking -- fork/exit
5478  *
5479  * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
5480  */
5481 
5482 struct perf_task_event {
5483 	struct task_struct		*task;
5484 	struct perf_event_context	*task_ctx;
5485 
5486 	struct {
5487 		struct perf_event_header	header;
5488 
5489 		u32				pid;
5490 		u32				ppid;
5491 		u32				tid;
5492 		u32				ptid;
5493 		u64				time;
5494 	} event_id;
5495 };
5496 
5497 static int perf_event_task_match(struct perf_event *event)
5498 {
5499 	return event->attr.comm  || event->attr.mmap ||
5500 	       event->attr.mmap2 || event->attr.mmap_data ||
5501 	       event->attr.task;
5502 }
5503 
5504 static void perf_event_task_output(struct perf_event *event,
5505 				   void *data)
5506 {
5507 	struct perf_task_event *task_event = data;
5508 	struct perf_output_handle handle;
5509 	struct perf_sample_data	sample;
5510 	struct task_struct *task = task_event->task;
5511 	int ret, size = task_event->event_id.header.size;
5512 
5513 	if (!perf_event_task_match(event))
5514 		return;
5515 
5516 	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
5517 
5518 	ret = perf_output_begin(&handle, event,
5519 				task_event->event_id.header.size);
5520 	if (ret)
5521 		goto out;
5522 
5523 	task_event->event_id.pid = perf_event_pid(event, task);
5524 	task_event->event_id.ppid = perf_event_pid(event, current);
5525 
5526 	task_event->event_id.tid = perf_event_tid(event, task);
5527 	task_event->event_id.ptid = perf_event_tid(event, current);
5528 
5529 	task_event->event_id.time = perf_event_clock(event);
5530 
5531 	perf_output_put(&handle, task_event->event_id);
5532 
5533 	perf_event__output_id_sample(event, &handle, &sample);
5534 
5535 	perf_output_end(&handle);
5536 out:
5537 	task_event->event_id.header.size = size;
5538 }
5539 
5540 static void perf_event_task(struct task_struct *task,
5541 			      struct perf_event_context *task_ctx,
5542 			      int new)
5543 {
5544 	struct perf_task_event task_event;
5545 
5546 	if (!atomic_read(&nr_comm_events) &&
5547 	    !atomic_read(&nr_mmap_events) &&
5548 	    !atomic_read(&nr_task_events))
5549 		return;
5550 
5551 	task_event = (struct perf_task_event){
5552 		.task	  = task,
5553 		.task_ctx = task_ctx,
5554 		.event_id    = {
5555 			.header = {
5556 				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
5557 				.misc = 0,
5558 				.size = sizeof(task_event.event_id),
5559 			},
5560 			/* .pid  */
5561 			/* .ppid */
5562 			/* .tid  */
5563 			/* .ptid */
5564 			/* .time */
5565 		},
5566 	};
5567 
5568 	perf_event_aux(perf_event_task_output,
5569 		       &task_event,
5570 		       task_ctx);
5571 }
5572 
5573 void perf_event_fork(struct task_struct *task)
5574 {
5575 	perf_event_task(task, NULL, 1);
5576 }
5577 
5578 /*
5579  * comm tracking
5580  */
5581 
5582 struct perf_comm_event {
5583 	struct task_struct	*task;
5584 	char			*comm;
5585 	int			comm_size;
5586 
5587 	struct {
5588 		struct perf_event_header	header;
5589 
5590 		u32				pid;
5591 		u32				tid;
5592 	} event_id;
5593 };
5594 
5595 static int perf_event_comm_match(struct perf_event *event)
5596 {
5597 	return event->attr.comm;
5598 }
5599 
5600 static void perf_event_comm_output(struct perf_event *event,
5601 				   void *data)
5602 {
5603 	struct perf_comm_event *comm_event = data;
5604 	struct perf_output_handle handle;
5605 	struct perf_sample_data sample;
5606 	int size = comm_event->event_id.header.size;
5607 	int ret;
5608 
5609 	if (!perf_event_comm_match(event))
5610 		return;
5611 
5612 	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
5613 	ret = perf_output_begin(&handle, event,
5614 				comm_event->event_id.header.size);
5615 
5616 	if (ret)
5617 		goto out;
5618 
5619 	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
5620 	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
5621 
5622 	perf_output_put(&handle, comm_event->event_id);
5623 	__output_copy(&handle, comm_event->comm,
5624 				   comm_event->comm_size);
5625 
5626 	perf_event__output_id_sample(event, &handle, &sample);
5627 
5628 	perf_output_end(&handle);
5629 out:
5630 	comm_event->event_id.header.size = size;
5631 }
5632 
5633 static void perf_event_comm_event(struct perf_comm_event *comm_event)
5634 {
5635 	char comm[TASK_COMM_LEN];
5636 	unsigned int size;
5637 
5638 	memset(comm, 0, sizeof(comm));
5639 	strlcpy(comm, comm_event->task->comm, sizeof(comm));
5640 	size = ALIGN(strlen(comm)+1, sizeof(u64));
5641 
5642 	comm_event->comm = comm;
5643 	comm_event->comm_size = size;
5644 
5645 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
5646 
5647 	perf_event_aux(perf_event_comm_output,
5648 		       comm_event,
5649 		       NULL);
5650 }
5651 
5652 void perf_event_comm(struct task_struct *task, bool exec)
5653 {
5654 	struct perf_comm_event comm_event;
5655 
5656 	if (!atomic_read(&nr_comm_events))
5657 		return;
5658 
5659 	comm_event = (struct perf_comm_event){
5660 		.task	= task,
5661 		/* .comm      */
5662 		/* .comm_size */
5663 		.event_id  = {
5664 			.header = {
5665 				.type = PERF_RECORD_COMM,
5666 				.misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
5667 				/* .size */
5668 			},
5669 			/* .pid */
5670 			/* .tid */
5671 		},
5672 	};
5673 
5674 	perf_event_comm_event(&comm_event);
5675 }
5676 
5677 /*
5678  * mmap tracking
5679  */
5680 
5681 struct perf_mmap_event {
5682 	struct vm_area_struct	*vma;
5683 
5684 	const char		*file_name;
5685 	int			file_size;
5686 	int			maj, min;
5687 	u64			ino;
5688 	u64			ino_generation;
5689 	u32			prot, flags;
5690 
5691 	struct {
5692 		struct perf_event_header	header;
5693 
5694 		u32				pid;
5695 		u32				tid;
5696 		u64				start;
5697 		u64				len;
5698 		u64				pgoff;
5699 	} event_id;
5700 };
5701 
5702 static int perf_event_mmap_match(struct perf_event *event,
5703 				 void *data)
5704 {
5705 	struct perf_mmap_event *mmap_event = data;
5706 	struct vm_area_struct *vma = mmap_event->vma;
5707 	int executable = vma->vm_flags & VM_EXEC;
5708 
5709 	return (!executable && event->attr.mmap_data) ||
5710 	       (executable && (event->attr.mmap || event->attr.mmap2));
5711 }
5712 
5713 static void perf_event_mmap_output(struct perf_event *event,
5714 				   void *data)
5715 {
5716 	struct perf_mmap_event *mmap_event = data;
5717 	struct perf_output_handle handle;
5718 	struct perf_sample_data sample;
5719 	int size = mmap_event->event_id.header.size;
5720 	int ret;
5721 
5722 	if (!perf_event_mmap_match(event, data))
5723 		return;
5724 
5725 	if (event->attr.mmap2) {
5726 		mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
5727 		mmap_event->event_id.header.size += sizeof(mmap_event->maj);
5728 		mmap_event->event_id.header.size += sizeof(mmap_event->min);
5729 		mmap_event->event_id.header.size += sizeof(mmap_event->ino);
5730 		mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
5731 		mmap_event->event_id.header.size += sizeof(mmap_event->prot);
5732 		mmap_event->event_id.header.size += sizeof(mmap_event->flags);
5733 	}
5734 
5735 	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
5736 	ret = perf_output_begin(&handle, event,
5737 				mmap_event->event_id.header.size);
5738 	if (ret)
5739 		goto out;
5740 
5741 	mmap_event->event_id.pid = perf_event_pid(event, current);
5742 	mmap_event->event_id.tid = perf_event_tid(event, current);
5743 
5744 	perf_output_put(&handle, mmap_event->event_id);
5745 
5746 	if (event->attr.mmap2) {
5747 		perf_output_put(&handle, mmap_event->maj);
5748 		perf_output_put(&handle, mmap_event->min);
5749 		perf_output_put(&handle, mmap_event->ino);
5750 		perf_output_put(&handle, mmap_event->ino_generation);
5751 		perf_output_put(&handle, mmap_event->prot);
5752 		perf_output_put(&handle, mmap_event->flags);
5753 	}
5754 
5755 	__output_copy(&handle, mmap_event->file_name,
5756 				   mmap_event->file_size);
5757 
5758 	perf_event__output_id_sample(event, &handle, &sample);
5759 
5760 	perf_output_end(&handle);
5761 out:
5762 	mmap_event->event_id.header.size = size;
5763 }
5764 
5765 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
5766 {
5767 	struct vm_area_struct *vma = mmap_event->vma;
5768 	struct file *file = vma->vm_file;
5769 	int maj = 0, min = 0;
5770 	u64 ino = 0, gen = 0;
5771 	u32 prot = 0, flags = 0;
5772 	unsigned int size;
5773 	char tmp[16];
5774 	char *buf = NULL;
5775 	char *name;
5776 
5777 	if (file) {
5778 		struct inode *inode;
5779 		dev_t dev;
5780 
5781 		buf = kmalloc(PATH_MAX, GFP_KERNEL);
5782 		if (!buf) {
5783 			name = "//enomem";
5784 			goto cpy_name;
5785 		}
5786 		/*
5787 		 * d_path() works from the end of the buffer backwards, so we
5788 		 * need to add enough zero bytes after the string to handle
5789 		 * the 64bit alignment we do later.
5790 		 */
5791 		name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
5792 		if (IS_ERR(name)) {
5793 			name = "//toolong";
5794 			goto cpy_name;
5795 		}
5796 		inode = file_inode(vma->vm_file);
5797 		dev = inode->i_sb->s_dev;
5798 		ino = inode->i_ino;
5799 		gen = inode->i_generation;
5800 		maj = MAJOR(dev);
5801 		min = MINOR(dev);
5802 
5803 		if (vma->vm_flags & VM_READ)
5804 			prot |= PROT_READ;
5805 		if (vma->vm_flags & VM_WRITE)
5806 			prot |= PROT_WRITE;
5807 		if (vma->vm_flags & VM_EXEC)
5808 			prot |= PROT_EXEC;
5809 
5810 		if (vma->vm_flags & VM_MAYSHARE)
5811 			flags = MAP_SHARED;
5812 		else
5813 			flags = MAP_PRIVATE;
5814 
5815 		if (vma->vm_flags & VM_DENYWRITE)
5816 			flags |= MAP_DENYWRITE;
5817 		if (vma->vm_flags & VM_MAYEXEC)
5818 			flags |= MAP_EXECUTABLE;
5819 		if (vma->vm_flags & VM_LOCKED)
5820 			flags |= MAP_LOCKED;
5821 		if (vma->vm_flags & VM_HUGETLB)
5822 			flags |= MAP_HUGETLB;
5823 
5824 		goto got_name;
5825 	} else {
5826 		if (vma->vm_ops && vma->vm_ops->name) {
5827 			name = (char *) vma->vm_ops->name(vma);
5828 			if (name)
5829 				goto cpy_name;
5830 		}
5831 
5832 		name = (char *)arch_vma_name(vma);
5833 		if (name)
5834 			goto cpy_name;
5835 
5836 		if (vma->vm_start <= vma->vm_mm->start_brk &&
5837 				vma->vm_end >= vma->vm_mm->brk) {
5838 			name = "[heap]";
5839 			goto cpy_name;
5840 		}
5841 		if (vma->vm_start <= vma->vm_mm->start_stack &&
5842 				vma->vm_end >= vma->vm_mm->start_stack) {
5843 			name = "[stack]";
5844 			goto cpy_name;
5845 		}
5846 
5847 		name = "//anon";
5848 		goto cpy_name;
5849 	}
5850 
5851 cpy_name:
5852 	strlcpy(tmp, name, sizeof(tmp));
5853 	name = tmp;
5854 got_name:
5855 	/*
5856 	 * Since our buffer works in 8 byte units we need to align our string
5857 	 * size to a multiple of 8. However, we must guarantee the tail end is
5858 	 * zeroed out to avoid leaking random bits to userspace.
5859 	 */
5860 	size = strlen(name)+1;
5861 	while (!IS_ALIGNED(size, sizeof(u64)))
5862 		name[size++] = '\0';
5863 
5864 	mmap_event->file_name = name;
5865 	mmap_event->file_size = size;
5866 	mmap_event->maj = maj;
5867 	mmap_event->min = min;
5868 	mmap_event->ino = ino;
5869 	mmap_event->ino_generation = gen;
5870 	mmap_event->prot = prot;
5871 	mmap_event->flags = flags;
5872 
5873 	if (!(vma->vm_flags & VM_EXEC))
5874 		mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
5875 
5876 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
5877 
5878 	perf_event_aux(perf_event_mmap_output,
5879 		       mmap_event,
5880 		       NULL);
5881 
5882 	kfree(buf);
5883 }
5884 
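/*
 * Build and emit a PERF_RECORD_MMAP for a newly mapped vma, provided
 * anyone is actually listening for mmap events.
 */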
5885 void perf_event_mmap(struct vm_area_struct *vma)
5886 {
5887 	struct perf_mmap_event mmap_event;
5888 
5889 	if (!atomic_read(&nr_mmap_events))
5890 		return;
5891 
5892 	mmap_event = (struct perf_mmap_event){
5893 		.vma	= vma,
5894 		/* .file_name */
5895 		/* .file_size */
5896 		.event_id  = {
5897 			.header = {
5898 				.type = PERF_RECORD_MMAP,
5899 				.misc = PERF_RECORD_MISC_USER,
5900 				/* .size */
5901 			},
5902 			/* .pid */
5903 			/* .tid */
5904 			.start  = vma->vm_start,
5905 			.len    = vma->vm_end - vma->vm_start,
5906 			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
5907 		},
5908 		/* .maj (attr_mmap2 only) */
5909 		/* .min (attr_mmap2 only) */
5910 		/* .ino (attr_mmap2 only) */
5911 		/* .ino_generation (attr_mmap2 only) */
5912 		/* .prot (attr_mmap2 only) */
5913 		/* .flags (attr_mmap2 only) */
5914 	};
5915 
5916 	perf_event_mmap_event(&mmap_event);
5917 }
5918 
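/*
 * Emit a PERF_RECORD_AUX describing a newly written chunk of the
 * AUX area (offset, size, flags).
 */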
5919 void perf_event_aux_event(struct perf_event *event, unsigned long head,
5920 			  unsigned long size, u64 flags)
5921 {
5922 	struct perf_output_handle handle;
5923 	struct perf_sample_data sample;
5924 	struct perf_aux_event {
5925 		struct perf_event_header	header;
5926 		u64				offset;
5927 		u64				size;
5928 		u64				flags;
5929 	} rec = {
5930 		.header = {
5931 			.type = PERF_RECORD_AUX,
5932 			.misc = 0,
5933 			.size = sizeof(rec),
5934 		},
5935 		.offset		= head,
5936 		.size		= size,
5937 		.flags		= flags,
5938 	};
5939 	int ret;
5940 
5941 	perf_event_header__init_id(&rec.header, &sample, event);
5942 	ret = perf_output_begin(&handle, event, rec.header.size);
5943 
5944 	if (ret)
5945 		return;
5946 
5947 	perf_output_put(&handle, rec);
5948 	perf_event__output_id_sample(event, &handle, &sample);
5949 
5950 	perf_output_end(&handle);
5951 }
5952 
5953 /*
5954  * Lost/dropped samples logging
5955  */
5956 void perf_log_lost_samples(struct perf_event *event, u64 lost)
5957 {
5958 	struct perf_output_handle handle;
5959 	struct perf_sample_data sample;
5960 	int ret;
5961 
5962 	struct {
5963 		struct perf_event_header	header;
5964 		u64				lost;
5965 	} lost_samples_event = {
5966 		.header = {
5967 			.type = PERF_RECORD_LOST_SAMPLES,
5968 			.misc = 0,
5969 			.size = sizeof(lost_samples_event),
5970 		},
5971 		.lost		= lost,
5972 	};
5973 
5974 	perf_event_header__init_id(&lost_samples_event.header, &sample, event);
5975 
5976 	ret = perf_output_begin(&handle, event,
5977 				lost_samples_event.header.size);
5978 	if (ret)
5979 		return;
5980 
5981 	perf_output_put(&handle, lost_samples_event);
5982 	perf_event__output_id_sample(event, &handle, &sample);
5983 	perf_output_end(&handle);
5984 }
5985 
5986 /*
5987  * IRQ throttle logging
5988  */
5989 
5990 static void perf_log_throttle(struct perf_event *event, int enable)
5991 {
5992 	struct perf_output_handle handle;
5993 	struct perf_sample_data sample;
5994 	int ret;
5995 
5996 	struct {
5997 		struct perf_event_header	header;
5998 		u64				time;
5999 		u64				id;
6000 		u64				stream_id;
6001 	} throttle_event = {
6002 		.header = {
6003 			.type = PERF_RECORD_THROTTLE,
6004 			.misc = 0,
6005 			.size = sizeof(throttle_event),
6006 		},
6007 		.time		= perf_event_clock(event),
6008 		.id		= primary_event_id(event),
6009 		.stream_id	= event->id,
6010 	};
6011 
6012 	if (enable)
6013 		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
6014 
6015 	perf_event_header__init_id(&throttle_event.header, &sample, event);
6016 
6017 	ret = perf_output_begin(&handle, event,
6018 				throttle_event.header.size);
6019 	if (ret)
6020 		return;
6021 
6022 	perf_output_put(&handle, throttle_event);
6023 	perf_event__output_id_sample(event, &handle, &sample);
6024 	perf_output_end(&handle);
6025 }
6026 
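/*
 * Emit PERF_RECORD_ITRACE_START the first time an instruction
 * tracing PMU starts producing data for this event.
 */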
6027 static void perf_log_itrace_start(struct perf_event *event)
6028 {
6029 	struct perf_output_handle handle;
6030 	struct perf_sample_data sample;
6031 	struct perf_aux_event {
6032 		struct perf_event_header        header;
6033 		u32				pid;
6034 		u32				tid;
6035 	} rec;
6036 	int ret;
6037 
6038 	if (event->parent)
6039 		event = event->parent;
6040 
6041 	if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
6042 	    event->hw.itrace_started)
6043 		return;
6044 
6045 	event->hw.itrace_started = 1;
6046 
6047 	rec.header.type	= PERF_RECORD_ITRACE_START;
6048 	rec.header.misc	= 0;
6049 	rec.header.size	= sizeof(rec);
6050 	rec.pid	= perf_event_pid(event, current);
6051 	rec.tid	= perf_event_tid(event, current);
6052 
6053 	perf_event_header__init_id(&rec.header, &sample, event);
6054 	ret = perf_output_begin(&handle, event, rec.header.size);
6055 
6056 	if (ret)
6057 		return;
6058 
6059 	perf_output_put(&handle, rec);
6060 	perf_event__output_id_sample(event, &handle, &sample);
6061 
6062 	perf_output_end(&handle);
6063 }
6064 
6065 /*
6066  * Generic event overflow handling, sampling.
6067  */
6068 
6069 static int __perf_event_overflow(struct perf_event *event,
6070 				   int throttle, struct perf_sample_data *data,
6071 				   struct pt_regs *regs)
6072 {
6073 	int events = atomic_read(&event->event_limit);
6074 	struct hw_perf_event *hwc = &event->hw;
6075 	u64 seq;
6076 	int ret = 0;
6077 
6078 	/*
6079 	 * Non-sampling counters might still use the PMI to fold short
6080 	 * hardware counters; ignore those.
6081 	 */
6082 	if (unlikely(!is_sampling_event(event)))
6083 		return 0;
6084 
6085 	seq = __this_cpu_read(perf_throttled_seq);
6086 	if (seq != hwc->interrupts_seq) {
6087 		hwc->interrupts_seq = seq;
6088 		hwc->interrupts = 1;
6089 	} else {
6090 		hwc->interrupts++;
6091 		if (unlikely(throttle
6092 			     && hwc->interrupts >= max_samples_per_tick)) {
6093 			__this_cpu_inc(perf_throttled_count);
6094 			hwc->interrupts = MAX_INTERRUPTS;
6095 			perf_log_throttle(event, 0);
6096 			tick_nohz_full_kick();
6097 			ret = 1;
6098 		}
6099 	}
6100 
6101 	if (event->attr.freq) {
6102 		u64 now = perf_clock();
6103 		s64 delta = now - hwc->freq_time_stamp;
6104 
6105 		hwc->freq_time_stamp = now;
6106 
6107 		if (delta > 0 && delta < 2*TICK_NSEC)
6108 			perf_adjust_period(event, delta, hwc->last_period, true);
6109 	}
6110 
6111 	/*
6112 	 * XXX event_limit might not quite work as expected on inherited
6113 	 * events
6114 	 */
6115 
6116 	event->pending_kill = POLL_IN;
6117 	if (events && atomic_dec_and_test(&event->event_limit)) {
6118 		ret = 1;
6119 		event->pending_kill = POLL_HUP;
6120 		event->pending_disable = 1;
6121 		irq_work_queue(&event->pending);
6122 	}
6123 
6124 	if (event->overflow_handler)
6125 		event->overflow_handler(event, data, regs);
6126 	else
6127 		perf_event_output(event, data, regs);
6128 
6129 	if (event->fasync && event->pending_kill) {
6130 		event->pending_wakeup = 1;
6131 		irq_work_queue(&event->pending);
6132 	}
6133 
6134 	return ret;
6135 }
6136 
6137 int perf_event_overflow(struct perf_event *event,
6138 			  struct perf_sample_data *data,
6139 			  struct pt_regs *regs)
6140 {
6141 	return __perf_event_overflow(event, 1, data, regs);
6142 }
6143 
6144 /*
6145  * Generic software event infrastructure
6146  */
6147 
6148 struct swevent_htable {
6149 	struct swevent_hlist		*swevent_hlist;
6150 	struct mutex			hlist_mutex;
6151 	int				hlist_refcount;
6152 
6153 	/* Recursion avoidance in each context */
6154 	int				recursion[PERF_NR_CONTEXTS];
6155 
6156 	/* Keeps track of cpu being initialized/exited */
6157 	bool				online;
6158 };
6159 
6160 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
6161 
6162 /*
6163  * We directly increment event->count and keep a second value in
6164  * event->hw.period_left to count intervals. This period counter
6165  * is kept in the range [-sample_period, 0] so that we can use the
6166  * sign as trigger.
6167  */
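/*
 * Example: with a period of 100 and period_left at 250, three
 * periods have elapsed: nr = (100 + 250) / 100 = 3 is returned
 * and period_left is reset to -50.
 */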
6168 
6169 u64 perf_swevent_set_period(struct perf_event *event)
6170 {
6171 	struct hw_perf_event *hwc = &event->hw;
6172 	u64 period = hwc->last_period;
6173 	u64 nr, offset;
6174 	s64 old, val;
6175 
6176 	hwc->last_period = hwc->sample_period;
6177 
6178 again:
6179 	old = val = local64_read(&hwc->period_left);
6180 	if (val < 0)
6181 		return 0;
6182 
6183 	nr = div64_u64(period + val, period);
6184 	offset = nr * period;
6185 	val -= offset;
6186 	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
6187 		goto again;
6188 
6189 	return nr;
6190 }
6191 
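/*
 * Deliver one __perf_event_overflow() per elapsed period. If the
 * caller did not pre-compute the number of overflows, derive it
 * from period_left via perf_swevent_set_period().
 */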
6192 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
6193 				    struct perf_sample_data *data,
6194 				    struct pt_regs *regs)
6195 {
6196 	struct hw_perf_event *hwc = &event->hw;
6197 	int throttle = 0;
6198 
6199 	if (!overflow)
6200 		overflow = perf_swevent_set_period(event);
6201 
6202 	if (hwc->interrupts == MAX_INTERRUPTS)
6203 		return;
6204 
6205 	for (; overflow; overflow--) {
6206 		if (__perf_event_overflow(event, throttle,
6207 					    data, regs)) {
6208 			/*
6209 			 * We inhibit the overflow from happening when
6210 			 * hwc->interrupts == MAX_INTERRUPTS.
6211 			 */
6212 			break;
6213 		}
6214 		throttle = 1;
6215 	}
6216 }
6217 
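/*
 * Count the event and, for sampling events, decide whether enough
 * period has accumulated to warrant an overflow. The early returns
 * short-circuit the common counting-only case.
 */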
6218 static void perf_swevent_event(struct perf_event *event, u64 nr,
6219 			       struct perf_sample_data *data,
6220 			       struct pt_regs *regs)
6221 {
6222 	struct hw_perf_event *hwc = &event->hw;
6223 
6224 	local64_add(nr, &event->count);
6225 
6226 	if (!regs)
6227 		return;
6228 
6229 	if (!is_sampling_event(event))
6230 		return;
6231 
6232 	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
6233 		data->period = nr;
6234 		return perf_swevent_overflow(event, 1, data, regs);
6235 	} else
6236 		data->period = event->hw.last_period;
6237 
6238 	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
6239 		return perf_swevent_overflow(event, 1, data, regs);
6240 
6241 	if (local64_add_negative(nr, &hwc->period_left))
6242 		return;
6243 
6244 	perf_swevent_overflow(event, 0, data, regs);
6245 }
6246 
6247 static int perf_exclude_event(struct perf_event *event,
6248 			      struct pt_regs *regs)
6249 {
6250 	if (event->hw.state & PERF_HES_STOPPED)
6251 		return 1;
6252 
6253 	if (regs) {
6254 		if (event->attr.exclude_user && user_mode(regs))
6255 			return 1;
6256 
6257 		if (event->attr.exclude_kernel && !user_mode(regs))
6258 			return 1;
6259 	}
6260 
6261 	return 0;
6262 }
6263 
6264 static int perf_swevent_match(struct perf_event *event,
6265 				enum perf_type_id type,
6266 				u32 event_id,
6267 				struct perf_sample_data *data,
6268 				struct pt_regs *regs)
6269 {
6270 	if (event->attr.type != type)
6271 		return 0;
6272 
6273 	if (event->attr.config != event_id)
6274 		return 0;
6275 
6276 	if (perf_exclude_event(event, regs))
6277 		return 0;
6278 
6279 	return 1;
6280 }
6281 
6282 static inline u64 swevent_hash(u64 type, u32 event_id)
6283 {
6284 	u64 val = event_id | (type << 32);
6285 
6286 	return hash_64(val, SWEVENT_HLIST_BITS);
6287 }
6288 
6289 static inline struct hlist_head *
6290 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
6291 {
6292 	u64 hash = swevent_hash(type, event_id);
6293 
6294 	return &hlist->heads[hash];
6295 }
6296 
6297 /* For the read side: lookup when events trigger */
6298 static inline struct hlist_head *
6299 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
6300 {
6301 	struct swevent_hlist *hlist;
6302 
6303 	hlist = rcu_dereference(swhash->swevent_hlist);
6304 	if (!hlist)
6305 		return NULL;
6306 
6307 	return __find_swevent_head(hlist, type, event_id);
6308 }
6309 
6310 /* For the event head insertion and removal in the hlist */
6311 static inline struct hlist_head *
6312 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
6313 {
6314 	struct swevent_hlist *hlist;
6315 	u32 event_id = event->attr.config;
6316 	u64 type = event->attr.type;
6317 
6318 	/*
6319 	 * Event scheduling is always serialized against hlist allocation
6320 	 * and release, which makes the protected version suitable here.
6321 	 * The context lock guarantees that.
6322 	 */
6323 	hlist = rcu_dereference_protected(swhash->swevent_hlist,
6324 					  lockdep_is_held(&event->ctx->lock));
6325 	if (!hlist)
6326 		return NULL;
6327 
6328 	return __find_swevent_head(hlist, type, event_id);
6329 }
6330 
6331 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
6332 				    u64 nr,
6333 				    struct perf_sample_data *data,
6334 				    struct pt_regs *regs)
6335 {
6336 	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
6337 	struct perf_event *event;
6338 	struct hlist_head *head;
6339 
6340 	rcu_read_lock();
6341 	head = find_swevent_head_rcu(swhash, type, event_id);
6342 	if (!head)
6343 		goto end;
6344 
6345 	hlist_for_each_entry_rcu(event, head, hlist_entry) {
6346 		if (perf_swevent_match(event, type, event_id, data, regs))
6347 			perf_swevent_event(event, nr, data, regs);
6348 	}
6349 end:
6350 	rcu_read_unlock();
6351 }
6352 
6353 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
6354 
6355 int perf_swevent_get_recursion_context(void)
6356 {
6357 	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
6358 
6359 	return get_recursion_context(swhash->recursion);
6360 }
6361 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
6362 
6363 inline void perf_swevent_put_recursion_context(int rctx)
6364 {
6365 	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
6366 
6367 	put_recursion_context(swhash->recursion, rctx);
6368 }
6369 
6370 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
6371 {
6372 	struct perf_sample_data data;
6373 
6374 	if (WARN_ON_ONCE(!regs))
6375 		return;
6376 
6377 	perf_sample_data_init(&data, addr, 0);
6378 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
6379 }
6380 
6381 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
6382 {
6383 	int rctx;
6384 
6385 	preempt_disable_notrace();
6386 	rctx = perf_swevent_get_recursion_context();
6387 	if (unlikely(rctx < 0))
6388 		goto fail;
6389 
6390 	___perf_sw_event(event_id, nr, regs, addr);
6391 
6392 	perf_swevent_put_recursion_context(rctx);
6393 fail:
6394 	preempt_enable_notrace();
6395 }
6396 
6397 static void perf_swevent_read(struct perf_event *event)
6398 {
6399 }
6400 
6401 static int perf_swevent_add(struct perf_event *event, int flags)
6402 {
6403 	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
6404 	struct hw_perf_event *hwc = &event->hw;
6405 	struct hlist_head *head;
6406 
6407 	if (is_sampling_event(event)) {
6408 		hwc->last_period = hwc->sample_period;
6409 		perf_swevent_set_period(event);
6410 	}
6411 
6412 	hwc->state = !(flags & PERF_EF_START);
6413 
6414 	head = find_swevent_head(swhash, event);
6415 	if (!head) {
6416 		/*
6417 		 * We can race with cpu hotplug code. Do not
6418 		 * WARN if the cpu just got unplugged.
6419 		 */
6420 		WARN_ON_ONCE(swhash->online);
6421 		return -EINVAL;
6422 	}
6423 
6424 	hlist_add_head_rcu(&event->hlist_entry, head);
6425 	perf_event_update_userpage(event);
6426 
6427 	return 0;
6428 }
6429 
6430 static void perf_swevent_del(struct perf_event *event, int flags)
6431 {
6432 	hlist_del_rcu(&event->hlist_entry);
6433 }
6434 
6435 static void perf_swevent_start(struct perf_event *event, int flags)
6436 {
6437 	event->hw.state = 0;
6438 }
6439 
6440 static void perf_swevent_stop(struct perf_event *event, int flags)
6441 {
6442 	event->hw.state = PERF_HES_STOPPED;
6443 }
6444 
6445 /* Deref the hlist from the update side */
6446 static inline struct swevent_hlist *
6447 swevent_hlist_deref(struct swevent_htable *swhash)
6448 {
6449 	return rcu_dereference_protected(swhash->swevent_hlist,
6450 					 lockdep_is_held(&swhash->hlist_mutex));
6451 }
6452 
6453 static void swevent_hlist_release(struct swevent_htable *swhash)
6454 {
6455 	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
6456 
6457 	if (!hlist)
6458 		return;
6459 
6460 	RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
6461 	kfree_rcu(hlist, rcu_head);
6462 }
6463 
6464 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
6465 {
6466 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6467 
6468 	mutex_lock(&swhash->hlist_mutex);
6469 
6470 	if (!--swhash->hlist_refcount)
6471 		swevent_hlist_release(swhash);
6472 
6473 	mutex_unlock(&swhash->hlist_mutex);
6474 }
6475 
6476 static void swevent_hlist_put(struct perf_event *event)
6477 {
6478 	int cpu;
6479 
6480 	for_each_possible_cpu(cpu)
6481 		swevent_hlist_put_cpu(event, cpu);
6482 }
6483 
6484 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
6485 {
6486 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6487 	int err = 0;
6488 
6489 	mutex_lock(&swhash->hlist_mutex);
6490 
6491 	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
6492 		struct swevent_hlist *hlist;
6493 
6494 		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
6495 		if (!hlist) {
6496 			err = -ENOMEM;
6497 			goto exit;
6498 		}
6499 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
6500 	}
6501 	swhash->hlist_refcount++;
6502 exit:
6503 	mutex_unlock(&swhash->hlist_mutex);
6504 
6505 	return err;
6506 }
6507 
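/*
 * Take a reference on every CPU's hlist, unwinding the CPUs already
 * acquired if any allocation fails.
 */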
6508 static int swevent_hlist_get(struct perf_event *event)
6509 {
6510 	int err;
6511 	int cpu, failed_cpu;
6512 
6513 	get_online_cpus();
6514 	for_each_possible_cpu(cpu) {
6515 		err = swevent_hlist_get_cpu(event, cpu);
6516 		if (err) {
6517 			failed_cpu = cpu;
6518 			goto fail;
6519 		}
6520 	}
6521 	put_online_cpus();
6522 
6523 	return 0;
6524 fail:
6525 	for_each_possible_cpu(cpu) {
6526 		if (cpu == failed_cpu)
6527 			break;
6528 		swevent_hlist_put_cpu(event, cpu);
6529 	}
6530 
6531 	put_online_cpus();
6532 	return err;
6533 }
6534 
6535 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
6536 
6537 static void sw_perf_event_destroy(struct perf_event *event)
6538 {
6539 	u64 event_id = event->attr.config;
6540 
6541 	WARN_ON(event->parent);
6542 
6543 	static_key_slow_dec(&perf_swevent_enabled[event_id]);
6544 	swevent_hlist_put(event);
6545 }
6546 
6547 static int perf_swevent_init(struct perf_event *event)
6548 {
6549 	u64 event_id = event->attr.config;
6550 
6551 	if (event->attr.type != PERF_TYPE_SOFTWARE)
6552 		return -ENOENT;
6553 
6554 	/*
6555 	 * no branch sampling for software events
6556 	 */
6557 	if (has_branch_stack(event))
6558 		return -EOPNOTSUPP;
6559 
6560 	switch (event_id) {
6561 	case PERF_COUNT_SW_CPU_CLOCK:
6562 	case PERF_COUNT_SW_TASK_CLOCK:
6563 		return -ENOENT;
6564 
6565 	default:
6566 		break;
6567 	}
6568 
6569 	if (event_id >= PERF_COUNT_SW_MAX)
6570 		return -ENOENT;
6571 
6572 	if (!event->parent) {
6573 		int err;
6574 
6575 		err = swevent_hlist_get(event);
6576 		if (err)
6577 			return err;
6578 
6579 		static_key_slow_inc(&perf_swevent_enabled[event_id]);
6580 		event->destroy = sw_perf_event_destroy;
6581 	}
6582 
6583 	return 0;
6584 }
6585 
6586 static struct pmu perf_swevent = {
6587 	.task_ctx_nr	= perf_sw_context,
6588 
6589 	.capabilities	= PERF_PMU_CAP_NO_NMI,
6590 
6591 	.event_init	= perf_swevent_init,
6592 	.add		= perf_swevent_add,
6593 	.del		= perf_swevent_del,
6594 	.start		= perf_swevent_start,
6595 	.stop		= perf_swevent_stop,
6596 	.read		= perf_swevent_read,
6597 };
6598 
6599 #ifdef CONFIG_EVENT_TRACING
6600 
6601 static int perf_tp_filter_match(struct perf_event *event,
6602 				struct perf_sample_data *data)
6603 {
6604 	void *record = data->raw->data;
6605 
6606 	if (likely(!event->filter) || filter_match_preds(event->filter, record))
6607 		return 1;
6608 	return 0;
6609 }
6610 
6611 static int perf_tp_event_match(struct perf_event *event,
6612 				struct perf_sample_data *data,
6613 				struct pt_regs *regs)
6614 {
6615 	if (event->hw.state & PERF_HES_STOPPED)
6616 		return 0;
6617 	/*
6618 	 * All tracepoints are from kernel-space.
6619 	 */
6620 	if (event->attr.exclude_kernel)
6621 		return 0;
6622 
6623 	if (!perf_tp_filter_match(event, data))
6624 		return 0;
6625 
6626 	return 1;
6627 }
6628 
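/*
 * Tracepoint entry: feed the raw record to every event hashed on
 * this tracepoint and, when a target task is given, to the matching
 * tracepoint events in that task's software context.
 */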
6629 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
6630 		   struct pt_regs *regs, struct hlist_head *head, int rctx,
6631 		   struct task_struct *task)
6632 {
6633 	struct perf_sample_data data;
6634 	struct perf_event *event;
6635 
6636 	struct perf_raw_record raw = {
6637 		.size = entry_size,
6638 		.data = record,
6639 	};
6640 
6641 	perf_sample_data_init(&data, addr, 0);
6642 	data.raw = &raw;
6643 
6644 	hlist_for_each_entry_rcu(event, head, hlist_entry) {
6645 		if (perf_tp_event_match(event, &data, regs))
6646 			perf_swevent_event(event, count, &data, regs);
6647 	}
6648 
6649 	/*
6650 	 * If we were given a target task, also iterate its context and
6651 	 * deliver this event there too.
6652 	 */
6653 	if (task && task != current) {
6654 		struct perf_event_context *ctx;
6655 		struct trace_entry *entry = record;
6656 
6657 		rcu_read_lock();
6658 		ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
6659 		if (!ctx)
6660 			goto unlock;
6661 
6662 		list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
6663 			if (event->attr.type != PERF_TYPE_TRACEPOINT)
6664 				continue;
6665 			if (event->attr.config != entry->type)
6666 				continue;
6667 			if (perf_tp_event_match(event, &data, regs))
6668 				perf_swevent_event(event, count, &data, regs);
6669 		}
6670 unlock:
6671 		rcu_read_unlock();
6672 	}
6673 
6674 	perf_swevent_put_recursion_context(rctx);
6675 }
6676 EXPORT_SYMBOL_GPL(perf_tp_event);
6677 
6678 static void tp_perf_event_destroy(struct perf_event *event)
6679 {
6680 	perf_trace_destroy(event);
6681 }
6682 
6683 static int perf_tp_event_init(struct perf_event *event)
6684 {
6685 	int err;
6686 
6687 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
6688 		return -ENOENT;
6689 
6690 	/*
6691 	 * no branch sampling for tracepoint events
6692 	 */
6693 	if (has_branch_stack(event))
6694 		return -EOPNOTSUPP;
6695 
6696 	err = perf_trace_init(event);
6697 	if (err)
6698 		return err;
6699 
6700 	event->destroy = tp_perf_event_destroy;
6701 
6702 	return 0;
6703 }
6704 
6705 static struct pmu perf_tracepoint = {
6706 	.task_ctx_nr	= perf_sw_context,
6707 
6708 	.event_init	= perf_tp_event_init,
6709 	.add		= perf_trace_add,
6710 	.del		= perf_trace_del,
6711 	.start		= perf_swevent_start,
6712 	.stop		= perf_swevent_stop,
6713 	.read		= perf_swevent_read,
6714 };
6715 
6716 static inline void perf_tp_register(void)
6717 {
6718 	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
6719 }
6720 
6721 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
6722 {
6723 	char *filter_str;
6724 	int ret;
6725 
6726 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
6727 		return -EINVAL;
6728 
6729 	filter_str = strndup_user(arg, PAGE_SIZE);
6730 	if (IS_ERR(filter_str))
6731 		return PTR_ERR(filter_str);
6732 
6733 	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
6734 
6735 	kfree(filter_str);
6736 	return ret;
6737 }
6738 
6739 static void perf_event_free_filter(struct perf_event *event)
6740 {
6741 	ftrace_profile_free_filter(event);
6742 }
6743 
6744 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
6745 {
6746 	struct bpf_prog *prog;
6747 
6748 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
6749 		return -EINVAL;
6750 
6751 	if (event->tp_event->prog)
6752 		return -EEXIST;
6753 
6754 	if (!(event->tp_event->flags & TRACE_EVENT_FL_KPROBE))
6755 		/* bpf programs can only be attached to kprobes */
6756 		return -EINVAL;
6757 
6758 	prog = bpf_prog_get(prog_fd);
6759 	if (IS_ERR(prog))
6760 		return PTR_ERR(prog);
6761 
6762 	if (prog->type != BPF_PROG_TYPE_KPROBE) {
6763 		/* valid fd, but invalid bpf program type */
6764 		bpf_prog_put(prog);
6765 		return -EINVAL;
6766 	}
6767 
6768 	event->tp_event->prog = prog;
6769 
6770 	return 0;
6771 }
6772 
6773 static void perf_event_free_bpf_prog(struct perf_event *event)
6774 {
6775 	struct bpf_prog *prog;
6776 
6777 	if (!event->tp_event)
6778 		return;
6779 
6780 	prog = event->tp_event->prog;
6781 	if (prog) {
6782 		event->tp_event->prog = NULL;
6783 		bpf_prog_put(prog);
6784 	}
6785 }
6786 
6787 #else
6788 
6789 static inline void perf_tp_register(void)
6790 {
6791 }
6792 
6793 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
6794 {
6795 	return -ENOENT;
6796 }
6797 
6798 static void perf_event_free_filter(struct perf_event *event)
6799 {
6800 }
6801 
6802 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
6803 {
6804 	return -ENOENT;
6805 }
6806 
6807 static void perf_event_free_bpf_prog(struct perf_event *event)
6808 {
6809 }
6810 #endif /* CONFIG_EVENT_TRACING */
6811 
6812 #ifdef CONFIG_HAVE_HW_BREAKPOINT
6813 void perf_bp_event(struct perf_event *bp, void *data)
6814 {
6815 	struct perf_sample_data sample;
6816 	struct pt_regs *regs = data;
6817 
6818 	perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
6819 
6820 	if (!bp->hw.state && !perf_exclude_event(bp, regs))
6821 		perf_swevent_event(bp, 1, &sample, regs);
6822 }
6823 #endif
6824 
6825 /*
6826  * hrtimer based swevent callback
6827  */
6828 
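/*
 * Read the counter, emit an overflow where appropriate, then re-arm
 * the timer; the period is clamped to at least 10us to bound the
 * interrupt rate.
 */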
6829 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
6830 {
6831 	enum hrtimer_restart ret = HRTIMER_RESTART;
6832 	struct perf_sample_data data;
6833 	struct pt_regs *regs;
6834 	struct perf_event *event;
6835 	u64 period;
6836 
6837 	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
6838 
6839 	if (event->state != PERF_EVENT_STATE_ACTIVE)
6840 		return HRTIMER_NORESTART;
6841 
6842 	event->pmu->read(event);
6843 
6844 	perf_sample_data_init(&data, 0, event->hw.last_period);
6845 	regs = get_irq_regs();
6846 
6847 	if (regs && !perf_exclude_event(event, regs)) {
6848 		if (!(event->attr.exclude_idle && is_idle_task(current)))
6849 			if (__perf_event_overflow(event, 1, &data, regs))
6850 				ret = HRTIMER_NORESTART;
6851 	}
6852 
6853 	period = max_t(u64, 10000, event->hw.sample_period);
6854 	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
6855 
6856 	return ret;
6857 }
6858 
6859 static void perf_swevent_start_hrtimer(struct perf_event *event)
6860 {
6861 	struct hw_perf_event *hwc = &event->hw;
6862 	s64 period;
6863 
6864 	if (!is_sampling_event(event))
6865 		return;
6866 
6867 	period = local64_read(&hwc->period_left);
6868 	if (period) {
6869 		if (period < 0)
6870 			period = 10000;
6871 
6872 		local64_set(&hwc->period_left, 0);
6873 	} else {
6874 		period = max_t(u64, 10000, hwc->sample_period);
6875 	}
6876 	hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
6877 		      HRTIMER_MODE_REL_PINNED);
6878 }
6879 
6880 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
6881 {
6882 	struct hw_perf_event *hwc = &event->hw;
6883 
6884 	if (is_sampling_event(event)) {
6885 		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
6886 		local64_set(&hwc->period_left, ktime_to_ns(remaining));
6887 
6888 		hrtimer_cancel(&hwc->hrtimer);
6889 	}
6890 }
6891 
6892 static void perf_swevent_init_hrtimer(struct perf_event *event)
6893 {
6894 	struct hw_perf_event *hwc = &event->hw;
6895 
6896 	if (!is_sampling_event(event))
6897 		return;
6898 
6899 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6900 	hwc->hrtimer.function = perf_swevent_hrtimer;
6901 
6902 	/*
6903 	 * Since hrtimers have a fixed rate, we can do a static freq->period
6904 	 * mapping and avoid the whole period adjust feedback stuff.
6905 	 */
6906 	if (event->attr.freq) {
6907 		long freq = event->attr.sample_freq;
6908 
6909 		event->attr.sample_period = NSEC_PER_SEC / freq;
6910 		hwc->sample_period = event->attr.sample_period;
6911 		local64_set(&hwc->period_left, hwc->sample_period);
6912 		hwc->last_period = hwc->sample_period;
6913 		event->attr.freq = 0;
6914 	}
6915 }
6916 
6917 /*
6918  * Software event: cpu wall time clock
6919  */
6920 
6921 static void cpu_clock_event_update(struct perf_event *event)
6922 {
6923 	s64 prev;
6924 	u64 now;
6925 
6926 	now = local_clock();
6927 	prev = local64_xchg(&event->hw.prev_count, now);
6928 	local64_add(now - prev, &event->count);
6929 }
6930 
6931 static void cpu_clock_event_start(struct perf_event *event, int flags)
6932 {
6933 	local64_set(&event->hw.prev_count, local_clock());
6934 	perf_swevent_start_hrtimer(event);
6935 }
6936 
6937 static void cpu_clock_event_stop(struct perf_event *event, int flags)
6938 {
6939 	perf_swevent_cancel_hrtimer(event);
6940 	cpu_clock_event_update(event);
6941 }
6942 
6943 static int cpu_clock_event_add(struct perf_event *event, int flags)
6944 {
6945 	if (flags & PERF_EF_START)
6946 		cpu_clock_event_start(event, flags);
6947 	perf_event_update_userpage(event);
6948 
6949 	return 0;
6950 }
6951 
6952 static void cpu_clock_event_del(struct perf_event *event, int flags)
6953 {
6954 	cpu_clock_event_stop(event, flags);
6955 }
6956 
6957 static void cpu_clock_event_read(struct perf_event *event)
6958 {
6959 	cpu_clock_event_update(event);
6960 }
6961 
6962 static int cpu_clock_event_init(struct perf_event *event)
6963 {
6964 	if (event->attr.type != PERF_TYPE_SOFTWARE)
6965 		return -ENOENT;
6966 
6967 	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
6968 		return -ENOENT;
6969 
6970 	/*
6971 	 * no branch sampling for software events
6972 	 */
6973 	if (has_branch_stack(event))
6974 		return -EOPNOTSUPP;
6975 
6976 	perf_swevent_init_hrtimer(event);
6977 
6978 	return 0;
6979 }
6980 
6981 static struct pmu perf_cpu_clock = {
6982 	.task_ctx_nr	= perf_sw_context,
6983 
6984 	.capabilities	= PERF_PMU_CAP_NO_NMI,
6985 
6986 	.event_init	= cpu_clock_event_init,
6987 	.add		= cpu_clock_event_add,
6988 	.del		= cpu_clock_event_del,
6989 	.start		= cpu_clock_event_start,
6990 	.stop		= cpu_clock_event_stop,
6991 	.read		= cpu_clock_event_read,
6992 };
6993 
6994 /*
6995  * Software event: task time clock
6996  */
6997 
6998 static void task_clock_event_update(struct perf_event *event, u64 now)
6999 {
7000 	u64 prev;
7001 	s64 delta;
7002 
7003 	prev = local64_xchg(&event->hw.prev_count, now);
7004 	delta = now - prev;
7005 	local64_add(delta, &event->count);
7006 }
7007 
7008 static void task_clock_event_start(struct perf_event *event, int flags)
7009 {
7010 	local64_set(&event->hw.prev_count, event->ctx->time);
7011 	perf_swevent_start_hrtimer(event);
7012 }
7013 
7014 static void task_clock_event_stop(struct perf_event *event, int flags)
7015 {
7016 	perf_swevent_cancel_hrtimer(event);
7017 	task_clock_event_update(event, event->ctx->time);
7018 }
7019 
7020 static int task_clock_event_add(struct perf_event *event, int flags)
7021 {
7022 	if (flags & PERF_EF_START)
7023 		task_clock_event_start(event, flags);
7024 	perf_event_update_userpage(event);
7025 
7026 	return 0;
7027 }
7028 
7029 static void task_clock_event_del(struct perf_event *event, int flags)
7030 {
7031 	task_clock_event_stop(event, PERF_EF_UPDATE);
7032 }
7033 
7034 static void task_clock_event_read(struct perf_event *event)
7035 {
7036 	u64 now = perf_clock();
7037 	u64 delta = now - event->ctx->timestamp;
7038 	u64 time = event->ctx->time + delta;
7039 
7040 	task_clock_event_update(event, time);
7041 }
7042 
7043 static int task_clock_event_init(struct perf_event *event)
7044 {
7045 	if (event->attr.type != PERF_TYPE_SOFTWARE)
7046 		return -ENOENT;
7047 
7048 	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
7049 		return -ENOENT;
7050 
7051 	/*
7052 	 * no branch sampling for software events
7053 	 */
7054 	if (has_branch_stack(event))
7055 		return -EOPNOTSUPP;
7056 
7057 	perf_swevent_init_hrtimer(event);
7058 
7059 	return 0;
7060 }
7061 
7062 static struct pmu perf_task_clock = {
7063 	.task_ctx_nr	= perf_sw_context,
7064 
7065 	.capabilities	= PERF_PMU_CAP_NO_NMI,
7066 
7067 	.event_init	= task_clock_event_init,
7068 	.add		= task_clock_event_add,
7069 	.del		= task_clock_event_del,
7070 	.start		= task_clock_event_start,
7071 	.stop		= task_clock_event_stop,
7072 	.read		= task_clock_event_read,
7073 };
7074 
7075 static void perf_pmu_nop_void(struct pmu *pmu)
7076 {
7077 }
7078 
7079 static int perf_pmu_nop_int(struct pmu *pmu)
7080 {
7081 	return 0;
7082 }
7083 
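/*
 * Default transaction helpers: bracket a group add between
 * pmu_disable()/pmu_enable() so hardware writes can be batched.
 */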
7084 static void perf_pmu_start_txn(struct pmu *pmu)
7085 {
7086 	perf_pmu_disable(pmu);
7087 }
7088 
7089 static int perf_pmu_commit_txn(struct pmu *pmu)
7090 {
7091 	perf_pmu_enable(pmu);
7092 	return 0;
7093 }
7094 
7095 static void perf_pmu_cancel_txn(struct pmu *pmu)
7096 {
7097 	perf_pmu_enable(pmu);
7098 }
7099 
7100 static int perf_event_idx_default(struct perf_event *event)
7101 {
7102 	return 0;
7103 }
7104 
7105 /*
7106  * Ensures all contexts with the same task_ctx_nr have the same
7107  * pmu_cpu_context too.
7108  */
7109 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
7110 {
7111 	struct pmu *pmu;
7112 
7113 	if (ctxn < 0)
7114 		return NULL;
7115 
7116 	list_for_each_entry(pmu, &pmus, entry) {
7117 		if (pmu->task_ctx_nr == ctxn)
7118 			return pmu->pmu_cpu_context;
7119 	}
7120 
7121 	return NULL;
7122 }
7123 
7124 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
7125 {
7126 	int cpu;
7127 
7128 	for_each_possible_cpu(cpu) {
7129 		struct perf_cpu_context *cpuctx;
7130 
7131 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
7132 
7133 		if (cpuctx->unique_pmu == old_pmu)
7134 			cpuctx->unique_pmu = pmu;
7135 	}
7136 }
7137 
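/*
 * Drop this PMU's use of the shared pmu_cpu_context; only free it
 * once no other registered PMU still points at it.
 */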
7138 static void free_pmu_context(struct pmu *pmu)
7139 {
7140 	struct pmu *i;
7141 
7142 	mutex_lock(&pmus_lock);
7143 	/*
7144 	 * Like a real lame refcount.
7145 	 */
7146 	list_for_each_entry(i, &pmus, entry) {
7147 		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
7148 			update_pmu_context(i, pmu);
7149 			goto out;
7150 		}
7151 	}
7152 
7153 	free_percpu(pmu->pmu_cpu_context);
7154 out:
7155 	mutex_unlock(&pmus_lock);
7156 }
7157 static struct idr pmu_idr;
7158 
7159 static ssize_t
7160 type_show(struct device *dev, struct device_attribute *attr, char *page)
7161 {
7162 	struct pmu *pmu = dev_get_drvdata(dev);
7163 
7164 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
7165 }
7166 static DEVICE_ATTR_RO(type);
7167 
7168 static ssize_t
7169 perf_event_mux_interval_ms_show(struct device *dev,
7170 				struct device_attribute *attr,
7171 				char *page)
7172 {
7173 	struct pmu *pmu = dev_get_drvdata(dev);
7174 
7175 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
7176 }
7177 
7178 static DEFINE_MUTEX(mux_interval_mutex);
7179 
7180 static ssize_t
7181 perf_event_mux_interval_ms_store(struct device *dev,
7182 				 struct device_attribute *attr,
7183 				 const char *buf, size_t count)
7184 {
7185 	struct pmu *pmu = dev_get_drvdata(dev);
7186 	int timer, cpu, ret;
7187 
7188 	ret = kstrtoint(buf, 0, &timer);
7189 	if (ret)
7190 		return ret;
7191 
7192 	if (timer < 1)
7193 		return -EINVAL;
7194 
7195 	/* same value, nothing to do */
7196 	if (timer == pmu->hrtimer_interval_ms)
7197 		return count;
7198 
7199 	mutex_lock(&mux_interval_mutex);
7200 	pmu->hrtimer_interval_ms = timer;
7201 
7202 	/* update all cpuctx for this PMU */
7203 	get_online_cpus();
7204 	for_each_online_cpu(cpu) {
7205 		struct perf_cpu_context *cpuctx;
7206 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
7207 		cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
7208 
7209 		cpu_function_call(cpu,
7210 			(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
7211 	}
7212 	put_online_cpus();
7213 	mutex_unlock(&mux_interval_mutex);
7214 
7215 	return count;
7216 }
7217 static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
7218 
7219 static struct attribute *pmu_dev_attrs[] = {
7220 	&dev_attr_type.attr,
7221 	&dev_attr_perf_event_mux_interval_ms.attr,
7222 	NULL,
7223 };
7224 ATTRIBUTE_GROUPS(pmu_dev);
7225 
7226 static int pmu_bus_running;
7227 static struct bus_type pmu_bus = {
7228 	.name		= "event_source",
7229 	.dev_groups	= pmu_dev_groups,
7230 };
7231 
7232 static void pmu_dev_release(struct device *dev)
7233 {
7234 	kfree(dev);
7235 }
7236 
7237 static int pmu_dev_alloc(struct pmu *pmu)
7238 {
7239 	int ret = -ENOMEM;
7240 
7241 	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
7242 	if (!pmu->dev)
7243 		goto out;
7244 
7245 	pmu->dev->groups = pmu->attr_groups;
7246 	device_initialize(pmu->dev);
7247 	ret = dev_set_name(pmu->dev, "%s", pmu->name);
7248 	if (ret)
7249 		goto free_dev;
7250 
7251 	dev_set_drvdata(pmu->dev, pmu);
7252 	pmu->dev->bus = &pmu_bus;
7253 	pmu->dev->release = pmu_dev_release;
7254 	ret = device_add(pmu->dev);
7255 	if (ret)
7256 		goto free_dev;
7257 
7258 out:
7259 	return ret;
7260 
7261 free_dev:
7262 	put_device(pmu->dev);
7263 	goto out;
7264 }
7265 
7266 static struct lock_class_key cpuctx_mutex;
7267 static struct lock_class_key cpuctx_lock;
7268 
7269 int perf_pmu_register(struct pmu *pmu, const char *name, int type)
7270 {
7271 	int cpu, ret;
7272 
7273 	mutex_lock(&pmus_lock);
7274 	ret = -ENOMEM;
7275 	pmu->pmu_disable_count = alloc_percpu(int);
7276 	if (!pmu->pmu_disable_count)
7277 		goto unlock;
7278 
7279 	pmu->type = -1;
7280 	if (!name)
7281 		goto skip_type;
7282 	pmu->name = name;
7283 
7284 	if (type < 0) {
7285 		type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
7286 		if (type < 0) {
7287 			ret = type;
7288 			goto free_pdc;
7289 		}
7290 	}
7291 	pmu->type = type;
7292 
7293 	if (pmu_bus_running) {
7294 		ret = pmu_dev_alloc(pmu);
7295 		if (ret)
7296 			goto free_idr;
7297 	}
7298 
7299 skip_type:
7300 	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
7301 	if (pmu->pmu_cpu_context)
7302 		goto got_cpu_context;
7303 
7304 	ret = -ENOMEM;
7305 	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
7306 	if (!pmu->pmu_cpu_context)
7307 		goto free_dev;
7308 
7309 	for_each_possible_cpu(cpu) {
7310 		struct perf_cpu_context *cpuctx;
7311 
7312 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
7313 		__perf_event_init_context(&cpuctx->ctx);
7314 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
7315 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
7316 		cpuctx->ctx.pmu = pmu;
7317 
7318 		__perf_mux_hrtimer_init(cpuctx, cpu);
7319 
7320 		cpuctx->unique_pmu = pmu;
7321 	}
7322 
7323 got_cpu_context:
7324 	if (!pmu->start_txn) {
7325 		if (pmu->pmu_enable) {
7326 			/*
7327 			 * If we have pmu_enable/pmu_disable calls, install
7328 			 * transaction stubs that use them to try to batch
7329 			 * hardware accesses.
7330 			 */
7331 			pmu->start_txn  = perf_pmu_start_txn;
7332 			pmu->commit_txn = perf_pmu_commit_txn;
7333 			pmu->cancel_txn = perf_pmu_cancel_txn;
7334 		} else {
7335 			pmu->start_txn  = perf_pmu_nop_void;
7336 			pmu->commit_txn = perf_pmu_nop_int;
7337 			pmu->cancel_txn = perf_pmu_nop_void;
7338 		}
7339 	}
7340 
7341 	if (!pmu->pmu_enable) {
7342 		pmu->pmu_enable  = perf_pmu_nop_void;
7343 		pmu->pmu_disable = perf_pmu_nop_void;
7344 	}
7345 
7346 	if (!pmu->event_idx)
7347 		pmu->event_idx = perf_event_idx_default;
7348 
7349 	list_add_rcu(&pmu->entry, &pmus);
7350 	atomic_set(&pmu->exclusive_cnt, 0);
7351 	ret = 0;
7352 unlock:
7353 	mutex_unlock(&pmus_lock);
7354 
7355 	return ret;
7356 
7357 free_dev:
7358 	device_del(pmu->dev);
7359 	put_device(pmu->dev);
7360 
7361 free_idr:
7362 	if (pmu->type >= PERF_TYPE_MAX)
7363 		idr_remove(&pmu_idr, pmu->type);
7364 
7365 free_pdc:
7366 	free_percpu(pmu->pmu_disable_count);
7367 	goto unlock;
7368 }
7369 EXPORT_SYMBOL_GPL(perf_pmu_register);
7370 
7371 void perf_pmu_unregister(struct pmu *pmu)
7372 {
7373 	mutex_lock(&pmus_lock);
7374 	list_del_rcu(&pmu->entry);
7375 	mutex_unlock(&pmus_lock);
7376 
7377 	/*
7378 	 * We dereference the pmu list under both SRCU and regular RCU, so
7379 	 * synchronize against both of those.
7380 	 */
7381 	synchronize_srcu(&pmus_srcu);
7382 	synchronize_rcu();
7383 
7384 	free_percpu(pmu->pmu_disable_count);
7385 	if (pmu->type >= PERF_TYPE_MAX)
7386 		idr_remove(&pmu_idr, pmu->type);
7387 	device_del(pmu->dev);
7388 	put_device(pmu->dev);
7389 	free_pmu_context(pmu);
7390 }
7391 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
7392 
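/*
 * Ask @pmu to initialize @event, pinning the PMU's module for the
 * attempt and holding the group leader's ctx->mutex when the event
 * is joining an existing group.
 */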
7393 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
7394 {
7395 	struct perf_event_context *ctx = NULL;
7396 	int ret;
7397 
7398 	if (!try_module_get(pmu->module))
7399 		return -ENODEV;
7400 
7401 	if (event->group_leader != event) {
7402 		/*
7403 		 * This ctx->mutex can nest when we're called through
7404 		 * inheritance. See the perf_event_ctx_lock_nested() comment.
7405 		 */
7406 		ctx = perf_event_ctx_lock_nested(event->group_leader,
7407 						 SINGLE_DEPTH_NESTING);
7408 		BUG_ON(!ctx);
7409 	}
7410 
7411 	event->pmu = pmu;
7412 	ret = pmu->event_init(event);
7413 
7414 	if (ctx)
7415 		perf_event_ctx_unlock(event->group_leader, ctx);
7416 
7417 	if (ret)
7418 		module_put(pmu->module);
7419 
7420 	return ret;
7421 }
7422 
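/*
 * Find a PMU for the event: fast path through the type idr first,
 * then walk all registered PMUs until one claims it (anything but
 * -ENOENT ends the walk).
 */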
7423 struct pmu *perf_init_event(struct perf_event *event)
7424 {
7425 	struct pmu *pmu = NULL;
7426 	int idx;
7427 	int ret;
7428 
7429 	idx = srcu_read_lock(&pmus_srcu);
7430 
7431 	rcu_read_lock();
7432 	pmu = idr_find(&pmu_idr, event->attr.type);
7433 	rcu_read_unlock();
7434 	if (pmu) {
7435 		ret = perf_try_init_event(pmu, event);
7436 		if (ret)
7437 			pmu = ERR_PTR(ret);
7438 		goto unlock;
7439 	}
7440 
7441 	list_for_each_entry_rcu(pmu, &pmus, entry) {
7442 		ret = perf_try_init_event(pmu, event);
7443 		if (!ret)
7444 			goto unlock;
7445 
7446 		if (ret != -ENOENT) {
7447 			pmu = ERR_PTR(ret);
7448 			goto unlock;
7449 		}
7450 	}
7451 	pmu = ERR_PTR(-ENOENT);
7452 unlock:
7453 	srcu_read_unlock(&pmus_srcu, idx);
7454 
7455 	return pmu;
7456 }
7457 
7458 static void account_event_cpu(struct perf_event *event, int cpu)
7459 {
7460 	if (event->parent)
7461 		return;
7462 
7463 	if (is_cgroup_event(event))
7464 		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
7465 }
7466 
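/*
 * One-time accounting for a new (non-inherited) event: bump the
 * static keys gating the context switch hooks and the counters
 * gating the various side-band record generators.
 */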
7467 static void account_event(struct perf_event *event)
7468 {
7469 	if (event->parent)
7470 		return;
7471 
7472 	if (event->attach_state & PERF_ATTACH_TASK)
7473 		static_key_slow_inc(&perf_sched_events.key);
7474 	if (event->attr.mmap || event->attr.mmap_data)
7475 		atomic_inc(&nr_mmap_events);
7476 	if (event->attr.comm)
7477 		atomic_inc(&nr_comm_events);
7478 	if (event->attr.task)
7479 		atomic_inc(&nr_task_events);
7480 	if (event->attr.freq) {
7481 		if (atomic_inc_return(&nr_freq_events) == 1)
7482 			tick_nohz_full_kick_all();
7483 	}
7484 	if (has_branch_stack(event))
7485 		static_key_slow_inc(&perf_sched_events.key);
7486 	if (is_cgroup_event(event))
7487 		static_key_slow_inc(&perf_sched_events.key);
7488 
7489 	account_event_cpu(event, event->cpu);
7490 }
7491 
7492 /*
7493  * Allocate and initialize an event structure
7494  */
7495 static struct perf_event *
7496 perf_event_alloc(struct perf_event_attr *attr, int cpu,
7497 		 struct task_struct *task,
7498 		 struct perf_event *group_leader,
7499 		 struct perf_event *parent_event,
7500 		 perf_overflow_handler_t overflow_handler,
7501 		 void *context, int cgroup_fd)
7502 {
7503 	struct pmu *pmu;
7504 	struct perf_event *event;
7505 	struct hw_perf_event *hwc;
7506 	long err = -EINVAL;
7507 
7508 	if ((unsigned)cpu >= nr_cpu_ids) {
7509 		if (!task || cpu != -1)
7510 			return ERR_PTR(-EINVAL);
7511 	}
7512 
7513 	event = kzalloc(sizeof(*event), GFP_KERNEL);
7514 	if (!event)
7515 		return ERR_PTR(-ENOMEM);
7516 
7517 	/*
7518 	 * Single events are their own group leaders, with an
7519 	 * empty sibling list:
7520 	 */
7521 	if (!group_leader)
7522 		group_leader = event;
7523 
7524 	mutex_init(&event->child_mutex);
7525 	INIT_LIST_HEAD(&event->child_list);
7526 
7527 	INIT_LIST_HEAD(&event->group_entry);
7528 	INIT_LIST_HEAD(&event->event_entry);
7529 	INIT_LIST_HEAD(&event->sibling_list);
7530 	INIT_LIST_HEAD(&event->rb_entry);
7531 	INIT_LIST_HEAD(&event->active_entry);
7532 	INIT_HLIST_NODE(&event->hlist_entry);
7533 
7534 
7535 	init_waitqueue_head(&event->waitq);
7536 	init_irq_work(&event->pending, perf_pending_event);
7537 
7538 	mutex_init(&event->mmap_mutex);
7539 
7540 	atomic_long_set(&event->refcount, 1);
7541 	event->cpu		= cpu;
7542 	event->attr		= *attr;
7543 	event->group_leader	= group_leader;
7544 	event->pmu		= NULL;
7545 	event->oncpu		= -1;
7546 
7547 	event->parent		= parent_event;
7548 
7549 	event->ns		= get_pid_ns(task_active_pid_ns(current));
7550 	event->id		= atomic64_inc_return(&perf_event_id);
7551 
7552 	event->state		= PERF_EVENT_STATE_INACTIVE;
7553 
7554 	if (task) {
7555 		event->attach_state = PERF_ATTACH_TASK;
7556 		/*
7557 		 * XXX pmu::event_init needs to know what task to account to
7558 		 * and we cannot use the ctx information because we need the
7559 		 * pmu before we get a ctx.
7560 		 */
7561 		event->hw.target = task;
7562 	}
7563 
7564 	event->clock = &local_clock;
7565 	if (parent_event)
7566 		event->clock = parent_event->clock;
7567 
7568 	if (!overflow_handler && parent_event) {
7569 		overflow_handler = parent_event->overflow_handler;
7570 		context = parent_event->overflow_handler_context;
7571 	}
7572 
7573 	event->overflow_handler	= overflow_handler;
7574 	event->overflow_handler_context = context;
7575 
7576 	perf_event__state_init(event);
7577 
7578 	pmu = NULL;
7579 
7580 	hwc = &event->hw;
7581 	hwc->sample_period = attr->sample_period;
7582 	if (attr->freq && attr->sample_freq)
7583 		hwc->sample_period = 1;
7584 	hwc->last_period = hwc->sample_period;
7585 
7586 	local64_set(&hwc->period_left, hwc->sample_period);
7587 
7588 	/*
7589 	 * we currently do not support PERF_FORMAT_GROUP on inherited events
7590 	 */
7591 	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
7592 		goto err_ns;
7593 
7594 	if (!has_branch_stack(event))
7595 		event->attr.branch_sample_type = 0;
7596 
7597 	if (cgroup_fd != -1) {
7598 		err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
7599 		if (err)
7600 			goto err_ns;
7601 	}
7602 
7603 	pmu = perf_init_event(event);
7604 	if (!pmu)
7605 		goto err_ns;
7606 	else if (IS_ERR(pmu)) {
7607 		err = PTR_ERR(pmu);
7608 		goto err_ns;
7609 	}
7610 
7611 	err = exclusive_event_init(event);
7612 	if (err)
7613 		goto err_pmu;
7614 
7615 	if (!event->parent) {
7616 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
7617 			err = get_callchain_buffers();
7618 			if (err)
7619 				goto err_per_task;
7620 		}
7621 	}
7622 
7623 	return event;
7624 
7625 err_per_task:
7626 	exclusive_event_destroy(event);
7627 
7628 err_pmu:
7629 	if (event->destroy)
7630 		event->destroy(event);
7631 	module_put(pmu->module);
7632 err_ns:
7633 	if (is_cgroup_event(event))
7634 		perf_detach_cgroup(event);
7635 	if (event->ns)
7636 		put_pid_ns(event->ns);
7637 	kfree(event);
7638 
7639 	return ERR_PTR(err);
7640 }
7641 
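/*
 * Copy a versioned perf_event_attr from userspace: shorter (older
 * ABI) structs are zero-extended, larger (newer ABI) structs are
 * only accepted if every byte we do not know about is zero.
 */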
7642 static int perf_copy_attr(struct perf_event_attr __user *uattr,
7643 			  struct perf_event_attr *attr)
7644 {
7645 	u32 size;
7646 	int ret;
7647 
7648 	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
7649 		return -EFAULT;
7650 
7651 	/*
7652 	 * zero the full structure, so that a short copy will be nice.
7653 	 */
7654 	memset(attr, 0, sizeof(*attr));
7655 
7656 	ret = get_user(size, &uattr->size);
7657 	if (ret)
7658 		return ret;
7659 
7660 	if (size > PAGE_SIZE)	/* silly large */
7661 		goto err_size;
7662 
7663 	if (!size)		/* abi compat */
7664 		size = PERF_ATTR_SIZE_VER0;
7665 
7666 	if (size < PERF_ATTR_SIZE_VER0)
7667 		goto err_size;
7668 
7669 	/*
7670 	 * If we're handed a bigger struct than we know of,
7671 	 * ensure all the unknown bits are 0 - i.e. new
7672 	 * user-space does not rely on any kernel feature
7673 	 * extensions we dont know about yet.
7674 	 */
7675 	if (size > sizeof(*attr)) {
7676 		unsigned char __user *addr;
7677 		unsigned char __user *end;
7678 		unsigned char val;
7679 
7680 		addr = (void __user *)uattr + sizeof(*attr);
7681 		end  = (void __user *)uattr + size;
7682 
7683 		for (; addr < end; addr++) {
7684 			ret = get_user(val, addr);
7685 			if (ret)
7686 				return ret;
7687 			if (val)
7688 				goto err_size;
7689 		}
7690 		size = sizeof(*attr);
7691 	}
7692 
7693 	ret = copy_from_user(attr, uattr, size);
7694 	if (ret)
7695 		return -EFAULT;
7696 
7697 	if (attr->__reserved_1)
7698 		return -EINVAL;
7699 
7700 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
7701 		return -EINVAL;
7702 
7703 	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
7704 		return -EINVAL;
7705 
7706 	if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
7707 		u64 mask = attr->branch_sample_type;
7708 
7709 		/* only using defined bits */
7710 		if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
7711 			return -EINVAL;
7712 
7713 		/* at least one branch bit must be set */
7714 		if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
7715 			return -EINVAL;
7716 
7717 		/* propagate priv level, when not set for branch */
7718 		if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
7719 
7720 			/* exclude_kernel checked on syscall entry */
7721 			if (!attr->exclude_kernel)
7722 				mask |= PERF_SAMPLE_BRANCH_KERNEL;
7723 
7724 			if (!attr->exclude_user)
7725 				mask |= PERF_SAMPLE_BRANCH_USER;
7726 
7727 			if (!attr->exclude_hv)
7728 				mask |= PERF_SAMPLE_BRANCH_HV;
7729 			/*
7730 			 * adjust user setting (for HW filter setup)
7731 			 */
7732 			attr->branch_sample_type = mask;
7733 		}
7734 		/* privileged levels capture (kernel, hv): check permissions */
7735 		if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
7736 		    && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
7737 			return -EACCES;
7738 	}
7739 
7740 	if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
7741 		ret = perf_reg_validate(attr->sample_regs_user);
7742 		if (ret)
7743 			return ret;
7744 	}
7745 
7746 	if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
7747 		if (!arch_perf_have_user_stack_dump())
7748 			return -ENOSYS;
7749 
7750 		/*
7751 		 * We have __u32 type for the size, but so far
7752 		 * we can only use __u16 as maximum due to the
7753 		 * __u16 sample size limit.
7754 		 */
7755 		if (attr->sample_stack_user >= USHRT_MAX)
7756 			ret = -EINVAL;
7757 		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
7758 			ret = -EINVAL;
7759 	}
7760 
7761 	if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
7762 		ret = perf_reg_validate(attr->sample_regs_intr);
7763 out:
7764 	return ret;
7765 
7766 err_size:
7767 	put_user(sizeof(*attr), &uattr->size);
7768 	ret = -E2BIG;
7769 	goto out;
7770 }
7771 
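/*
 * Redirect this event's output into output_event's ring buffer,
 * subject to the compatibility checks below.
 */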
7772 static int
7773 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
7774 {
7775 	struct ring_buffer *rb = NULL;
7776 	int ret = -EINVAL;
7777 
7778 	if (!output_event)
7779 		goto set;
7780 
7781 	/* don't allow circular references */
7782 	if (event == output_event)
7783 		goto out;
7784 
7785 	/*
7786 	 * Don't allow cross-cpu buffers
7787 	 */
7788 	if (output_event->cpu != event->cpu)
7789 		goto out;
7790 
7791 	/*
7792 	 * If its not a per-cpu rb, it must be the same task.
7793 	 */
7794 	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
7795 		goto out;
7796 
7797 	/*
7798 	 * Mixing clocks in the same buffer is trouble you don't need.
7799 	 */
7800 	if (output_event->clock != event->clock)
7801 		goto out;
7802 
7803 	/*
7804 	 * If both events generate aux data, they must be on the same PMU
7805 	 */
7806 	if (has_aux(event) && has_aux(output_event) &&
7807 	    event->pmu != output_event->pmu)
7808 		goto out;
7809 
7810 set:
7811 	mutex_lock(&event->mmap_mutex);
7812 	/* Can't redirect output if we've got an active mmap() */
7813 	if (atomic_read(&event->mmap_count))
7814 		goto unlock;
7815 
7816 	if (output_event) {
7817 		/* get the rb we want to redirect to */
7818 		rb = ring_buffer_get(output_event);
7819 		if (!rb)
7820 			goto unlock;
7821 	}
7822 
7823 	ring_buffer_attach(event, rb);
7824 
7825 	ret = 0;
7826 unlock:
7827 	mutex_unlock(&event->mmap_mutex);
7828 
7829 out:
7830 	return ret;
7831 }
7832 
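/*
 * Take two mutexes in address order so that concurrent callers
 * locking the same pair cannot deadlock on each other (ABBA).
 */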
7833 static void mutex_lock_double(struct mutex *a, struct mutex *b)
7834 {
7835 	if (b < a)
7836 		swap(a, b);
7837 
7838 	mutex_lock(a);
7839 	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
7840 }
7841 
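/*
 * Wire up the clock selected via attr->clockid. Clocks that are not
 * NMI-safe are only accepted on PMUs that never deliver NMIs.
 */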
7842 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
7843 {
7844 	bool nmi_safe = false;
7845 
7846 	switch (clk_id) {
7847 	case CLOCK_MONOTONIC:
7848 		event->clock = &ktime_get_mono_fast_ns;
7849 		nmi_safe = true;
7850 		break;
7851 
7852 	case CLOCK_MONOTONIC_RAW:
7853 		event->clock = &ktime_get_raw_fast_ns;
7854 		nmi_safe = true;
7855 		break;
7856 
7857 	case CLOCK_REALTIME:
7858 		event->clock = &ktime_get_real_ns;
7859 		break;
7860 
7861 	case CLOCK_BOOTTIME:
7862 		event->clock = &ktime_get_boot_ns;
7863 		break;
7864 
7865 	case CLOCK_TAI:
7866 		event->clock = &ktime_get_tai_ns;
7867 		break;
7868 
7869 	default:
7870 		return -EINVAL;
7871 	}
7872 
7873 	if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
7874 		return -EINVAL;
7875 
7876 	return 0;
7877 }
7878 
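/*
 * Illustrative userspace sketch (not kernel code; error handling
 * elided): count task clock on the calling thread, using
 * <linux/perf_event.h>, <sys/syscall.h>, <sys/ioctl.h> and <unistd.h>:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_SW_TASK_CLOCK,
 *		.disabled	= 1,
 *	};
 *	__u64 count;
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	... run the workload ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 */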
7879 /**
7880  * sys_perf_event_open - open a performance event, associate it to a task/cpu
7881  *
7882  * @attr_uptr:	event_id type attributes for monitoring/sampling
7883  * @pid:		target pid
7884  * @cpu:		target cpu
7885  * @group_fd:		group leader event fd
7886  */
7887 SYSCALL_DEFINE5(perf_event_open,
7888 		struct perf_event_attr __user *, attr_uptr,
7889 		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
7890 {
7891 	struct perf_event *group_leader = NULL, *output_event = NULL;
7892 	struct perf_event *event, *sibling;
7893 	struct perf_event_attr attr;
7894 	struct perf_event_context *ctx, *uninitialized_var(gctx);
7895 	struct file *event_file = NULL;
7896 	struct fd group = {NULL, 0};
7897 	struct task_struct *task = NULL;
7898 	struct pmu *pmu;
7899 	int event_fd;
7900 	int move_group = 0;
7901 	int err;
7902 	int f_flags = O_RDWR;
7903 	int cgroup_fd = -1;
7904 
7905 	/* for future expandability... */
7906 	if (flags & ~PERF_FLAG_ALL)
7907 		return -EINVAL;
7908 
7909 	err = perf_copy_attr(attr_uptr, &attr);
7910 	if (err)
7911 		return err;
7912 
7913 	if (!attr.exclude_kernel) {
7914 		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
7915 			return -EACCES;
7916 	}
7917 
7918 	if (attr.freq) {
7919 		if (attr.sample_freq > sysctl_perf_event_sample_rate)
7920 			return -EINVAL;
7921 	} else {
7922 		if (attr.sample_period & (1ULL << 63))
7923 			return -EINVAL;
7924 	}
7925 
7926 	/*
7927 	 * In cgroup mode, the pid argument is used to pass the fd
7928 	 * opened to the cgroup directory in cgroupfs. The cpu argument
7929 	 * designates the cpu on which to monitor threads from that
7930 	 * cgroup.
7931 	 */
7932 	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
7933 		return -EINVAL;
7934 
7935 	if (flags & PERF_FLAG_FD_CLOEXEC)
7936 		f_flags |= O_CLOEXEC;
7937 
7938 	event_fd = get_unused_fd_flags(f_flags);
7939 	if (event_fd < 0)
7940 		return event_fd;
7941 
7942 	if (group_fd != -1) {
7943 		err = perf_fget_light(group_fd, &group);
7944 		if (err)
7945 			goto err_fd;
7946 		group_leader = group.file->private_data;
7947 		if (flags & PERF_FLAG_FD_OUTPUT)
7948 			output_event = group_leader;
7949 		if (flags & PERF_FLAG_FD_NO_GROUP)
7950 			group_leader = NULL;
7951 	}
7952 
7953 	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
7954 		task = find_lively_task_by_vpid(pid);
7955 		if (IS_ERR(task)) {
7956 			err = PTR_ERR(task);
7957 			goto err_group_fd;
7958 		}
7959 	}
7960 
7961 	if (task && group_leader &&
7962 	    group_leader->attr.inherit != attr.inherit) {
7963 		err = -EINVAL;
7964 		goto err_task;
7965 	}
7966 
7967 	get_online_cpus();
7968 
7969 	if (flags & PERF_FLAG_PID_CGROUP)
7970 		cgroup_fd = pid;
7971 
7972 	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
7973 				 NULL, NULL, cgroup_fd);
7974 	if (IS_ERR(event)) {
7975 		err = PTR_ERR(event);
7976 		goto err_cpus;
7977 	}
7978 
7979 	if (is_sampling_event(event)) {
7980 		if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
7981 			err = -ENOTSUPP;
7982 			goto err_alloc;
7983 		}
7984 	}
7985 
7986 	account_event(event);
7987 
7988 	/*
7989 	 * Special case software events and allow them to be part of
7990 	 * any hardware group.
7991 	 */
7992 	pmu = event->pmu;
7993 
7994 	if (attr.use_clockid) {
7995 		err = perf_event_set_clock(event, attr.clockid);
7996 		if (err)
7997 			goto err_alloc;
7998 	}
7999 
8000 	if (group_leader &&
8001 	    (is_software_event(event) != is_software_event(group_leader))) {
8002 		if (is_software_event(event)) {
8003 			/*
8004 			 * The event and group_leader differ here; since the
8005 			 * event is a software event, the group leader is not.
8006 			 *
8007 			 * Allow the addition of software events to !software
8008 			 * groups; this is safe because software events never
8009 			 * fail to schedule.
8010 			 */
8011 			pmu = group_leader->pmu;
8012 		} else if (is_software_event(group_leader) &&
8013 			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
8014 			/*
8015 			 * In case the group is a pure software group, and we
8016 			 * try to add a hardware event, move the whole group to
8017 			 * the hardware context.
8018 			 */
8019 			move_group = 1;
8020 		}
8021 	}
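
	/*
	 * Worked example (illustrative): a software leader (say, a
	 * context-switches event) heading a pure software group gains a
	 * hardware sibling here; move_group is set and the whole group is
	 * re-homed into the hardware PMU's context below.
	 */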
8022 
8023 	/*
8024 	 * Get the target context (task or percpu):
8025 	 */
8026 	ctx = find_get_context(pmu, task, event);
8027 	if (IS_ERR(ctx)) {
8028 		err = PTR_ERR(ctx);
8029 		goto err_alloc;
8030 	}
8031 
8032 	if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
8033 		err = -EBUSY;
8034 		goto err_context;
8035 	}
8036 
8037 	if (task) {
8038 		put_task_struct(task);
8039 		task = NULL;
8040 	}
8041 
8042 	/*
8043 	 * Look up the group leader (we will attach this event to it):
8044 	 */
8045 	if (group_leader) {
8046 		err = -EINVAL;
8047 
8048 		/*
8049 		 * Do not allow a recursive hierarchy (this new sibling
8050 		 * becoming part of another group-sibling):
8051 		 */
8052 		if (group_leader->group_leader != group_leader)
8053 			goto err_context;
8054 
8055 		/* All events in a group should have the same clock */
8056 		if (group_leader->clock != event->clock)
8057 			goto err_context;
8058 
8059 		/*
8060 		 * Do not allow attaching to a group in a different
8061 		 * task or CPU context:
8062 		 */
8063 		if (move_group) {
8064 			/*
8065 			 * Make sure both events are on the same task, or
8066 			 * both are per-cpu events.
8067 			 */
8068 			if (group_leader->ctx->task != ctx->task)
8069 				goto err_context;
8070 
8071 			/*
8072 			 * Make sure we're both events for the same CPU;
8073 			 * grouping events for different CPUs is broken, since
8074 			 * you can never concurrently schedule them anyhow.
8075 			 */
8076 			if (group_leader->cpu != event->cpu)
8077 				goto err_context;
8078 		} else {
8079 			if (group_leader->ctx != ctx)
8080 				goto err_context;
8081 		}
8082 
8083 		/*
8084 		 * Only a group leader can be exclusive or pinned
8085 		 */
8086 		if (attr.exclusive || attr.pinned)
8087 			goto err_context;
8088 	}
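
	/*
	 * Illustrative rejection (not part of this file): under the checks
	 * above, a leader opened for cpu 0 cannot take a sibling opened for
	 * cpu 1; such an open fails with -EINVAL.
	 */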
8089 
8090 	if (output_event) {
8091 		err = perf_event_set_output(event, output_event);
8092 		if (err)
8093 			goto err_context;
8094 	}
8095 
8096 	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
8097 					f_flags);
8098 	if (IS_ERR(event_file)) {
8099 		err = PTR_ERR(event_file);
8100 		goto err_context;
8101 	}
8102 
8103 	if (move_group) {
8104 		gctx = group_leader->ctx;
8105 
8106 		/*
8107 		 * See perf_event_ctx_lock() for comments on the details
8108 		 * of swizzling perf_event::ctx.
8109 		 */
8110 		mutex_lock_double(&gctx->mutex, &ctx->mutex);
8111 
8112 		perf_remove_from_context(group_leader, false);
8113 
8114 		list_for_each_entry(sibling, &group_leader->sibling_list,
8115 				    group_entry) {
8116 			perf_remove_from_context(sibling, false);
8117 			put_ctx(gctx);
8118 		}
8119 	} else {
8120 		mutex_lock(&ctx->mutex);
8121 	}
8122 
8123 	WARN_ON_ONCE(ctx->parent_ctx);
8124 
8125 	if (move_group) {
8126 		/*
8127 		 * Wait for everybody to stop referencing the events through
8128 		 * the old lists, before installing them on the new lists.
8129 		 */
8130 		synchronize_rcu();
8131 
8132 		/*
8133 		 * Install the group siblings before the group leader.
8134 		 *
8135 		 * Because a group leader will try to install the entire group
8136 		 * (through the sibling list, which is still intact), we can
8137 		 * end up with siblings installed in the wrong context.
8138 		 *
8139 		 * By installing the siblings first this becomes a NO-OP,
8140 		 * because they're not yet reachable through the group lists.
8141 		 */
8142 		list_for_each_entry(sibling, &group_leader->sibling_list,
8143 				    group_entry) {
8144 			perf_event__state_init(sibling);
8145 			perf_install_in_context(ctx, sibling, sibling->cpu);
8146 			get_ctx(ctx);
8147 		}
8148 
8149 		/*
8150 		 * Removing an event from its context leaves it disabled.
8151 		 * What we want here is an event in the initial startup
8152 		 * state, ready to be added into the new context.
8153 		 */
8154 		perf_event__state_init(group_leader);
8155 		perf_install_in_context(ctx, group_leader, group_leader->cpu);
8156 		get_ctx(ctx);
8157 	}
8158 
8159 	if (!exclusive_event_installable(event, ctx)) {
8160 		err = -EBUSY;
8161 		mutex_unlock(&ctx->mutex);
8162 		fput(event_file);
8163 		goto err_context;
8164 	}
8165 
8166 	perf_install_in_context(ctx, event, event->cpu);
8167 	perf_unpin_context(ctx);
8168 
8169 	if (move_group) {
8170 		mutex_unlock(&gctx->mutex);
8171 		put_ctx(gctx);
8172 	}
8173 	mutex_unlock(&ctx->mutex);
8174 
8175 	put_online_cpus();
8176 
8177 	event->owner = current;
8178 
8179 	mutex_lock(&current->perf_event_mutex);
8180 	list_add_tail(&event->owner_entry, &current->perf_event_list);
8181 	mutex_unlock(&current->perf_event_mutex);
8182 
8183 	/*
8184 	 * Precalculate sample_data sizes
8185 	 */
8186 	perf_event__header_size(event);
8187 	perf_event__id_header_size(event);
8188 
8189 	/*
8190 	 * Drop the file reference on the group leader after placing the
8191 	 * new event on the sibling_list. This ensures that destruction
8192 	 * of the group leader will find the pointer to itself in
8193 	 * perf_group_detach().
8194 	 */
8195 	fdput(group);
8196 	fd_install(event_fd, event_file);
8197 	return event_fd;
8198 
8199 err_context:
8200 	perf_unpin_context(ctx);
8201 	put_ctx(ctx);
8202 err_alloc:
8203 	free_event(event);
8204 err_cpus:
8205 	put_online_cpus();
8206 err_task:
8207 	if (task)
8208 		put_task_struct(task);
8209 err_group_fd:
8210 	fdput(group);
8211 err_fd:
8212 	put_unused_fd(event_fd);
8213 	return err;
8214 }
8215 
8216 /**
8217  * perf_event_create_kernel_counter - create a counter for kernel-internal use
8218  *
8219  * @attr: attributes of the counter to create
8220  * @cpu: cpu on which the counter is bound
8221  * @task: task to profile (NULL for percpu)
 * @overflow_handler: callback invoked when the event overflows
 * @context: context data passed to @overflow_handler
8222  */
8223 struct perf_event *
8224 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
8225 				 struct task_struct *task,
8226 				 perf_overflow_handler_t overflow_handler,
8227 				 void *context)
8228 {
8229 	struct perf_event_context *ctx;
8230 	struct perf_event *event;
8231 	int err;
8232 
8233 	/*
8234 	 * Allocate the event and get the target context (task or percpu):
8235 	 */
8236 
8237 	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
8238 				 overflow_handler, context, -1);
8239 	if (IS_ERR(event)) {
8240 		err = PTR_ERR(event);
8241 		goto err;
8242 	}
8243 
8244 	/* Mark owner so we can distinguish it from user events. */
8245 	event->owner = EVENT_OWNER_KERNEL;
8246 
8247 	account_event(event);
8248 
8249 	ctx = find_get_context(event->pmu, task, event);
8250 	if (IS_ERR(ctx)) {
8251 		err = PTR_ERR(ctx);
8252 		goto err_free;
8253 	}
8254 
8255 	WARN_ON_ONCE(ctx->parent_ctx);
8256 	mutex_lock(&ctx->mutex);
8257 	if (!exclusive_event_installable(event, ctx)) {
8258 		mutex_unlock(&ctx->mutex);
8259 		perf_unpin_context(ctx);
8260 		put_ctx(ctx);
8261 		err = -EBUSY;
8262 		goto err_free;
8263 	}
8264 
8265 	perf_install_in_context(ctx, event, cpu);
8266 	perf_unpin_context(ctx);
8267 	mutex_unlock(&ctx->mutex);
8268 
8269 	return event;
8270 
8271 err_free:
8272 	free_event(event);
8273 err:
8274 	return ERR_PTR(err);
8275 }
8276 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
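
/*
 * Illustrative in-kernel sketch (not part of this file): a per-cpu
 * counter with an overflow callback, in the style of the hardlockup
 * watchdog; "my_overflow_handler" is a hypothetical callback:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *ev;
 *
 *	ev = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *					      my_overflow_handler, NULL);
 *	if (IS_ERR(ev))
 *		return PTR_ERR(ev);
 */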
8277 
8278 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
8279 {
8280 	struct perf_event_context *src_ctx;
8281 	struct perf_event_context *dst_ctx;
8282 	struct perf_event *event, *tmp;
8283 	LIST_HEAD(events);
8284 
8285 	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
8286 	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
8287 
8288 	/*
8289 	 * See perf_event_ctx_lock() for comments on the details
8290 	 * of swizzling perf_event::ctx.
8291 	 */
8292 	mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
8293 	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
8294 				 event_entry) {
8295 		perf_remove_from_context(event, false);
8296 		unaccount_event_cpu(event, src_cpu);
8297 		put_ctx(src_ctx);
8298 		list_add(&event->migrate_entry, &events);
8299 	}
8300 
8301 	/*
8302 	 * Wait for the events to quiesce before reinstating them.
8303 	 */
8304 	synchronize_rcu();
8305 
8306 	/*
8307 	 * Reinstate events in two passes.
8308 	 *
8309 	 * Skip over group leaders and only install siblings on this first
8310 	 * pass; siblings will not get enabled without a leader, but a
8311 	 * leader will enable its siblings, even if those are still on the
8312 	 * old context.
8313 	 */
8314 	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
8315 		if (event->group_leader == event)
8316 			continue;
8317 
8318 		list_del(&event->migrate_entry);
8319 		if (event->state >= PERF_EVENT_STATE_OFF)
8320 			event->state = PERF_EVENT_STATE_INACTIVE;
8321 		account_event_cpu(event, dst_cpu);
8322 		perf_install_in_context(dst_ctx, event, dst_cpu);
8323 		get_ctx(dst_ctx);
8324 	}
8325 
8326 	/*
8327 	 * Once all the siblings are set up properly, install the group
8328 	 * leaders to make them go.
8329 	 */
8330 	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
8331 		list_del(&event->migrate_entry);
8332 		if (event->state >= PERF_EVENT_STATE_OFF)
8333 			event->state = PERF_EVENT_STATE_INACTIVE;
8334 		account_event_cpu(event, dst_cpu);
8335 		perf_install_in_context(dst_ctx, event, dst_cpu);
8336 		get_ctx(dst_ctx);
8337 	}
8338 	mutex_unlock(&dst_ctx->mutex);
8339 	mutex_unlock(&src_ctx->mutex);
8340 }
8341 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
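
/*
 * Illustrative caller (not part of this file): uncore-style PMU drivers
 * invoke this from their CPU-hotplug handling to re-home a package's
 * events when the CPU carrying them goes away; "uncore_pmu" and "target"
 * are hypothetical names for the driver's pmu and replacement CPU:
 *
 *	perf_pmu_migrate_context(uncore_pmu, dying_cpu, target);
 */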
8342 
8343 static void sync_child_event(struct perf_event *child_event,
8344 			       struct task_struct *child)
8345 {
8346 	struct perf_event *parent_event = child_event->parent;
8347 	u64 child_val;
8348 
8349 	if (child_event->attr.inherit_stat)
8350 		perf_event_read_event(child_event, child);
8351 
8352 	child_val = perf_event_count(child_event);
8353 
8354 	/*
8355 	 * Add back the child's count to the parent's count:
8356 	 */
8357 	atomic64_add(child_val, &parent_event->child_count);
8358 	atomic64_add(child_event->total_time_enabled,
8359 		     &parent_event->child_total_time_enabled);
8360 	atomic64_add(child_event->total_time_running,
8361 		     &parent_event->child_total_time_running);
8362 
8363 	/*
8364 	 * Remove this event from the parent's list
8365 	 */
8366 	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
8367 	mutex_lock(&parent_event->child_mutex);
8368 	list_del_init(&child_event->child_list);
8369 	mutex_unlock(&parent_event->child_mutex);
8370 
8371 	/*
8372 	 * Make sure the user/parent gets notified that we just
8373 	 * lost one event.
8374 	 */
8375 	perf_event_wakeup(parent_event);
8376 
8377 	/*
8378 	 * Release the parent event, if this was the last
8379 	 * reference to it.
8380 	 */
8381 	put_event(parent_event);
8382 }
8383 
8384 static void
8385 __perf_event_exit_task(struct perf_event *child_event,
8386 			 struct perf_event_context *child_ctx,
8387 			 struct task_struct *child)
8388 {
8389 	/*
8390 	 * Do not destroy the 'original' grouping; because of the context
8391 	 * switch optimization the original events could've ended up in a
8392 	 * random child task.
8393 	 *
8394 	 * If we were to destroy the original group, all group related
8395 	 * operations would cease to function properly after this random
8396 	 * child dies.
8397 	 *
8398 	 * Do destroy all inherited groups; we don't care about those,
8399 	 * and being thorough is better.
8400 	 */
8401 	perf_remove_from_context(child_event, !!child_event->parent);
8402 
8403 	/*
8404 	 * It can happen that the parent exits first, and has events
8405 	 * that are still around due to the child reference. These
8406 	 * events need to be zapped.
8407 	 */
8408 	if (child_event->parent) {
8409 		sync_child_event(child_event, child);
8410 		free_event(child_event);
8411 	} else {
8412 		child_event->state = PERF_EVENT_STATE_EXIT;
8413 		perf_event_wakeup(child_event);
8414 	}
8415 }
8416 
8417 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
8418 {
8419 	struct perf_event *child_event, *next;
8420 	struct perf_event_context *child_ctx, *clone_ctx = NULL;
8421 	unsigned long flags;
8422 
8423 	if (likely(!child->perf_event_ctxp[ctxn])) {
8424 		perf_event_task(child, NULL, 0);
8425 		return;
8426 	}
8427 
8428 	local_irq_save(flags);
8429 	/*
8430 	 * We can't reschedule here because interrupts are disabled,
8431 	 * and either the child is current or it is a task that can't be
8432 	 * scheduled, so we are now safe from rescheduling changing
8433 	 * our context.
8434 	 */
8435 	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
8436 
8437 	/*
8438 	 * Take the context lock here so that if find_get_context is
8439 	 * reading child->perf_event_ctxp, we wait until it has
8440 	 * incremented the context's refcount before we do put_ctx below.
8441 	 */
8442 	raw_spin_lock(&child_ctx->lock);
8443 	task_ctx_sched_out(child_ctx);
8444 	child->perf_event_ctxp[ctxn] = NULL;
8445 
8446 	/*
8447 	 * If this context is a clone, unclone it so it can't get
8448 	 * swapped to another process while we're removing all
8449 	 * the events from it.
8450 	 */
8451 	clone_ctx = unclone_ctx(child_ctx);
8452 	update_context_time(child_ctx);
8453 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
8454 
8455 	if (clone_ctx)
8456 		put_ctx(clone_ctx);
8457 
8458 	/*
8459 	 * Report the task dead after unscheduling the events so that we
8460 	 * won't get any samples after PERF_RECORD_EXIT. We can, however, still
8461 	 * get a few PERF_RECORD_READ events.
8462 	 */
8463 	perf_event_task(child, child_ctx, 0);
8464 
8465 	/*
8466 	 * We can recurse on the same lock type through:
8467 	 *
8468 	 *   __perf_event_exit_task()
8469 	 *     sync_child_event()
8470 	 *       put_event()
8471 	 *         mutex_lock(&ctx->mutex)
8472 	 *
8473 	 * But since it's the parent context, it won't be the same instance.
8474 	 */
8475 	mutex_lock(&child_ctx->mutex);
8476 
8477 	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
8478 		__perf_event_exit_task(child_event, child_ctx, child);
8479 
8480 	mutex_unlock(&child_ctx->mutex);
8481 
8482 	put_ctx(child_ctx);
8483 }
8484 
8485 /*
8486  * When a child task exits, feed back event values to parent events.
8487  */
8488 void perf_event_exit_task(struct task_struct *child)
8489 {
8490 	struct perf_event *event, *tmp;
8491 	int ctxn;
8492 
8493 	mutex_lock(&child->perf_event_mutex);
8494 	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
8495 				 owner_entry) {
8496 		list_del_init(&event->owner_entry);
8497 
8498 		/*
8499 		 * Ensure the list deletion is visible before we clear
8500 		 * the owner; this closes a race against perf_release(), where
8501 		 * we need to serialize on the owner->perf_event_mutex.
8502 		 */
8503 		smp_wmb();
8504 		event->owner = NULL;
8505 	}
8506 	mutex_unlock(&child->perf_event_mutex);
8507 
8508 	for_each_task_context_nr(ctxn)
8509 		perf_event_exit_task_context(child, ctxn);
8510 }
8511 
8512 static void perf_free_event(struct perf_event *event,
8513 			    struct perf_event_context *ctx)
8514 {
8515 	struct perf_event *parent = event->parent;
8516 
8517 	if (WARN_ON_ONCE(!parent))
8518 		return;
8519 
8520 	mutex_lock(&parent->child_mutex);
8521 	list_del_init(&event->child_list);
8522 	mutex_unlock(&parent->child_mutex);
8523 
8524 	put_event(parent);
8525 
8526 	raw_spin_lock_irq(&ctx->lock);
8527 	perf_group_detach(event);
8528 	list_del_event(event, ctx);
8529 	raw_spin_unlock_irq(&ctx->lock);
8530 	free_event(event);
8531 }
8532 
8533 /*
8534  * Free an unexposed, unused context as created by inheritance by
8535  * perf_event_init_task below; used by fork() in case of failure.
8536  *
8537  * Not all locks are strictly required, but take them anyway to be nice and
8538  * help out with the lockdep assertions.
8539  */
8540 void perf_event_free_task(struct task_struct *task)
8541 {
8542 	struct perf_event_context *ctx;
8543 	struct perf_event *event, *tmp;
8544 	int ctxn;
8545 
8546 	for_each_task_context_nr(ctxn) {
8547 		ctx = task->perf_event_ctxp[ctxn];
8548 		if (!ctx)
8549 			continue;
8550 
8551 		mutex_lock(&ctx->mutex);
8552 again:
8553 		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
8554 				group_entry)
8555 			perf_free_event(event, ctx);
8556 
8557 		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
8558 				group_entry)
8559 			perf_free_event(event, ctx);
8560 
8561 		if (!list_empty(&ctx->pinned_groups) ||
8562 				!list_empty(&ctx->flexible_groups))
8563 			goto again;
8564 
8565 		mutex_unlock(&ctx->mutex);
8566 
8567 		put_ctx(ctx);
8568 	}
8569 }
8570 
8571 void perf_event_delayed_put(struct task_struct *task)
8572 {
8573 	int ctxn;
8574 
8575 	for_each_task_context_nr(ctxn)
8576 		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
8577 }
8578 
8579 /*
8580  * inherit an event from the parent task to the child task:
8581  */
8582 static struct perf_event *
8583 inherit_event(struct perf_event *parent_event,
8584 	      struct task_struct *parent,
8585 	      struct perf_event_context *parent_ctx,
8586 	      struct task_struct *child,
8587 	      struct perf_event *group_leader,
8588 	      struct perf_event_context *child_ctx)
8589 {
8590 	enum perf_event_active_state parent_state = parent_event->state;
8591 	struct perf_event *child_event;
8592 	unsigned long flags;
8593 
8594 	/*
8595 	 * Instead of creating recursive hierarchies of events,
8596 	 * we link inherited events back to the original parent,
8597 	 * which is guaranteed to have a filp that we use as the
8598 	 * reference count:
8599 	 */
8600 	if (parent_event->parent)
8601 		parent_event = parent_event->parent;
8602 
8603 	child_event = perf_event_alloc(&parent_event->attr,
8604 					   parent_event->cpu,
8605 					   child,
8606 					   group_leader, parent_event,
8607 					   NULL, NULL, -1);
8608 	if (IS_ERR(child_event))
8609 		return child_event;
8610 
8611 	if (is_orphaned_event(parent_event) ||
8612 	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
8613 		free_event(child_event);
8614 		return NULL;
8615 	}
8616 
8617 	get_ctx(child_ctx);
8618 
8619 	/*
8620 	 * Make the child state follow the state of the parent event,
8621 	 * not its attr.disabled bit.  We hold the parent's mutex,
8622 	 * so we won't race with perf_event_{en, dis}able_family.
8623 	 */
8624 	if (parent_state >= PERF_EVENT_STATE_INACTIVE)
8625 		child_event->state = PERF_EVENT_STATE_INACTIVE;
8626 	else
8627 		child_event->state = PERF_EVENT_STATE_OFF;
8628 
8629 	if (parent_event->attr.freq) {
8630 		u64 sample_period = parent_event->hw.sample_period;
8631 		struct hw_perf_event *hwc = &child_event->hw;
8632 
8633 		hwc->sample_period = sample_period;
8634 		hwc->last_period   = sample_period;
8635 
8636 		local64_set(&hwc->period_left, sample_period);
8637 	}
8638 
8639 	child_event->ctx = child_ctx;
8640 	child_event->overflow_handler = parent_event->overflow_handler;
8641 	child_event->overflow_handler_context
8642 		= parent_event->overflow_handler_context;
8643 
8644 	/*
8645 	 * Precalculate sample_data sizes
8646 	 */
8647 	perf_event__header_size(child_event);
8648 	perf_event__id_header_size(child_event);
8649 
8650 	/*
8651 	 * Link it up in the child's context:
8652 	 */
8653 	raw_spin_lock_irqsave(&child_ctx->lock, flags);
8654 	add_event_to_ctx(child_event, child_ctx);
8655 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
8656 
8657 	/*
8658 	 * Link this into the parent event's child list
8659 	 */
8660 	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
8661 	mutex_lock(&parent_event->child_mutex);
8662 	list_add_tail(&child_event->child_list, &parent_event->child_list);
8663 	mutex_unlock(&parent_event->child_mutex);
8664 
8665 	return child_event;
8666 }
8667 
8668 static int inherit_group(struct perf_event *parent_event,
8669 	      struct task_struct *parent,
8670 	      struct perf_event_context *parent_ctx,
8671 	      struct task_struct *child,
8672 	      struct perf_event_context *child_ctx)
8673 {
8674 	struct perf_event *leader;
8675 	struct perf_event *sub;
8676 	struct perf_event *child_ctr;
8677 
8678 	leader = inherit_event(parent_event, parent, parent_ctx,
8679 				 child, NULL, child_ctx);
8680 	if (IS_ERR(leader))
8681 		return PTR_ERR(leader);
8682 	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
8683 		child_ctr = inherit_event(sub, parent, parent_ctx,
8684 					    child, leader, child_ctx);
8685 		if (IS_ERR(child_ctr))
8686 			return PTR_ERR(child_ctr);
8687 	}
8688 	return 0;
8689 }
8690 
8691 static int
8692 inherit_task_group(struct perf_event *event, struct task_struct *parent,
8693 		   struct perf_event_context *parent_ctx,
8694 		   struct task_struct *child, int ctxn,
8695 		   int *inherited_all)
8696 {
8697 	int ret;
8698 	struct perf_event_context *child_ctx;
8699 
8700 	if (!event->attr.inherit) {
8701 		*inherited_all = 0;
8702 		return 0;
8703 	}
8704 
8705 	child_ctx = child->perf_event_ctxp[ctxn];
8706 	if (!child_ctx) {
8707 		/*
8708 		 * This is executed from the parent task context, so
8709 		 * inherit events that have been marked for cloning.
8710 		 * First allocate and initialize a context for the
8711 		 * child.
8712 		 */
8713 
8714 		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
8715 		if (!child_ctx)
8716 			return -ENOMEM;
8717 
8718 		child->perf_event_ctxp[ctxn] = child_ctx;
8719 	}
8720 
8721 	ret = inherit_group(event, parent, parent_ctx,
8722 			    child, child_ctx);
8723 
8724 	if (ret)
8725 		*inherited_all = 0;
8726 
8727 	return ret;
8728 }
8729 
8730 /*
8731  * Initialize the perf_event context in task_struct
8732  */
8733 static int perf_event_init_context(struct task_struct *child, int ctxn)
8734 {
8735 	struct perf_event_context *child_ctx, *parent_ctx;
8736 	struct perf_event_context *cloned_ctx;
8737 	struct perf_event *event;
8738 	struct task_struct *parent = current;
8739 	int inherited_all = 1;
8740 	unsigned long flags;
8741 	int ret = 0;
8742 
8743 	if (likely(!parent->perf_event_ctxp[ctxn]))
8744 		return 0;
8745 
8746 	/*
8747 	 * If the parent's context is a clone, pin it so it won't get
8748 	 * swapped under us.
8749 	 */
8750 	parent_ctx = perf_pin_task_context(parent, ctxn);
8751 	if (!parent_ctx)
8752 		return 0;
8753 
8754 	/*
8755 	 * No need to check if parent_ctx != NULL here; since we saw
8756 	 * it non-NULL earlier, the only reason for it to become NULL
8757 	 * is if we exit, and since we're currently in the middle of
8758 	 * a fork we can't be exiting at the same time.
8759 	 */
8760 
8761 	/*
8762 	 * Lock the parent list. No need to lock the child - not PID
8763 	 * hashed yet and not running, so nobody can access it.
8764 	 */
8765 	mutex_lock(&parent_ctx->mutex);
8766 
8767 	/*
8768 	 * We don't have to disable NMIs - we are only looking at
8769 	 * the list, not manipulating it:
8770 	 */
8771 	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
8772 		ret = inherit_task_group(event, parent, parent_ctx,
8773 					 child, ctxn, &inherited_all);
8774 		if (ret)
8775 			break;
8776 	}
8777 
8778 	/*
8779 	 * We can't hold ctx->lock when iterating the ->flexible_groups list due
8780 	 * to allocations, but we need to prevent rotation because
8781 	 * rotate_ctx() will change the list from interrupt context.
8782 	 */
8783 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
8784 	parent_ctx->rotate_disable = 1;
8785 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
8786 
8787 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
8788 		ret = inherit_task_group(event, parent, parent_ctx,
8789 					 child, ctxn, &inherited_all);
8790 		if (ret)
8791 			break;
8792 	}
8793 
8794 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
8795 	parent_ctx->rotate_disable = 0;
8796 
8797 	child_ctx = child->perf_event_ctxp[ctxn];
8798 
8799 	if (child_ctx && inherited_all) {
8800 		/*
8801 		 * Mark the child context as a clone of the parent
8802 		 * context, or of whatever the parent is a clone of.
8803 		 *
8804 		 * Note that if the parent is a clone, the holding of
8805 		 * parent_ctx->lock keeps it from being uncloned.
8806 		 */
8807 		cloned_ctx = parent_ctx->parent_ctx;
8808 		if (cloned_ctx) {
8809 			child_ctx->parent_ctx = cloned_ctx;
8810 			child_ctx->parent_gen = parent_ctx->parent_gen;
8811 		} else {
8812 			child_ctx->parent_ctx = parent_ctx;
8813 			child_ctx->parent_gen = parent_ctx->generation;
8814 		}
8815 		get_ctx(child_ctx->parent_ctx);
8816 	}
8817 
8818 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
8819 	mutex_unlock(&parent_ctx->mutex);
8820 
8821 	perf_unpin_context(parent_ctx);
8822 	put_ctx(parent_ctx);
8823 
8824 	return ret;
8825 }
8826 
8827 /*
8828  * Initialize the perf_event contexts in task_struct
8829  */
8830 int perf_event_init_task(struct task_struct *child)
8831 {
8832 	int ctxn, ret;
8833 
8834 	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
8835 	mutex_init(&child->perf_event_mutex);
8836 	INIT_LIST_HEAD(&child->perf_event_list);
8837 
8838 	for_each_task_context_nr(ctxn) {
8839 		ret = perf_event_init_context(child, ctxn);
8840 		if (ret) {
8841 			perf_event_free_task(child);
8842 			return ret;
8843 		}
8844 	}
8845 
8846 	return 0;
8847 }
8848 
8849 static void __init perf_event_init_all_cpus(void)
8850 {
8851 	struct swevent_htable *swhash;
8852 	int cpu;
8853 
8854 	for_each_possible_cpu(cpu) {
8855 		swhash = &per_cpu(swevent_htable, cpu);
8856 		mutex_init(&swhash->hlist_mutex);
8857 		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
8858 	}
8859 }
8860 
8861 static void perf_event_init_cpu(int cpu)
8862 {
8863 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
8864 
8865 	mutex_lock(&swhash->hlist_mutex);
8866 	swhash->online = true;
8867 	if (swhash->hlist_refcount > 0) {
8868 		struct swevent_hlist *hlist;
8869 
8870 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
8871 		WARN_ON(!hlist);
8872 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
8873 	}
8874 	mutex_unlock(&swhash->hlist_mutex);
8875 }
8876 
8877 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
8878 static void __perf_event_exit_context(void *__info)
8879 {
8880 	struct remove_event re = { .detach_group = true };
8881 	struct perf_event_context *ctx = __info;
8882 
8883 	rcu_read_lock();
8884 	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
8885 		__perf_remove_from_context(&re);
8886 	rcu_read_unlock();
8887 }
8888 
8889 static void perf_event_exit_cpu_context(int cpu)
8890 {
8891 	struct perf_event_context *ctx;
8892 	struct pmu *pmu;
8893 	int idx;
8894 
8895 	idx = srcu_read_lock(&pmus_srcu);
8896 	list_for_each_entry_rcu(pmu, &pmus, entry) {
8897 		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
8898 
8899 		mutex_lock(&ctx->mutex);
8900 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
8901 		mutex_unlock(&ctx->mutex);
8902 	}
8903 	srcu_read_unlock(&pmus_srcu, idx);
8904 }
8905 
8906 static void perf_event_exit_cpu(int cpu)
8907 {
8908 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
8909 
8910 	perf_event_exit_cpu_context(cpu);
8911 
8912 	mutex_lock(&swhash->hlist_mutex);
8913 	swhash->online = false;
8914 	swevent_hlist_release(swhash);
8915 	mutex_unlock(&swhash->hlist_mutex);
8916 }
8917 #else
8918 static inline void perf_event_exit_cpu(int cpu) { }
8919 #endif
8920 
8921 static int
8922 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
8923 {
8924 	int cpu;
8925 
8926 	for_each_online_cpu(cpu)
8927 		perf_event_exit_cpu(cpu);
8928 
8929 	return NOTIFY_OK;
8930 }
8931 
8932 /*
8933  * Run the perf reboot notifier at the very last possible moment so that
8934  * the generic watchdog code runs as long as possible.
8935  */
8936 static struct notifier_block perf_reboot_notifier = {
8937 	.notifier_call = perf_reboot,
8938 	.priority = INT_MIN,
8939 };
8940 
8941 static int
8942 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
8943 {
8944 	unsigned int cpu = (long)hcpu;
8945 
8946 	switch (action & ~CPU_TASKS_FROZEN) {
8947 
8948 	case CPU_UP_PREPARE:
8949 	case CPU_DOWN_FAILED:
8950 		perf_event_init_cpu(cpu);
8951 		break;
8952 
8953 	case CPU_UP_CANCELED:
8954 	case CPU_DOWN_PREPARE:
8955 		perf_event_exit_cpu(cpu);
8956 		break;
8957 	default:
8958 		break;
8959 	}
8960 
8961 	return NOTIFY_OK;
8962 }
8963 
8964 void __init perf_event_init(void)
8965 {
8966 	int ret;
8967 
8968 	idr_init(&pmu_idr);
8969 
8970 	perf_event_init_all_cpus();
8971 	init_srcu_struct(&pmus_srcu);
8972 	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
8973 	perf_pmu_register(&perf_cpu_clock, NULL, -1);
8974 	perf_pmu_register(&perf_task_clock, NULL, -1);
8975 	perf_tp_register();
8976 	perf_cpu_notifier(perf_cpu_notify);
8977 	register_reboot_notifier(&perf_reboot_notifier);
8978 
8979 	ret = init_hw_breakpoint();
8980 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
8981 
8982 	/* do not patch jump label more than once per second */
8983 	jump_label_rate_limit(&perf_sched_events, HZ);
8984 
8985 	/*
8986 	 * Build-time assertion that we keep the data_head at the intended
8987 	 * location.  IOW, validation that we got the __reserved[] size right.
8988 	 */
8989 	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
8990 		     != 1024);
8991 }
8992 
8993 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
8994 			      char *page)
8995 {
8996 	struct perf_pmu_events_attr *pmu_attr =
8997 		container_of(attr, struct perf_pmu_events_attr, attr);
8998 
8999 	if (pmu_attr->event_str)
9000 		return sprintf(page, "%s\n", pmu_attr->event_str);
9001 
9002 	return 0;
9003 }
9004 
9005 static int __init perf_event_sysfs_init(void)
9006 {
9007 	struct pmu *pmu;
9008 	int ret;
9009 
9010 	mutex_lock(&pmus_lock);
9011 
9012 	ret = bus_register(&pmu_bus);
9013 	if (ret)
9014 		goto unlock;
9015 
9016 	list_for_each_entry(pmu, &pmus, entry) {
9017 		if (!pmu->name || pmu->type < 0)
9018 			continue;
9019 
9020 		ret = pmu_dev_alloc(pmu);
9021 		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
9022 	}
9023 	pmu_bus_running = 1;
9024 	ret = 0;
9025 
9026 unlock:
9027 	mutex_unlock(&pmus_lock);
9028 
9029 	return ret;
9030 }
9031 device_initcall(perf_event_sysfs_init);
9032 
9033 #ifdef CONFIG_CGROUP_PERF
9034 static struct cgroup_subsys_state *
9035 perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
9036 {
9037 	struct perf_cgroup *jc;
9038 
9039 	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
9040 	if (!jc)
9041 		return ERR_PTR(-ENOMEM);
9042 
9043 	jc->info = alloc_percpu(struct perf_cgroup_info);
9044 	if (!jc->info) {
9045 		kfree(jc);
9046 		return ERR_PTR(-ENOMEM);
9047 	}
9048 
9049 	return &jc->css;
9050 }
9051 
9052 static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
9053 {
9054 	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
9055 
9056 	free_percpu(jc->info);
9057 	kfree(jc);
9058 }
9059 
9060 static int __perf_cgroup_move(void *info)
9061 {
9062 	struct task_struct *task = info;
9063 	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
9064 	return 0;
9065 }
9066 
9067 static void perf_cgroup_attach(struct cgroup_subsys_state *css,
9068 			       struct cgroup_taskset *tset)
9069 {
9070 	struct task_struct *task;
9071 
9072 	cgroup_taskset_for_each(task, tset)
9073 		task_function_call(task, __perf_cgroup_move, task);
9074 }
9075 
9076 static void perf_cgroup_exit(struct cgroup_subsys_state *css,
9077 			     struct cgroup_subsys_state *old_css,
9078 			     struct task_struct *task)
9079 {
9080 	/*
9081 	 * cgroup_exit() is called in the copy_process() failure path.
9082 	 * Ignore this case since the task hasn't run yet; this avoids
9083 	 * trying to poke half-freed task state from generic code.
9084 	 */
9085 	if (!(task->flags & PF_EXITING))
9086 		return;
9087 
9088 	task_function_call(task, __perf_cgroup_move, task);
9089 }
9090 
9091 struct cgroup_subsys perf_event_cgrp_subsys = {
9092 	.css_alloc	= perf_cgroup_css_alloc,
9093 	.css_free	= perf_cgroup_css_free,
9094 	.exit		= perf_cgroup_exit,
9095 	.attach		= perf_cgroup_attach,
9096 };
9097 #endif /* CONFIG_CGROUP_PERF */
9098