xref: /linux/kernel/events/core.c (revision 5148fa52a12fa1b97c730b2fe321f2aad7ea041c)
1 /*
2  * Performance events core code:
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  * For licensing details see kernel-base/COPYING
10  */
11 
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/idr.h>
17 #include <linux/file.h>
18 #include <linux/poll.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/sysfs.h>
22 #include <linux/dcache.h>
23 #include <linux/percpu.h>
24 #include <linux/ptrace.h>
25 #include <linux/reboot.h>
26 #include <linux/vmstat.h>
27 #include <linux/device.h>
28 #include <linux/export.h>
29 #include <linux/vmalloc.h>
30 #include <linux/hardirq.h>
31 #include <linux/rculist.h>
32 #include <linux/uaccess.h>
33 #include <linux/syscalls.h>
34 #include <linux/anon_inodes.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/perf_event.h>
37 #include <linux/ftrace_event.h>
38 #include <linux/hw_breakpoint.h>
39 
40 #include "internal.h"
41 
42 #include <asm/irq_regs.h>
43 
44 struct remote_function_call {
45 	struct task_struct	*p;
46 	int			(*func)(void *info);
47 	void			*info;
48 	int			ret;
49 };
50 
51 static void remote_function(void *data)
52 {
53 	struct remote_function_call *tfc = data;
54 	struct task_struct *p = tfc->p;
55 
56 	if (p) {
57 		tfc->ret = -EAGAIN;
58 		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
59 			return;
60 	}
61 
62 	tfc->ret = tfc->func(tfc->info);
63 }
64 
65 /**
66  * task_function_call - call a function on the cpu on which a task runs
67  * @p:		the task to evaluate
68  * @func:	the function to be called
69  * @info:	the function call argument
70  *
71  * Calls the function @func when the task is currently running. This might
72  * be on the current CPU, in which case the function is called directly.
73  *
74  * returns: @func return value, or
75  *	    -ESRCH  - when the process isn't running
76  *	    -EAGAIN - when the process moved away
77  */
78 static int
79 task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
80 {
81 	struct remote_function_call data = {
82 		.p	= p,
83 		.func	= func,
84 		.info	= info,
85 		.ret	= -ESRCH, /* No such (running) process */
86 	};
87 
88 	if (task_curr(p))
89 		smp_call_function_single(task_cpu(p), remote_function, &data, 1);
90 
91 	return data.ret;
92 }
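/*
 * Illustrative sketch (not in the original file): a caller might retry
 * the cross-call until the task is caught on a CPU, since -EAGAIN means
 * the task moved away between the task_curr() check and the IPI:
 *
 *	int ret;
 *	do {
 *		ret = task_function_call(p, __perf_event_disable, event);
 *	} while (ret == -EAGAIN);
 */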
93 
94 /**
95  * cpu_function_call - call a function on a remote cpu
 * @cpu:	the cpu on which to run the function
96  * @func:	the function to be called
97  * @info:	the function call argument
98  *
99  * Calls the function @func on the remote cpu.
100  *
101  * returns: @func return value or -ENXIO when the cpu is offline
102  */
103 static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
104 {
105 	struct remote_function_call data = {
106 		.p	= NULL,
107 		.func	= func,
108 		.info	= info,
109 		.ret	= -ENXIO, /* No such CPU */
110 	};
111 
112 	smp_call_function_single(cpu, remote_function, &data, 1);
113 
114 	return data.ret;
115 }
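/*
 * Hedged usage sketch: per-CPU events use this wrapper directly, as in
 * perf_remove_from_context() below; a -ENXIO return means the cpu was
 * offline and @func never ran:
 *
 *	cpu_function_call(event->cpu, __perf_remove_from_context, event);
 */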
116 
117 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
118 		       PERF_FLAG_FD_OUTPUT  |\
119 		       PERF_FLAG_PID_CGROUP)
120 
121 /*
122  * branch priv levels that need permission checks
123  */
124 #define PERF_SAMPLE_BRANCH_PERM_PLM \
125 	(PERF_SAMPLE_BRANCH_KERNEL |\
126 	 PERF_SAMPLE_BRANCH_HV)
127 
128 enum event_type_t {
129 	EVENT_FLEXIBLE = 0x1,
130 	EVENT_PINNED = 0x2,
131 	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
132 };
133 
134 /*
135  * perf_sched_events : >0 events exist
136  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
137  */
138 struct static_key_deferred perf_sched_events __read_mostly;
139 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
140 static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
141 
142 static atomic_t nr_mmap_events __read_mostly;
143 static atomic_t nr_comm_events __read_mostly;
144 static atomic_t nr_task_events __read_mostly;
145 
146 static LIST_HEAD(pmus);
147 static DEFINE_MUTEX(pmus_lock);
148 static struct srcu_struct pmus_srcu;
149 
150 /*
151  * perf event paranoia level:
152  *  -1 - not paranoid at all
153  *   0 - disallow raw tracepoint access for unpriv
154  *   1 - disallow cpu events for unpriv
155  *   2 - disallow kernel profiling for unpriv
156  */
157 int sysctl_perf_event_paranoid __read_mostly = 1;
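/*
 * Illustrative sketch, assuming the helpers from <linux/perf_event.h>:
 * the level is consulted via perf_paranoid_tracepoint_raw(),
 * perf_paranoid_cpu() and perf_paranoid_kernel(), which boil down to:
 *
 *	sysctl_perf_event_paranoid > -1;	raw tracepoint access
 *	sysctl_perf_event_paranoid >  0;	cpu events
 *	sysctl_perf_event_paranoid >  1;	kernel profiling
 */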
158 
159 /* Minimum for 512 kiB + 1 user control page */
160 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
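/*
 * Worked example, assuming 4 kiB pages: 512 + (4096 / 1024) = 516 kiB
 * of ring-buffer an unprivileged user may mlock, per user.
 */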
161 
162 /*
163  * max perf event sample rate
164  */
165 #define DEFAULT_MAX_SAMPLE_RATE 100000
166 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
167 static int max_samples_per_tick __read_mostly =
168 	DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
169 
170 int perf_proc_update_handler(struct ctl_table *table, int write,
171 		void __user *buffer, size_t *lenp,
172 		loff_t *ppos)
173 {
174 	int ret = proc_dointvec(table, write, buffer, lenp, ppos);
175 
176 	if (ret || !write)
177 		return ret;
178 
179 	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
180 
181 	return 0;
182 }
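/*
 * Worked example: with the default rate of 100000 samples/sec and
 * HZ=1000, max_samples_per_tick = DIV_ROUND_UP(100000, 1000) = 100;
 * writing 1000 to the sysctl would recompute it to 1.
 */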
183 
184 static atomic64_t perf_event_id;
185 
186 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
187 			      enum event_type_t event_type);
188 
189 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
190 			     enum event_type_t event_type,
191 			     struct task_struct *task);
192 
193 static void update_context_time(struct perf_event_context *ctx);
194 static u64 perf_event_time(struct perf_event *event);
195 
196 static void ring_buffer_attach(struct perf_event *event,
197 			       struct ring_buffer *rb);
198 
199 void __weak perf_event_print_debug(void)	{ }
200 
201 extern __weak const char *perf_pmu_name(void)
202 {
203 	return "pmu";
204 }
205 
206 static inline u64 perf_clock(void)
207 {
208 	return local_clock();
209 }
210 
211 static inline struct perf_cpu_context *
212 __get_cpu_context(struct perf_event_context *ctx)
213 {
214 	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
215 }
216 
217 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
218 			  struct perf_event_context *ctx)
219 {
220 	raw_spin_lock(&cpuctx->ctx.lock);
221 	if (ctx)
222 		raw_spin_lock(&ctx->lock);
223 }
224 
225 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
226 			    struct perf_event_context *ctx)
227 {
228 	if (ctx)
229 		raw_spin_unlock(&ctx->lock);
230 	raw_spin_unlock(&cpuctx->ctx.lock);
231 }
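/*
 * Hedged usage sketch: these helpers bracket sections that must hold
 * the cpu context lock and, when present, the task context lock, always
 * in that order (see perf_cgroup_switch() below):
 *
 *	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 *	... operate on both contexts ...
 *	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 */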
232 
233 #ifdef CONFIG_CGROUP_PERF
234 
235 /*
236  * Must ensure cgroup is pinned (css_get) before calling
237  * this function. In other words, we cannot call this function
238  * if there is no cgroup event for the current CPU context.
239  */
240 static inline struct perf_cgroup *
241 perf_cgroup_from_task(struct task_struct *task)
242 {
243 	return container_of(task_subsys_state(task, perf_subsys_id),
244 			struct perf_cgroup, css);
245 }
246 
247 static inline bool
248 perf_cgroup_match(struct perf_event *event)
249 {
250 	struct perf_event_context *ctx = event->ctx;
251 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
252 
253 	return !event->cgrp || event->cgrp == cpuctx->cgrp;
254 }
255 
256 static inline void perf_get_cgroup(struct perf_event *event)
257 {
258 	css_get(&event->cgrp->css);
259 }
260 
261 static inline void perf_put_cgroup(struct perf_event *event)
262 {
263 	css_put(&event->cgrp->css);
264 }
265 
266 static inline void perf_detach_cgroup(struct perf_event *event)
267 {
268 	perf_put_cgroup(event);
269 	event->cgrp = NULL;
270 }
271 
272 static inline int is_cgroup_event(struct perf_event *event)
273 {
274 	return event->cgrp != NULL;
275 }
276 
277 static inline u64 perf_cgroup_event_time(struct perf_event *event)
278 {
279 	struct perf_cgroup_info *t;
280 
281 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
282 	return t->time;
283 }
284 
285 static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
286 {
287 	struct perf_cgroup_info *info;
288 	u64 now;
289 
290 	now = perf_clock();
291 
292 	info = this_cpu_ptr(cgrp->info);
293 
294 	info->time += now - info->timestamp;
295 	info->timestamp = now;
296 }
297 
298 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
299 {
300 	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
301 	if (cgrp_out)
302 		__update_cgrp_time(cgrp_out);
303 }
304 
305 static inline void update_cgrp_time_from_event(struct perf_event *event)
306 {
307 	struct perf_cgroup *cgrp;
308 
309 	/*
310 	 * ensure we access cgroup data only when needed and
311 	 * when we know the cgroup is pinned (css_get)
312 	 */
313 	if (!is_cgroup_event(event))
314 		return;
315 
316 	cgrp = perf_cgroup_from_task(current);
317 	/*
318 	 * Do not update time when cgroup is not active
319 	 */
320 	if (cgrp == event->cgrp)
321 		__update_cgrp_time(event->cgrp);
322 }
323 
324 static inline void
325 perf_cgroup_set_timestamp(struct task_struct *task,
326 			  struct perf_event_context *ctx)
327 {
328 	struct perf_cgroup *cgrp;
329 	struct perf_cgroup_info *info;
330 
331 	/*
332 	 * ctx->lock held by caller
333 	 * ensure we do not access cgroup data
334 	 * unless we have the cgroup pinned (css_get)
335 	 */
336 	if (!task || !ctx->nr_cgroups)
337 		return;
338 
339 	cgrp = perf_cgroup_from_task(task);
340 	info = this_cpu_ptr(cgrp->info);
341 	info->timestamp = ctx->timestamp;
342 }
343 
344 #define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
345 #define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */
346 
347 /*
348  * reschedule events based on the cgroup constraint of the task.
349  *
350  * mode SWOUT : schedule out everything
351  * mode SWIN : schedule in based on cgroup for next
352  */
353 void perf_cgroup_switch(struct task_struct *task, int mode)
354 {
355 	struct perf_cpu_context *cpuctx;
356 	struct pmu *pmu;
357 	unsigned long flags;
358 
359 	/*
360 	 * disable interrupts to avoid getting nr_cgroup
361 	 * changes via __perf_event_disable(). Also
362 	 * avoids preemption.
363 	 */
364 	local_irq_save(flags);
365 
366 	/*
367 	 * we reschedule only in the presence of cgroup
368 	 * constrained events.
369 	 */
370 	rcu_read_lock();
371 
372 	list_for_each_entry_rcu(pmu, &pmus, entry) {
373 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
374 
375 		/*
376 		 * perf_cgroup_events says at least one
377 		 * context on this CPU has cgroup events.
378 		 *
379 		 * ctx->nr_cgroups reports the number of cgroup
380 		 * events for a context.
381 		 */
382 		if (cpuctx->ctx.nr_cgroups > 0) {
383 			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
384 			perf_pmu_disable(cpuctx->ctx.pmu);
385 
386 			if (mode & PERF_CGROUP_SWOUT) {
387 				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
388 				/*
389 				 * must not be done before ctxswout due
390 				 * to event_filter_match() in event_sched_out()
391 				 */
392 				cpuctx->cgrp = NULL;
393 			}
394 
395 			if (mode & PERF_CGROUP_SWIN) {
396 				WARN_ON_ONCE(cpuctx->cgrp);
397 				/* set cgrp before ctxsw in to
398 				 * allow event_filter_match() to not
399 				 * have to pass task around
400 				 */
401 				cpuctx->cgrp = perf_cgroup_from_task(task);
402 				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
403 			}
404 			perf_pmu_enable(cpuctx->ctx.pmu);
405 			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
406 		}
407 	}
408 
409 	rcu_read_unlock();
410 
411 	local_irq_restore(flags);
412 }
413 
414 static inline void perf_cgroup_sched_out(struct task_struct *task,
415 					 struct task_struct *next)
416 {
417 	struct perf_cgroup *cgrp1;
418 	struct perf_cgroup *cgrp2 = NULL;
419 
420 	/*
421 	 * we come here when we know perf_cgroup_events > 0
422 	 */
423 	cgrp1 = perf_cgroup_from_task(task);
424 
425 	/*
426 	 * next is NULL when called from perf_event_enable_on_exec()
427 	 * which will systematically cause a cgroup_switch()
428 	 */
429 	if (next)
430 		cgrp2 = perf_cgroup_from_task(next);
431 
432 	/*
433 	 * only schedule out current cgroup events if we know
434 	 * that we are switching to a different cgroup. Otherwise,
435 	 * do not touch the cgroup events.
436 	 */
437 	if (cgrp1 != cgrp2)
438 		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
439 }
440 
441 static inline void perf_cgroup_sched_in(struct task_struct *prev,
442 					struct task_struct *task)
443 {
444 	struct perf_cgroup *cgrp1;
445 	struct perf_cgroup *cgrp2 = NULL;
446 
447 	/*
448 	 * we come here when we know perf_cgroup_events > 0
449 	 */
450 	cgrp1 = perf_cgroup_from_task(task);
451 
452 	/* prev can never be NULL */
453 	cgrp2 = perf_cgroup_from_task(prev);
454 
455 	/*
456 	 * only need to schedule in cgroup events if we are changing
457 	 * cgroup during ctxsw. Cgroup events were not scheduled
458 	 * out during the previous ctxsw if that was not the case.
459 	 */
460 	if (cgrp1 != cgrp2)
461 		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
462 }
463 
464 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
465 				      struct perf_event_attr *attr,
466 				      struct perf_event *group_leader)
467 {
468 	struct perf_cgroup *cgrp;
469 	struct cgroup_subsys_state *css;
470 	struct file *file;
471 	int ret = 0, fput_needed;
472 
473 	file = fget_light(fd, &fput_needed);
474 	if (!file)
475 		return -EBADF;
476 
477 	css = cgroup_css_from_dir(file, perf_subsys_id);
478 	if (IS_ERR(css)) {
479 		ret = PTR_ERR(css);
480 		goto out;
481 	}
482 
483 	cgrp = container_of(css, struct perf_cgroup, css);
484 	event->cgrp = cgrp;
485 
486 	/* must be done before we fput() the file */
487 	perf_get_cgroup(event);
488 
489 	/*
490 	 * all events in a group must monitor
491 	 * the same cgroup because a task belongs
492 	 * to only one perf cgroup at a time
493 	 */
494 	if (group_leader && group_leader->cgrp != cgrp) {
495 		perf_detach_cgroup(event);
496 		ret = -EINVAL;
497 	}
498 out:
499 	fput_light(file, fput_needed);
500 	return ret;
501 }
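/*
 * Hedged userspace-side sketch: with PERF_FLAG_PID_CGROUP, the pid
 * argument of perf_event_open(2) is a file descriptor of a cgroupfs
 * directory (path and group name below are hypothetical), and the cpu
 * argument must name a real CPU:
 *
 *	int cfd = open("/sys/fs/cgroup/perf_event/mygrp", O_RDONLY);
 *	int pfd = syscall(__NR_perf_event_open, &attr, cfd, cpu,
 *			  -1, PERF_FLAG_PID_CGROUP);
 */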
502 
503 static inline void
504 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
505 {
506 	struct perf_cgroup_info *t;
507 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
508 	event->shadow_ctx_time = now - t->timestamp;
509 }
510 
511 static inline void
512 perf_cgroup_defer_enabled(struct perf_event *event)
513 {
514 	/*
515 	 * when the current task's perf cgroup does not match
516 	 * the event's, we need to remember to call the
517 	 * perf_cgroup_mark_enabled() function the first time a task with
518 	 * a matching perf cgroup is scheduled in.
519 	 */
520 	if (is_cgroup_event(event) && !perf_cgroup_match(event))
521 		event->cgrp_defer_enabled = 1;
522 }
523 
524 static inline void
525 perf_cgroup_mark_enabled(struct perf_event *event,
526 			 struct perf_event_context *ctx)
527 {
528 	struct perf_event *sub;
529 	u64 tstamp = perf_event_time(event);
530 
531 	if (!event->cgrp_defer_enabled)
532 		return;
533 
534 	event->cgrp_defer_enabled = 0;
535 
536 	event->tstamp_enabled = tstamp - event->total_time_enabled;
537 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
538 		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
539 			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
540 			sub->cgrp_defer_enabled = 0;
541 		}
542 	}
543 }
544 #else /* !CONFIG_CGROUP_PERF */
545 
546 static inline bool
547 perf_cgroup_match(struct perf_event *event)
548 {
549 	return true;
550 }
551 
552 static inline void perf_detach_cgroup(struct perf_event *event)
553 {}
554 
555 static inline int is_cgroup_event(struct perf_event *event)
556 {
557 	return 0;
558 }
559 
560 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
561 {
562 	return 0;
563 }
564 
565 static inline void update_cgrp_time_from_event(struct perf_event *event)
566 {
567 }
568 
569 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
570 {
571 }
572 
573 static inline void perf_cgroup_sched_out(struct task_struct *task,
574 					 struct task_struct *next)
575 {
576 }
577 
578 static inline void perf_cgroup_sched_in(struct task_struct *prev,
579 					struct task_struct *task)
580 {
581 }
582 
583 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
584 				      struct perf_event_attr *attr,
585 				      struct perf_event *group_leader)
586 {
587 	return -EINVAL;
588 }
589 
590 static inline void
591 perf_cgroup_set_timestamp(struct task_struct *task,
592 			  struct perf_event_context *ctx)
593 {
594 }
595 
596 void
597 perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
598 {
599 }
600 
601 static inline void
602 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
603 {
604 }
605 
606 static inline u64 perf_cgroup_event_time(struct perf_event *event)
607 {
608 	return 0;
609 }
610 
611 static inline void
612 perf_cgroup_defer_enabled(struct perf_event *event)
613 {
614 }
615 
616 static inline void
617 perf_cgroup_mark_enabled(struct perf_event *event,
618 			 struct perf_event_context *ctx)
619 {
620 }
621 #endif
622 
623 void perf_pmu_disable(struct pmu *pmu)
624 {
625 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
626 	if (!(*count)++)
627 		pmu->pmu_disable(pmu);
628 }
629 
630 void perf_pmu_enable(struct pmu *pmu)
631 {
632 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
633 	if (!--(*count))
634 		pmu->pmu_enable(pmu);
635 }
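/*
 * Illustrative note: disable/enable nest via the per-cpu count, so only
 * the outermost pair reaches the pmu callbacks:
 *
 *	perf_pmu_disable(pmu);	count 0 -> 1, pmu->pmu_disable() runs
 *	perf_pmu_disable(pmu);	count 1 -> 2, no callback
 *	perf_pmu_enable(pmu);	count 2 -> 1, no callback
 *	perf_pmu_enable(pmu);	count 1 -> 0, pmu->pmu_enable() runs
 */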
636 
637 static DEFINE_PER_CPU(struct list_head, rotation_list);
638 
639 /*
640  * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
641  * because they're strictly cpu affine and rotate_start is called with IRQs
642  * disabled, while rotate_context is called from IRQ context.
643  */
644 static void perf_pmu_rotate_start(struct pmu *pmu)
645 {
646 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
647 	struct list_head *head = &__get_cpu_var(rotation_list);
648 
649 	WARN_ON(!irqs_disabled());
650 
651 	if (list_empty(&cpuctx->rotation_list))
652 		list_add(&cpuctx->rotation_list, head);
653 }
654 
655 static void get_ctx(struct perf_event_context *ctx)
656 {
657 	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
658 }
659 
660 static void put_ctx(struct perf_event_context *ctx)
661 {
662 	if (atomic_dec_and_test(&ctx->refcount)) {
663 		if (ctx->parent_ctx)
664 			put_ctx(ctx->parent_ctx);
665 		if (ctx->task)
666 			put_task_struct(ctx->task);
667 		kfree_rcu(ctx, rcu_head);
668 	}
669 }
670 
671 static void unclone_ctx(struct perf_event_context *ctx)
672 {
673 	if (ctx->parent_ctx) {
674 		put_ctx(ctx->parent_ctx);
675 		ctx->parent_ctx = NULL;
676 	}
677 }
678 
679 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
680 {
681 	/*
682 	 * only top level events have the pid namespace they were created in
683 	 */
684 	if (event->parent)
685 		event = event->parent;
686 
687 	return task_tgid_nr_ns(p, event->ns);
688 }
689 
690 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
691 {
692 	/*
693 	 * only top level events have the pid namespace they were created in
694 	 */
695 	if (event->parent)
696 		event = event->parent;
697 
698 	return task_pid_nr_ns(p, event->ns);
699 }
700 
701 /*
702  * If we inherit events we want to return the parent event id
703  * to userspace.
704  */
705 static u64 primary_event_id(struct perf_event *event)
706 {
707 	u64 id = event->id;
708 
709 	if (event->parent)
710 		id = event->parent->id;
711 
712 	return id;
713 }
714 
715 /*
716  * Get the perf_event_context for a task and lock it.
717  * This has to cope with the fact that until it is locked,
718  * the context could get moved to another task.
719  */
720 static struct perf_event_context *
721 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
722 {
723 	struct perf_event_context *ctx;
724 
725 	rcu_read_lock();
726 retry:
727 	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
728 	if (ctx) {
729 		/*
730 		 * If this context is a clone of another, it might
731 		 * get swapped for another underneath us by
732 		 * perf_event_task_sched_out, though the
733 		 * rcu_read_lock() protects us from any context
734 		 * getting freed.  Lock the context and check if it
735 		 * got swapped before we could get the lock, and retry
736 		 * if so.  If we locked the right context, then it
737 		 * can't get swapped on us any more.
738 		 */
739 		raw_spin_lock_irqsave(&ctx->lock, *flags);
740 		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
741 			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
742 			goto retry;
743 		}
744 
745 		if (!atomic_inc_not_zero(&ctx->refcount)) {
746 			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
747 			ctx = NULL;
748 		}
749 	}
750 	rcu_read_unlock();
751 	return ctx;
752 }
753 
754 /*
755  * Get the context for a task and increment its pin_count so it
756  * can't get swapped to another task.  This also increments its
757  * reference count so that the context can't get freed.
758  */
759 static struct perf_event_context *
760 perf_pin_task_context(struct task_struct *task, int ctxn)
761 {
762 	struct perf_event_context *ctx;
763 	unsigned long flags;
764 
765 	ctx = perf_lock_task_context(task, ctxn, &flags);
766 	if (ctx) {
767 		++ctx->pin_count;
768 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
769 	}
770 	return ctx;
771 }
772 
773 static void perf_unpin_context(struct perf_event_context *ctx)
774 {
775 	unsigned long flags;
776 
777 	raw_spin_lock_irqsave(&ctx->lock, flags);
778 	--ctx->pin_count;
779 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
780 }
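/*
 * Hedged usage sketch: pinning brackets a section that needs a stable
 * task<->context association; the pin also takes a reference, which the
 * caller drops separately once done:
 *
 *	ctx = perf_pin_task_context(task, ctxn);
 *	if (ctx) {
 *		... use ctx ...
 *		perf_unpin_context(ctx);
 *		put_ctx(ctx);
 *	}
 */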
781 
782 /*
783  * Update the record of the current time in a context.
784  */
785 static void update_context_time(struct perf_event_context *ctx)
786 {
787 	u64 now = perf_clock();
788 
789 	ctx->time += now - ctx->timestamp;
790 	ctx->timestamp = now;
791 }
792 
793 static u64 perf_event_time(struct perf_event *event)
794 {
795 	struct perf_event_context *ctx = event->ctx;
796 
797 	if (is_cgroup_event(event))
798 		return perf_cgroup_event_time(event);
799 
800 	return ctx ? ctx->time : 0;
801 }
802 
803 /*
804  * Update the total_time_enabled and total_time_running fields for an event.
805  * The caller of this function needs to hold the ctx->lock.
806  */
807 static void update_event_times(struct perf_event *event)
808 {
809 	struct perf_event_context *ctx = event->ctx;
810 	u64 run_end;
811 
812 	if (event->state < PERF_EVENT_STATE_INACTIVE ||
813 	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
814 		return;
815 	/*
816 	 * in cgroup mode, time_enabled represents
817 	 * the time the event was enabled AND active
818 	 * tasks were in the monitored cgroup. This is
819 	 * independent of the activity of the context as
820 	 * there may be a mix of cgroup and non-cgroup events.
821 	 *
822 	 * That is why we treat cgroup events differently
823 	 * here.
824 	 */
825 	if (is_cgroup_event(event))
826 		run_end = perf_cgroup_event_time(event);
827 	else if (ctx->is_active)
828 		run_end = ctx->time;
829 	else
830 		run_end = event->tstamp_stopped;
831 
832 	event->total_time_enabled = run_end - event->tstamp_enabled;
833 
834 	if (event->state == PERF_EVENT_STATE_INACTIVE)
835 		run_end = event->tstamp_stopped;
836 	else
837 		run_end = perf_event_time(event);
838 
839 	event->total_time_running = run_end - event->tstamp_running;
840 
841 }
842 
843 /*
844  * Update total_time_enabled and total_time_running for all events in a group.
845  */
846 static void update_group_times(struct perf_event *leader)
847 {
848 	struct perf_event *event;
849 
850 	update_event_times(leader);
851 	list_for_each_entry(event, &leader->sibling_list, group_entry)
852 		update_event_times(event);
853 }
854 
855 static struct list_head *
856 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
857 {
858 	if (event->attr.pinned)
859 		return &ctx->pinned_groups;
860 	else
861 		return &ctx->flexible_groups;
862 }
863 
864 /*
865  * Add an event to the lists for its context.
866  * Must be called with ctx->mutex and ctx->lock held.
867  */
868 static void
869 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
870 {
871 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
872 	event->attach_state |= PERF_ATTACH_CONTEXT;
873 
874 	/*
875 	 * If we're a stand-alone event or group leader, we go to the context
876 	 * list; group events are kept attached to the group so that
877 	 * perf_group_detach can, at all times, locate all siblings.
878 	 */
879 	if (event->group_leader == event) {
880 		struct list_head *list;
881 
882 		if (is_software_event(event))
883 			event->group_flags |= PERF_GROUP_SOFTWARE;
884 
885 		list = ctx_group_list(event, ctx);
886 		list_add_tail(&event->group_entry, list);
887 	}
888 
889 	if (is_cgroup_event(event))
890 		ctx->nr_cgroups++;
891 
892 	if (has_branch_stack(event))
893 		ctx->nr_branch_stack++;
894 
895 	list_add_rcu(&event->event_entry, &ctx->event_list);
896 	if (!ctx->nr_events)
897 		perf_pmu_rotate_start(ctx->pmu);
898 	ctx->nr_events++;
899 	if (event->attr.inherit_stat)
900 		ctx->nr_stat++;
901 }
902 
903 /*
904  * Called at perf_event creation and when events are attached/detached from a
905  * group.
906  */
907 static void perf_event__read_size(struct perf_event *event)
908 {
909 	int entry = sizeof(u64); /* value */
910 	int size = 0;
911 	int nr = 1;
912 
913 	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
914 		size += sizeof(u64);
915 
916 	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
917 		size += sizeof(u64);
918 
919 	if (event->attr.read_format & PERF_FORMAT_ID)
920 		entry += sizeof(u64);
921 
922 	if (event->attr.read_format & PERF_FORMAT_GROUP) {
923 		nr += event->group_leader->nr_siblings;
924 		size += sizeof(u64);
925 	}
926 
927 	size += entry * nr;
928 	event->read_size = size;
929 }
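/*
 * Worked example: read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_ID | PERF_FORMAT_GROUP on a leader with two siblings
 * gives entry = 16 (value + id), nr = 3, and read_size =
 * 8 (time_enabled) + 8 (nr) + 3 * 16 = 64 bytes per read().
 */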
930 
931 static void perf_event__header_size(struct perf_event *event)
932 {
933 	struct perf_sample_data *data;
934 	u64 sample_type = event->attr.sample_type;
935 	u16 size = 0;
936 
937 	perf_event__read_size(event);
938 
939 	if (sample_type & PERF_SAMPLE_IP)
940 		size += sizeof(data->ip);
941 
942 	if (sample_type & PERF_SAMPLE_ADDR)
943 		size += sizeof(data->addr);
944 
945 	if (sample_type & PERF_SAMPLE_PERIOD)
946 		size += sizeof(data->period);
947 
948 	if (sample_type & PERF_SAMPLE_READ)
949 		size += event->read_size;
950 
951 	event->header_size = size;
952 }
953 
954 static void perf_event__id_header_size(struct perf_event *event)
955 {
956 	struct perf_sample_data *data;
957 	u64 sample_type = event->attr.sample_type;
958 	u16 size = 0;
959 
960 	if (sample_type & PERF_SAMPLE_TID)
961 		size += sizeof(data->tid_entry);
962 
963 	if (sample_type & PERF_SAMPLE_TIME)
964 		size += sizeof(data->time);
965 
966 	if (sample_type & PERF_SAMPLE_ID)
967 		size += sizeof(data->id);
968 
969 	if (sample_type & PERF_SAMPLE_STREAM_ID)
970 		size += sizeof(data->stream_id);
971 
972 	if (sample_type & PERF_SAMPLE_CPU)
973 		size += sizeof(data->cpu_entry);
974 
975 	event->id_header_size = size;
976 }
977 
978 static void perf_group_attach(struct perf_event *event)
979 {
980 	struct perf_event *group_leader = event->group_leader, *pos;
981 
982 	/*
983 	 * We can have double attach due to group movement in perf_event_open.
984 	 */
985 	if (event->attach_state & PERF_ATTACH_GROUP)
986 		return;
987 
988 	event->attach_state |= PERF_ATTACH_GROUP;
989 
990 	if (group_leader == event)
991 		return;
992 
993 	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
994 			!is_software_event(event))
995 		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
996 
997 	list_add_tail(&event->group_entry, &group_leader->sibling_list);
998 	group_leader->nr_siblings++;
999 
1000 	perf_event__header_size(group_leader);
1001 
1002 	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1003 		perf_event__header_size(pos);
1004 }
1005 
1006 /*
1007  * Remove an event from the lists for its context.
1008  * Must be called with ctx->mutex and ctx->lock held.
1009  */
1010 static void
1011 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1012 {
1013 	struct perf_cpu_context *cpuctx;
1014 	/*
1015 	 * We can have double detach due to exit/hot-unplug + close.
1016 	 */
1017 	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1018 		return;
1019 
1020 	event->attach_state &= ~PERF_ATTACH_CONTEXT;
1021 
1022 	if (is_cgroup_event(event)) {
1023 		ctx->nr_cgroups--;
1024 		cpuctx = __get_cpu_context(ctx);
1025 		/*
1026 		 * if there are no more cgroup events
1027 		 * then clear cgrp to avoid a stale pointer
1028 		 * in update_cgrp_time_from_cpuctx()
1029 		 */
1030 		if (!ctx->nr_cgroups)
1031 			cpuctx->cgrp = NULL;
1032 	}
1033 
1034 	if (has_branch_stack(event))
1035 		ctx->nr_branch_stack--;
1036 
1037 	ctx->nr_events--;
1038 	if (event->attr.inherit_stat)
1039 		ctx->nr_stat--;
1040 
1041 	list_del_rcu(&event->event_entry);
1042 
1043 	if (event->group_leader == event)
1044 		list_del_init(&event->group_entry);
1045 
1046 	update_group_times(event);
1047 
1048 	/*
1049 	 * If the event was in error state, then keep it
1050 	 * that way, otherwise bogus counts will be
1051 	 * returned on read(). The only way to get out
1052 	 * of error state is by explicit re-enabling
1053 	 * of the event
1054 	 */
1055 	if (event->state > PERF_EVENT_STATE_OFF)
1056 		event->state = PERF_EVENT_STATE_OFF;
1057 }
1058 
1059 static void perf_group_detach(struct perf_event *event)
1060 {
1061 	struct perf_event *sibling, *tmp;
1062 	struct list_head *list = NULL;
1063 
1064 	/*
1065 	 * We can have double detach due to exit/hot-unplug + close.
1066 	 */
1067 	if (!(event->attach_state & PERF_ATTACH_GROUP))
1068 		return;
1069 
1070 	event->attach_state &= ~PERF_ATTACH_GROUP;
1071 
1072 	/*
1073 	 * If this is a sibling, remove it from its group.
1074 	 */
1075 	if (event->group_leader != event) {
1076 		list_del_init(&event->group_entry);
1077 		event->group_leader->nr_siblings--;
1078 		goto out;
1079 	}
1080 
1081 	if (!list_empty(&event->group_entry))
1082 		list = &event->group_entry;
1083 
1084 	/*
1085 	 * If this was a group event with sibling events then
1086 	 * upgrade the siblings to singleton events by adding them
1087 	 * to whatever list we are on.
1088 	 */
1089 	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1090 		if (list)
1091 			list_move_tail(&sibling->group_entry, list);
1092 		sibling->group_leader = sibling;
1093 
1094 		/* Inherit group flags from the previous leader */
1095 		sibling->group_flags = event->group_flags;
1096 	}
1097 
1098 out:
1099 	perf_event__header_size(event->group_leader);
1100 
1101 	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1102 		perf_event__header_size(tmp);
1103 }
1104 
1105 static inline int
1106 event_filter_match(struct perf_event *event)
1107 {
1108 	return (event->cpu == -1 || event->cpu == smp_processor_id())
1109 	    && perf_cgroup_match(event);
1110 }
1111 
1112 static void
1113 event_sched_out(struct perf_event *event,
1114 		  struct perf_cpu_context *cpuctx,
1115 		  struct perf_event_context *ctx)
1116 {
1117 	u64 tstamp = perf_event_time(event);
1118 	u64 delta;
1119 	/*
1120 	 * An event which could not be activated because of
1121 	 * filter mismatch still needs to have its timings
1122 	 * maintained, otherwise bogus information is returned
1123 	 * via read() for time_enabled, time_running:
1124 	 */
1125 	if (event->state == PERF_EVENT_STATE_INACTIVE
1126 	    && !event_filter_match(event)) {
1127 		delta = tstamp - event->tstamp_stopped;
1128 		event->tstamp_running += delta;
1129 		event->tstamp_stopped = tstamp;
1130 	}
1131 
1132 	if (event->state != PERF_EVENT_STATE_ACTIVE)
1133 		return;
1134 
1135 	event->state = PERF_EVENT_STATE_INACTIVE;
1136 	if (event->pending_disable) {
1137 		event->pending_disable = 0;
1138 		event->state = PERF_EVENT_STATE_OFF;
1139 	}
1140 	event->tstamp_stopped = tstamp;
1141 	event->pmu->del(event, 0);
1142 	event->oncpu = -1;
1143 
1144 	if (!is_software_event(event))
1145 		cpuctx->active_oncpu--;
1146 	ctx->nr_active--;
1147 	if (event->attr.freq && event->attr.sample_freq)
1148 		ctx->nr_freq--;
1149 	if (event->attr.exclusive || !cpuctx->active_oncpu)
1150 		cpuctx->exclusive = 0;
1151 }
1152 
1153 static void
1154 group_sched_out(struct perf_event *group_event,
1155 		struct perf_cpu_context *cpuctx,
1156 		struct perf_event_context *ctx)
1157 {
1158 	struct perf_event *event;
1159 	int state = group_event->state;
1160 
1161 	event_sched_out(group_event, cpuctx, ctx);
1162 
1163 	/*
1164 	 * Schedule out siblings (if any):
1165 	 */
1166 	list_for_each_entry(event, &group_event->sibling_list, group_entry)
1167 		event_sched_out(event, cpuctx, ctx);
1168 
1169 	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1170 		cpuctx->exclusive = 0;
1171 }
1172 
1173 /*
1174  * Cross CPU call to remove a performance event
1175  *
1176  * We disable the event on the hardware level first. After that we
1177  * remove it from the context list.
1178  */
1179 static int __perf_remove_from_context(void *info)
1180 {
1181 	struct perf_event *event = info;
1182 	struct perf_event_context *ctx = event->ctx;
1183 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1184 
1185 	raw_spin_lock(&ctx->lock);
1186 	event_sched_out(event, cpuctx, ctx);
1187 	list_del_event(event, ctx);
1188 	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1189 		ctx->is_active = 0;
1190 		cpuctx->task_ctx = NULL;
1191 	}
1192 	raw_spin_unlock(&ctx->lock);
1193 
1194 	return 0;
1195 }
1196 
1197 
1198 /*
1199  * Remove the event from a task's (or a CPU's) list of events.
1200  *
1201  * CPU events are removed with an smp call. For task events we only
1202  * call when the task is on a CPU.
1203  *
1204  * If event->ctx is a cloned context, callers must make sure that
1205  * every task struct that event->ctx->task could possibly point to
1206  * remains valid.  This is OK when called from perf_release since
1207  * that only calls us on the top-level context, which can't be a clone.
1208  * When called from perf_event_exit_task, it's OK because the
1209  * context has been detached from its task.
1210  */
1211 static void perf_remove_from_context(struct perf_event *event)
1212 {
1213 	struct perf_event_context *ctx = event->ctx;
1214 	struct task_struct *task = ctx->task;
1215 
1216 	lockdep_assert_held(&ctx->mutex);
1217 
1218 	if (!task) {
1219 		/*
1220 		 * Per cpu events are removed via an smp call and
1221 		 * the removal is always successful.
1222 		 */
1223 		cpu_function_call(event->cpu, __perf_remove_from_context, event);
1224 		return;
1225 	}
1226 
1227 retry:
1228 	if (!task_function_call(task, __perf_remove_from_context, event))
1229 		return;
1230 
1231 	raw_spin_lock_irq(&ctx->lock);
1232 	/*
1233 	 * If we failed to find a running task, but find the context active now
1234 	 * that we've acquired the ctx->lock, retry.
1235 	 */
1236 	if (ctx->is_active) {
1237 		raw_spin_unlock_irq(&ctx->lock);
1238 		goto retry;
1239 	}
1240 
1241 	/*
1242 	 * Since the task isn't running, it's safe to remove the event; our
1243 	 * holding the ctx->lock ensures the task won't get scheduled in.
1244 	 */
1245 	list_del_event(event, ctx);
1246 	raw_spin_unlock_irq(&ctx->lock);
1247 }
1248 
1249 /*
1250  * Cross CPU call to disable a performance event
1251  */
1252 static int __perf_event_disable(void *info)
1253 {
1254 	struct perf_event *event = info;
1255 	struct perf_event_context *ctx = event->ctx;
1256 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1257 
1258 	/*
1259 	 * If this is a per-task event, we need to check whether this
1260 	 * event's task is the current task on this cpu.
1261 	 *
1262 	 * Can trigger due to concurrent perf_event_context_sched_out()
1263 	 * flipping contexts around.
1264 	 */
1265 	if (ctx->task && cpuctx->task_ctx != ctx)
1266 		return -EINVAL;
1267 
1268 	raw_spin_lock(&ctx->lock);
1269 
1270 	/*
1271 	 * If the event is on, turn it off.
1272 	 * If it is in error state, leave it in error state.
1273 	 */
1274 	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1275 		update_context_time(ctx);
1276 		update_cgrp_time_from_event(event);
1277 		update_group_times(event);
1278 		if (event == event->group_leader)
1279 			group_sched_out(event, cpuctx, ctx);
1280 		else
1281 			event_sched_out(event, cpuctx, ctx);
1282 		event->state = PERF_EVENT_STATE_OFF;
1283 	}
1284 
1285 	raw_spin_unlock(&ctx->lock);
1286 
1287 	return 0;
1288 }
1289 
1290 /*
1291  * Disable an event.
1292  *
1293  * If event->ctx is a cloned context, callers must make sure that
1294  * every task struct that event->ctx->task could possibly point to
1295  * remains valid.  This condition is satisfied when called through
1296  * perf_event_for_each_child or perf_event_for_each because they
1297  * hold the top-level event's child_mutex, so any descendant that
1298  * goes to exit will block in sync_child_event.
1299  * When called from perf_pending_event it's OK because event->ctx
1300  * is the current context on this CPU and preemption is disabled,
1301  * hence we can't get into perf_event_task_sched_out for this context.
1302  */
1303 void perf_event_disable(struct perf_event *event)
1304 {
1305 	struct perf_event_context *ctx = event->ctx;
1306 	struct task_struct *task = ctx->task;
1307 
1308 	if (!task) {
1309 		/*
1310 		 * Disable the event on the cpu that it's on
1311 		 */
1312 		cpu_function_call(event->cpu, __perf_event_disable, event);
1313 		return;
1314 	}
1315 
1316 retry:
1317 	if (!task_function_call(task, __perf_event_disable, event))
1318 		return;
1319 
1320 	raw_spin_lock_irq(&ctx->lock);
1321 	/*
1322 	 * If the event is still active, we need to retry the cross-call.
1323 	 */
1324 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
1325 		raw_spin_unlock_irq(&ctx->lock);
1326 		/*
1327 		 * Reload the task pointer, it might have been changed by
1328 		 * a concurrent perf_event_context_sched_out().
1329 		 */
1330 		task = ctx->task;
1331 		goto retry;
1332 	}
1333 
1334 	/*
1335 	 * Since we have the lock this context can't be scheduled
1336 	 * in, so we can change the state safely.
1337 	 */
1338 	if (event->state == PERF_EVENT_STATE_INACTIVE) {
1339 		update_group_times(event);
1340 		event->state = PERF_EVENT_STATE_OFF;
1341 	}
1342 	raw_spin_unlock_irq(&ctx->lock);
1343 }
1344 EXPORT_SYMBOL_GPL(perf_event_disable);
1345 
1346 static void perf_set_shadow_time(struct perf_event *event,
1347 				 struct perf_event_context *ctx,
1348 				 u64 tstamp)
1349 {
1350 	/*
1351 	 * use the correct time source for the time snapshot
1352 	 *
1353 	 * We could get by without this by leveraging the
1354 	 * fact that to get to this function, the caller
1355 	 * has most likely already called update_context_time()
1356 	 * and update_cgrp_time_xx() and thus both timestamps
1357 	 * are identical (or very close). Given that tstamp is
1358 	 * already adjusted for cgroup, we could say that:
1359 	 *    tstamp - ctx->timestamp
1360 	 * is equivalent to
1361 	 *    tstamp - cgrp->timestamp.
1362 	 *
1363 	 * Then, in perf_output_read(), the calculation would
1364 	 * work with no changes because:
1365 	 * - event is guaranteed scheduled in
1366 	 * - no scheduled out in between
1367 	 * - thus the timestamp would be the same
1368 	 *
1369 	 * But this is a bit hairy.
1370 	 *
1371 	 * So instead, we have an explicit cgroup call to remain
1372 	 * within the same time source all along. We believe it
1373 	 * is cleaner and simpler to understand.
1374 	 */
1375 	if (is_cgroup_event(event))
1376 		perf_cgroup_set_shadow_time(event, tstamp);
1377 	else
1378 		event->shadow_ctx_time = tstamp - ctx->timestamp;
1379 }
1380 
1381 #define MAX_INTERRUPTS (~0ULL)
1382 
1383 static void perf_log_throttle(struct perf_event *event, int enable);
1384 
1385 static int
1386 event_sched_in(struct perf_event *event,
1387 		 struct perf_cpu_context *cpuctx,
1388 		 struct perf_event_context *ctx)
1389 {
1390 	u64 tstamp = perf_event_time(event);
1391 
1392 	if (event->state <= PERF_EVENT_STATE_OFF)
1393 		return 0;
1394 
1395 	event->state = PERF_EVENT_STATE_ACTIVE;
1396 	event->oncpu = smp_processor_id();
1397 
1398 	/*
1399 	 * Unthrottle events: since we just scheduled in, we might have missed
1400 	 * several ticks already, and for a heavily scheduling task there is
1401 	 * little guarantee it'll get a tick in a timely manner.
1402 	 */
1403 	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1404 		perf_log_throttle(event, 1);
1405 		event->hw.interrupts = 0;
1406 	}
1407 
1408 	/*
1409 	 * The new state must be visible before we turn it on in the hardware:
1410 	 */
1411 	smp_wmb();
1412 
1413 	if (event->pmu->add(event, PERF_EF_START)) {
1414 		event->state = PERF_EVENT_STATE_INACTIVE;
1415 		event->oncpu = -1;
1416 		return -EAGAIN;
1417 	}
1418 
1419 	event->tstamp_running += tstamp - event->tstamp_stopped;
1420 
1421 	perf_set_shadow_time(event, ctx, tstamp);
1422 
1423 	if (!is_software_event(event))
1424 		cpuctx->active_oncpu++;
1425 	ctx->nr_active++;
1426 	if (event->attr.freq && event->attr.sample_freq)
1427 		ctx->nr_freq++;
1428 
1429 	if (event->attr.exclusive)
1430 		cpuctx->exclusive = 1;
1431 
1432 	return 0;
1433 }
1434 
1435 static int
1436 group_sched_in(struct perf_event *group_event,
1437 	       struct perf_cpu_context *cpuctx,
1438 	       struct perf_event_context *ctx)
1439 {
1440 	struct perf_event *event, *partial_group = NULL;
1441 	struct pmu *pmu = group_event->pmu;
1442 	u64 now = ctx->time;
1443 	bool simulate = false;
1444 
1445 	if (group_event->state == PERF_EVENT_STATE_OFF)
1446 		return 0;
1447 
1448 	pmu->start_txn(pmu);
1449 
1450 	if (event_sched_in(group_event, cpuctx, ctx)) {
1451 		pmu->cancel_txn(pmu);
1452 		return -EAGAIN;
1453 	}
1454 
1455 	/*
1456 	 * Schedule in siblings as one group (if any):
1457 	 */
1458 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1459 		if (event_sched_in(event, cpuctx, ctx)) {
1460 			partial_group = event;
1461 			goto group_error;
1462 		}
1463 	}
1464 
1465 	if (!pmu->commit_txn(pmu))
1466 		return 0;
1467 
1468 group_error:
1469 	/*
1470 	 * Groups can be scheduled in as one unit only, so undo any
1471 	 * partial group before returning:
1472 	 * The events up to the failed event are scheduled out normally,
1473 	 * tstamp_stopped will be updated.
1474 	 *
1475 	 * The failed events and the remaining siblings need to have
1476 	 * their timings updated as if they had gone through event_sched_in()
1477 	 * and event_sched_out(). This is required to get consistent timings
1478 	 * across the group. This also takes care of the case where the group
1479 	 * could never be scheduled by ensuring tstamp_stopped is set to mark
1480 	 * the time the event was actually stopped, such that time delta
1481 	 * calculation in update_event_times() is correct.
1482 	 */
1483 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1484 		if (event == partial_group)
1485 			simulate = true;
1486 
1487 		if (simulate) {
1488 			event->tstamp_running += now - event->tstamp_stopped;
1489 			event->tstamp_stopped = now;
1490 		} else {
1491 			event_sched_out(event, cpuctx, ctx);
1492 		}
1493 	}
1494 	event_sched_out(group_event, cpuctx, ctx);
1495 
1496 	pmu->cancel_txn(pmu);
1497 
1498 	return -EAGAIN;
1499 }
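/*
 * Hedged sketch of the transaction protocol driven above; a pmu that
 * supports group scheduling implements it roughly as:
 *
 *	pmu->start_txn(pmu);	begin collecting events
 *	pmu->add(event, ...);	may succeed optimistically
 *	pmu->commit_txn(pmu);	returns 0 iff the whole group fits;
 *	pmu->cancel_txn(pmu);	otherwise rolls the group back
 */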
1500 
1501 /*
1502  * Work out whether we can put this event group on the CPU now.
1503  */
1504 static int group_can_go_on(struct perf_event *event,
1505 			   struct perf_cpu_context *cpuctx,
1506 			   int can_add_hw)
1507 {
1508 	/*
1509 	 * Groups consisting entirely of software events can always go on.
1510 	 */
1511 	if (event->group_flags & PERF_GROUP_SOFTWARE)
1512 		return 1;
1513 	/*
1514 	 * If an exclusive group is already on, no other hardware
1515 	 * events can go on.
1516 	 */
1517 	if (cpuctx->exclusive)
1518 		return 0;
1519 	/*
1520 	 * If this group is exclusive and there are already
1521 	 * events on the CPU, it can't go on.
1522 	 */
1523 	if (event->attr.exclusive && cpuctx->active_oncpu)
1524 		return 0;
1525 	/*
1526 	 * Otherwise, try to add it if all previous groups were able
1527 	 * to go on.
1528 	 */
1529 	return can_add_hw;
1530 }
1531 
1532 static void add_event_to_ctx(struct perf_event *event,
1533 			       struct perf_event_context *ctx)
1534 {
1535 	u64 tstamp = perf_event_time(event);
1536 
1537 	list_add_event(event, ctx);
1538 	perf_group_attach(event);
1539 	event->tstamp_enabled = tstamp;
1540 	event->tstamp_running = tstamp;
1541 	event->tstamp_stopped = tstamp;
1542 }
1543 
1544 static void task_ctx_sched_out(struct perf_event_context *ctx);
1545 static void
1546 ctx_sched_in(struct perf_event_context *ctx,
1547 	     struct perf_cpu_context *cpuctx,
1548 	     enum event_type_t event_type,
1549 	     struct task_struct *task);
1550 
1551 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1552 				struct perf_event_context *ctx,
1553 				struct task_struct *task)
1554 {
1555 	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1556 	if (ctx)
1557 		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1558 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1559 	if (ctx)
1560 		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1561 }
1562 
1563 /*
1564  * Cross CPU call to install and enable a performance event
1565  *
1566  * Must be called with ctx->mutex held
1567  */
1568 static int  __perf_install_in_context(void *info)
1569 {
1570 	struct perf_event *event = info;
1571 	struct perf_event_context *ctx = event->ctx;
1572 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1573 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
1574 	struct task_struct *task = current;
1575 
1576 	perf_ctx_lock(cpuctx, task_ctx);
1577 	perf_pmu_disable(cpuctx->ctx.pmu);
1578 
1579 	/*
1580 	 * If there was an active task_ctx schedule it out.
1581 	 */
1582 	if (task_ctx)
1583 		task_ctx_sched_out(task_ctx);
1584 
1585 	/*
1586 	 * If the context we're installing events in is not the
1587 	 * active task_ctx, flip them.
1588 	 */
1589 	if (ctx->task && task_ctx != ctx) {
1590 		if (task_ctx)
1591 			raw_spin_unlock(&task_ctx->lock);
1592 		raw_spin_lock(&ctx->lock);
1593 		task_ctx = ctx;
1594 	}
1595 
1596 	if (task_ctx) {
1597 		cpuctx->task_ctx = task_ctx;
1598 		task = task_ctx->task;
1599 	}
1600 
1601 	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
1602 
1603 	update_context_time(ctx);
1604 	/*
1605 	 * update cgrp time only if current cgrp
1606 	 * matches event->cgrp. Must be done before
1607 	 * calling add_event_to_ctx()
1608 	 */
1609 	update_cgrp_time_from_event(event);
1610 
1611 	add_event_to_ctx(event, ctx);
1612 
1613 	/*
1614 	 * Schedule everything back in
1615 	 */
1616 	perf_event_sched_in(cpuctx, task_ctx, task);
1617 
1618 	perf_pmu_enable(cpuctx->ctx.pmu);
1619 	perf_ctx_unlock(cpuctx, task_ctx);
1620 
1621 	return 0;
1622 }
1623 
1624 /*
1625  * Attach a performance event to a context
1626  *
1627  * First we add the event to the list with the hardware enable bit
1628  * in event->hw_config cleared.
1629  *
1630  * If the event is attached to a task which is on a CPU we use an smp
1631  * call to enable it in the task context. The task might have been
1632  * scheduled away, but we check this in the smp call again.
1633  */
1634 static void
1635 perf_install_in_context(struct perf_event_context *ctx,
1636 			struct perf_event *event,
1637 			int cpu)
1638 {
1639 	struct task_struct *task = ctx->task;
1640 
1641 	lockdep_assert_held(&ctx->mutex);
1642 
1643 	event->ctx = ctx;
1644 
1645 	if (!task) {
1646 		/*
1647 		 * Per cpu events are installed via an smp call and
1648 		 * the install is always successful.
1649 		 */
1650 		cpu_function_call(cpu, __perf_install_in_context, event);
1651 		return;
1652 	}
1653 
1654 retry:
1655 	if (!task_function_call(task, __perf_install_in_context, event))
1656 		return;
1657 
1658 	raw_spin_lock_irq(&ctx->lock);
1659 	/*
1660 	 * If we failed to find a running task, but find the context active now
1661 	 * that we've acquired the ctx->lock, retry.
1662 	 */
1663 	if (ctx->is_active) {
1664 		raw_spin_unlock_irq(&ctx->lock);
1665 		goto retry;
1666 	}
1667 
1668 	/*
1669 	 * Since the task isn't running, it's safe to add the event; our holding
1670 	 * the ctx->lock ensures the task won't get scheduled in.
1671 	 */
1672 	add_event_to_ctx(event, ctx);
1673 	raw_spin_unlock_irq(&ctx->lock);
1674 }
1675 
1676 /*
1677  * Put an event into inactive state and update time fields.
1678  * Enabling the leader of a group effectively enables all
1679  * the group members that aren't explicitly disabled, so we
1680  * have to update their ->tstamp_enabled also.
1681  * Note: this works for group members as well as group leaders
1682  * since the non-leader members' sibling_lists will be empty.
1683  */
1684 static void __perf_event_mark_enabled(struct perf_event *event)
1685 {
1686 	struct perf_event *sub;
1687 	u64 tstamp = perf_event_time(event);
1688 
1689 	event->state = PERF_EVENT_STATE_INACTIVE;
1690 	event->tstamp_enabled = tstamp - event->total_time_enabled;
1691 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
1692 		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1693 			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
1694 	}
1695 }
1696 
1697 /*
1698  * Cross CPU call to enable a performance event
1699  */
1700 static int __perf_event_enable(void *info)
1701 {
1702 	struct perf_event *event = info;
1703 	struct perf_event_context *ctx = event->ctx;
1704 	struct perf_event *leader = event->group_leader;
1705 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1706 	int err;
1707 
1708 	if (WARN_ON_ONCE(!ctx->is_active))
1709 		return -EINVAL;
1710 
1711 	raw_spin_lock(&ctx->lock);
1712 	update_context_time(ctx);
1713 
1714 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
1715 		goto unlock;
1716 
1717 	/*
1718 	 * set current task's cgroup time reference point
1719 	 */
1720 	perf_cgroup_set_timestamp(current, ctx);
1721 
1722 	__perf_event_mark_enabled(event);
1723 
1724 	if (!event_filter_match(event)) {
1725 		if (is_cgroup_event(event))
1726 			perf_cgroup_defer_enabled(event);
1727 		goto unlock;
1728 	}
1729 
1730 	/*
1731 	 * If the event is in a group and isn't the group leader,
1732 	 * then don't put it on unless the group is on.
1733 	 */
1734 	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
1735 		goto unlock;
1736 
1737 	if (!group_can_go_on(event, cpuctx, 1)) {
1738 		err = -EEXIST;
1739 	} else {
1740 		if (event == leader)
1741 			err = group_sched_in(event, cpuctx, ctx);
1742 		else
1743 			err = event_sched_in(event, cpuctx, ctx);
1744 	}
1745 
1746 	if (err) {
1747 		/*
1748 		 * If this event can't go on and it's part of a
1749 		 * group, then the whole group has to come off.
1750 		 */
1751 		if (leader != event)
1752 			group_sched_out(leader, cpuctx, ctx);
1753 		if (leader->attr.pinned) {
1754 			update_group_times(leader);
1755 			leader->state = PERF_EVENT_STATE_ERROR;
1756 		}
1757 	}
1758 
1759 unlock:
1760 	raw_spin_unlock(&ctx->lock);
1761 
1762 	return 0;
1763 }
1764 
1765 /*
1766  * Enable an event.
1767  *
1768  * If event->ctx is a cloned context, callers must make sure that
1769  * every task struct that event->ctx->task could possibly point to
1770  * remains valid.  This condition is satisfied when called through
1771  * perf_event_for_each_child or perf_event_for_each as described
1772  * for perf_event_disable.
1773  */
1774 void perf_event_enable(struct perf_event *event)
1775 {
1776 	struct perf_event_context *ctx = event->ctx;
1777 	struct task_struct *task = ctx->task;
1778 
1779 	if (!task) {
1780 		/*
1781 		 * Enable the event on the cpu that it's on
1782 		 */
1783 		cpu_function_call(event->cpu, __perf_event_enable, event);
1784 		return;
1785 	}
1786 
1787 	raw_spin_lock_irq(&ctx->lock);
1788 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
1789 		goto out;
1790 
1791 	/*
1792 	 * If the event is in error state, clear that first.
1793 	 * That way, if we see the event in error state below, we
1794 	 * know that it has gone back into error state, as distinct
1795 	 * from the task having been scheduled away before the
1796 	 * cross-call arrived.
1797 	 */
1798 	if (event->state == PERF_EVENT_STATE_ERROR)
1799 		event->state = PERF_EVENT_STATE_OFF;
1800 
1801 retry:
1802 	if (!ctx->is_active) {
1803 		__perf_event_mark_enabled(event);
1804 		goto out;
1805 	}
1806 
1807 	raw_spin_unlock_irq(&ctx->lock);
1808 
1809 	if (!task_function_call(task, __perf_event_enable, event))
1810 		return;
1811 
1812 	raw_spin_lock_irq(&ctx->lock);
1813 
1814 	/*
1815 	 * If the context is active and the event is still off,
1816 	 * we need to retry the cross-call.
1817 	 */
1818 	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
1819 		/*
1820 		 * task could have been flipped by a concurrent
1821 		 * perf_event_context_sched_out()
1822 		 */
1823 		task = ctx->task;
1824 		goto retry;
1825 	}
1826 
1827 out:
1828 	raw_spin_unlock_irq(&ctx->lock);
1829 }
1830 EXPORT_SYMBOL_GPL(perf_event_enable);
1831 
1832 int perf_event_refresh(struct perf_event *event, int refresh)
1833 {
1834 	/*
1835 	 * not supported on inherited events
1836 	 */
1837 	if (event->attr.inherit || !is_sampling_event(event))
1838 		return -EINVAL;
1839 
1840 	atomic_add(refresh, &event->event_limit);
1841 	perf_event_enable(event);
1842 
1843 	return 0;
1844 }
1845 EXPORT_SYMBOL_GPL(perf_event_refresh);
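/*
 * Hedged userspace-side sketch: the refresh count is normally driven via
 * the PERF_EVENT_IOC_REFRESH ioctl, arming the event for that many more
 * overflows before it auto-disables:
 *
 *	ioctl(pfd, PERF_EVENT_IOC_REFRESH, 1);	allow one more overflow
 */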
1846 
1847 static void ctx_sched_out(struct perf_event_context *ctx,
1848 			  struct perf_cpu_context *cpuctx,
1849 			  enum event_type_t event_type)
1850 {
1851 	struct perf_event *event;
1852 	int is_active = ctx->is_active;
1853 
1854 	ctx->is_active &= ~event_type;
1855 	if (likely(!ctx->nr_events))
1856 		return;
1857 
1858 	update_context_time(ctx);
1859 	update_cgrp_time_from_cpuctx(cpuctx);
1860 	if (!ctx->nr_active)
1861 		return;
1862 
1863 	perf_pmu_disable(ctx->pmu);
1864 	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
1865 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1866 			group_sched_out(event, cpuctx, ctx);
1867 	}
1868 
1869 	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
1870 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1871 			group_sched_out(event, cpuctx, ctx);
1872 	}
1873 	perf_pmu_enable(ctx->pmu);
1874 }
1875 
1876 /*
1877  * Test whether two contexts are equivalent, i.e. whether they
1878  * have both been cloned from the same version of the same context
1879  * and they both have the same number of enabled events.
1880  * If the number of enabled events is the same, then the set
1881  * of enabled events should be the same, because these are both
1882  * inherited contexts, therefore we can't access individual events
1883  * in them directly with an fd; we can only enable/disable all
1884  * events via prctl, or enable/disable all events in a family
1885  * via ioctl, which will have the same effect on both contexts.
1886  */
1887 static int context_equiv(struct perf_event_context *ctx1,
1888 			 struct perf_event_context *ctx2)
1889 {
1890 	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1891 		&& ctx1->parent_gen == ctx2->parent_gen
1892 		&& !ctx1->pin_count && !ctx2->pin_count;
1893 }
1894 
1895 static void __perf_event_sync_stat(struct perf_event *event,
1896 				     struct perf_event *next_event)
1897 {
1898 	u64 value;
1899 
1900 	if (!event->attr.inherit_stat)
1901 		return;
1902 
1903 	/*
1904 	 * Update the event value; we cannot use perf_event_read()
1905 	 * because we're in the middle of a context switch and have IRQs
1906 	 * disabled, which upsets smp_call_function_single(), however
1907 	 * we know the event must be on the current CPU, therefore we
1908 	 * don't need to use it.
1909 	 */
1910 	switch (event->state) {
1911 	case PERF_EVENT_STATE_ACTIVE:
1912 		event->pmu->read(event);
1913 		/* fall-through */
1914 
1915 	case PERF_EVENT_STATE_INACTIVE:
1916 		update_event_times(event);
1917 		break;
1918 
1919 	default:
1920 		break;
1921 	}
1922 
1923 	/*
1924 	 * In order to keep per-task stats reliable we need to flip the event
1925 	 * values when we flip the contexts.
1926 	 */
1927 	value = local64_read(&next_event->count);
1928 	value = local64_xchg(&event->count, value);
1929 	local64_set(&next_event->count, value);
1930 
1931 	swap(event->total_time_enabled, next_event->total_time_enabled);
1932 	swap(event->total_time_running, next_event->total_time_running);
1933 
1934 	/*
1935 	 * Since we swizzled the values, update the user visible data too.
1936 	 */
1937 	perf_event_update_userpage(event);
1938 	perf_event_update_userpage(next_event);
1939 }
1940 
1941 #define list_next_entry(pos, member) \
1942 	list_entry(pos->member.next, typeof(*pos), member)
1943 
1944 static void perf_event_sync_stat(struct perf_event_context *ctx,
1945 				   struct perf_event_context *next_ctx)
1946 {
1947 	struct perf_event *event, *next_event;
1948 
1949 	if (!ctx->nr_stat)
1950 		return;
1951 
1952 	update_context_time(ctx);
1953 
1954 	event = list_first_entry(&ctx->event_list,
1955 				   struct perf_event, event_entry);
1956 
1957 	next_event = list_first_entry(&next_ctx->event_list,
1958 					struct perf_event, event_entry);
1959 
1960 	while (&event->event_entry != &ctx->event_list &&
1961 	       &next_event->event_entry != &next_ctx->event_list) {
1962 
1963 		__perf_event_sync_stat(event, next_event);
1964 
1965 		event = list_next_entry(event, event_entry);
1966 		next_event = list_next_entry(next_event, event_entry);
1967 	}
1968 }
1969 
1970 static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1971 					 struct task_struct *next)
1972 {
1973 	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
1974 	struct perf_event_context *next_ctx;
1975 	struct perf_event_context *parent;
1976 	struct perf_cpu_context *cpuctx;
1977 	int do_switch = 1;
1978 
1979 	if (likely(!ctx))
1980 		return;
1981 
1982 	cpuctx = __get_cpu_context(ctx);
1983 	if (!cpuctx->task_ctx)
1984 		return;
1985 
1986 	rcu_read_lock();
1987 	parent = rcu_dereference(ctx->parent_ctx);
1988 	next_ctx = next->perf_event_ctxp[ctxn];
1989 	if (parent && next_ctx &&
1990 	    rcu_dereference(next_ctx->parent_ctx) == parent) {
1991 		/*
1992 		 * Looks like the two contexts are clones, so we might be
1993 		 * able to optimize the context switch.  We lock both
1994 		 * contexts and check that they are clones under the
1995 		 * lock (including re-checking that neither has been
1996 		 * uncloned in the meantime).  It doesn't matter which
1997 		 * order we take the locks because no other cpu could
1998 		 * be trying to lock both of these tasks.
1999 		 */
2000 		raw_spin_lock(&ctx->lock);
2001 		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2002 		if (context_equiv(ctx, next_ctx)) {
2003 			/*
2004 			 * XXX: do we need a memory barrier of sorts
2005 			 * w.r.t. the rcu_dereference() of perf_event_ctxp?
2006 			 */
2007 			task->perf_event_ctxp[ctxn] = next_ctx;
2008 			next->perf_event_ctxp[ctxn] = ctx;
2009 			ctx->task = next;
2010 			next_ctx->task = task;
2011 			do_switch = 0;
2012 
2013 			perf_event_sync_stat(ctx, next_ctx);
2014 		}
2015 		raw_spin_unlock(&next_ctx->lock);
2016 		raw_spin_unlock(&ctx->lock);
2017 	}
2018 	rcu_read_unlock();
2019 
2020 	if (do_switch) {
2021 		raw_spin_lock(&ctx->lock);
2022 		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2023 		cpuctx->task_ctx = NULL;
2024 		raw_spin_unlock(&ctx->lock);
2025 	}
2026 }
2027 
2028 #define for_each_task_context_nr(ctxn)					\
2029 	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2030 
2031 /*
2032  * Called from the scheduler to remove the events of the current task,
2033  * with interrupts disabled.
2034  *
2035  * We stop each event and update the event value in event->count.
2036  *
2037  * This does not protect us against NMI, but disable()
2038  * sets the disabled bit in the control field of the event _before_
2039  * accessing the event control register. If an NMI hits, then it will
2040  * not restart the event.
2041  */
2042 static void __perf_event_task_sched_out(struct task_struct *task,
2043 					struct task_struct *next)
2044 {
2045 	int ctxn;
2046 
2047 	for_each_task_context_nr(ctxn)
2048 		perf_event_context_sched_out(task, ctxn, next);
2049 
2050 	/*
2051 	 * if cgroup events exist on this CPU, then we need
2052 	 * to check if we have to switch out PMU state.
2053 	 * cgroup events are system-wide only
2054 	 */
2055 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2056 		perf_cgroup_sched_out(task, next);
2057 }
2058 
2059 static void task_ctx_sched_out(struct perf_event_context *ctx)
2060 {
2061 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2062 
2063 	if (!cpuctx->task_ctx)
2064 		return;
2065 
2066 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2067 		return;
2068 
2069 	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2070 	cpuctx->task_ctx = NULL;
2071 }
2072 
2073 /*
2074  * Called with IRQs disabled
2075  */
2076 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2077 			      enum event_type_t event_type)
2078 {
2079 	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2080 }
2081 
2082 static void
2083 ctx_pinned_sched_in(struct perf_event_context *ctx,
2084 		    struct perf_cpu_context *cpuctx)
2085 {
2086 	struct perf_event *event;
2087 
2088 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2089 		if (event->state <= PERF_EVENT_STATE_OFF)
2090 			continue;
2091 		if (!event_filter_match(event))
2092 			continue;
2093 
2094 		/* may need to reset tstamp_enabled */
2095 		if (is_cgroup_event(event))
2096 			perf_cgroup_mark_enabled(event, ctx);
2097 
2098 		if (group_can_go_on(event, cpuctx, 1))
2099 			group_sched_in(event, cpuctx, ctx);
2100 
2101 		/*
2102 		 * If this pinned group hasn't been scheduled,
2103 		 * put it in error state.
2104 		 */
2105 		if (event->state == PERF_EVENT_STATE_INACTIVE) {
2106 			update_group_times(event);
2107 			event->state = PERF_EVENT_STATE_ERROR;
2108 		}
2109 	}
2110 }
2111 
2112 static void
2113 ctx_flexible_sched_in(struct perf_event_context *ctx,
2114 		      struct perf_cpu_context *cpuctx)
2115 {
2116 	struct perf_event *event;
2117 	int can_add_hw = 1;
2118 
2119 	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2120 		/* Ignore events in OFF or ERROR state */
2121 		if (event->state <= PERF_EVENT_STATE_OFF)
2122 			continue;
2123 		/*
2124 		 * Listen to the 'cpu' scheduling filter constraint
2125 		 * of events:
2126 		 */
2127 		if (!event_filter_match(event))
2128 			continue;
2129 
2130 		/* may need to reset tstamp_enabled */
2131 		if (is_cgroup_event(event))
2132 			perf_cgroup_mark_enabled(event, ctx);
2133 
2134 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
2135 			if (group_sched_in(event, cpuctx, ctx))
2136 				can_add_hw = 0;
2137 		}
2138 	}
2139 }
2140 
2141 static void
2142 ctx_sched_in(struct perf_event_context *ctx,
2143 	     struct perf_cpu_context *cpuctx,
2144 	     enum event_type_t event_type,
2145 	     struct task_struct *task)
2146 {
2147 	u64 now;
2148 	int is_active = ctx->is_active;
2149 
2150 	ctx->is_active |= event_type;
2151 	if (likely(!ctx->nr_events))
2152 		return;
2153 
2154 	now = perf_clock();
2155 	ctx->timestamp = now;
2156 	perf_cgroup_set_timestamp(task, ctx);
2157 	/*
2158 	 * First go through the list and put on any pinned groups
2159 	 * in order to give them the best chance of going on.
2160 	 */
2161 	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
2162 		ctx_pinned_sched_in(ctx, cpuctx);
2163 
2164 	/* Then walk through the lower prio flexible groups */
2165 	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
2166 		ctx_flexible_sched_in(ctx, cpuctx);
2167 }
2168 
2169 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2170 			     enum event_type_t event_type,
2171 			     struct task_struct *task)
2172 {
2173 	struct perf_event_context *ctx = &cpuctx->ctx;
2174 
2175 	ctx_sched_in(ctx, cpuctx, event_type, task);
2176 }
2177 
2178 static void perf_event_context_sched_in(struct perf_event_context *ctx,
2179 					struct task_struct *task)
2180 {
2181 	struct perf_cpu_context *cpuctx;
2182 
2183 	cpuctx = __get_cpu_context(ctx);
2184 	if (cpuctx->task_ctx == ctx)
2185 		return;
2186 
2187 	perf_ctx_lock(cpuctx, ctx);
2188 	perf_pmu_disable(ctx->pmu);
2189 	/*
2190 	 * We want to keep the following priority order:
2191 	 * cpu pinned (that don't need to move), task pinned,
2192 	 * cpu flexible, task flexible.
2193 	 */
2194 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2195 
2196 	if (ctx->nr_events)
2197 		cpuctx->task_ctx = ctx;
2198 
2199 	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2200 
2201 	perf_pmu_enable(ctx->pmu);
2202 	perf_ctx_unlock(cpuctx, ctx);
2203 
2204 	/*
2205 	 * Since these rotations are per-cpu, we need to ensure the
2206 	 * cpu-context we got scheduled on is actually rotating.
2207 	 */
2208 	perf_pmu_rotate_start(ctx->pmu);
2209 }
2210 
2211 /*
2212  * When sampling the branch stack in system-wide mode, it may be necessary
2213  * to flush the stack on context switch. This happens when the branch
2214  * stack does not tag its entries with the pid of the current task.
2215  * Otherwise it becomes impossible to associate a branch entry with a
2216  * task. This ambiguity is more likely to appear when the branch stack
2217  * supports priv level filtering and the user sets it to monitor only
2218  * at the user level (which could be a useful measurement in system-wide
2219  * mode). In that case, the risk is high of having a branch stack with
2220  * branch from multiple tasks. Flushing may mean dropping the existing
2221  * branches from multiple tasks. Flushing may mean dropping the existing
2222  *
2223  * This function provides the context switch callback to the lower code
2224  * layer. It is invoked ONLY when there is at least one system-wide context
2225  * with at least one active event using taken branch sampling.
2226  */
2227 static void perf_branch_stack_sched_in(struct task_struct *prev,
2228 				       struct task_struct *task)
2229 {
2230 	struct perf_cpu_context *cpuctx;
2231 	struct pmu *pmu;
2232 	unsigned long flags;
2233 
2234 	/* no need to flush branch stack if not changing task */
2235 	if (prev == task)
2236 		return;
2237 
2238 	local_irq_save(flags);
2239 
2240 	rcu_read_lock();
2241 
2242 	list_for_each_entry_rcu(pmu, &pmus, entry) {
2243 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2244 
2245 		/*
2246 		 * check if the context has at least one
2247 		 * event using PERF_SAMPLE_BRANCH_STACK
2248 		 */
2249 		if (cpuctx->ctx.nr_branch_stack > 0
2250 		    && pmu->flush_branch_stack) {
2251 
2252 			pmu = cpuctx->ctx.pmu;
2253 
2254 			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2255 
2256 			perf_pmu_disable(pmu);
2257 
2258 			pmu->flush_branch_stack();
2259 
2260 			perf_pmu_enable(pmu);
2261 
2262 			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2263 		}
2264 	}
2265 
2266 	rcu_read_unlock();
2267 
2268 	local_irq_restore(flags);
2269 }
2270 
2271 /*
2272  * Called from the scheduler to add the events of the current task
2273  * with interrupts disabled.
2274  *
2275  * We restore the event value and then enable it.
2276  *
2277  * This does not protect us against NMI, but enable()
2278  * sets the enabled bit in the control field of event _before_
2279  * sets the enabled bit in the control field of the event _before_
2280  * accessing the event control register. If an NMI hits, then it will
2281  */
2282 static void __perf_event_task_sched_in(struct task_struct *prev,
2283 				       struct task_struct *task)
2284 {
2285 	struct perf_event_context *ctx;
2286 	int ctxn;
2287 
2288 	for_each_task_context_nr(ctxn) {
2289 		ctx = task->perf_event_ctxp[ctxn];
2290 		if (likely(!ctx))
2291 			continue;
2292 
2293 		perf_event_context_sched_in(ctx, task);
2294 	}
2295 	/*
2296 	 * if cgroup events exist on this CPU, then we need
2297 	 * to check if we have to switch in PMU state.
2298 	 * cgroup events are system-wide only
2299 	 */
2300 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2301 		perf_cgroup_sched_in(prev, task);
2302 
2303 	/* check for system-wide branch_stack events */
2304 	if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
2305 		perf_branch_stack_sched_in(prev, task);
2306 }
2307 
2308 void __perf_event_task_sched(struct task_struct *prev, struct task_struct *next)
2309 {
2310 	__perf_event_task_sched_out(prev, next);
2311 	__perf_event_task_sched_in(prev, next);
2312 }
2313 
2314 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2315 {
2316 	u64 frequency = event->attr.sample_freq;
2317 	u64 sec = NSEC_PER_SEC;
2318 	u64 divisor, dividend;
2319 
2320 	int count_fls, nsec_fls, frequency_fls, sec_fls;
2321 
2322 	count_fls = fls64(count);
2323 	nsec_fls = fls64(nsec);
2324 	frequency_fls = fls64(frequency);
2325 	sec_fls = 30;
2326 
2327 	/*
2328 	 * We got @count in @nsec, with a target of sample_freq HZ
2329 	 * the target period becomes:
2330 	 *
2331 	 *             @count * 10^9
2332 	 * period = -------------------
2333 	 *          @nsec * sample_freq
2334 	 *
2335 	 */
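
	/*
	 * Worked example (illustrative numbers, not from the code): if we
	 * counted 4,000,000 events over 4,000,000 ns (4 ms) with
	 * sample_freq = 1000, the target period is
	 * 4e6 * 1e9 / (4e6 * 1000) = 1,000,000 events per sample.
	 */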
2336 
2337 	/*
2338 	 * Reduce accuracy by one bit such that @a and @b converge
2339 	 * to a similar magnitude.
2340 	 */
2341 #define REDUCE_FLS(a, b)		\
2342 do {					\
2343 	if (a##_fls > b##_fls) {	\
2344 		a >>= 1;		\
2345 		a##_fls--;		\
2346 	} else {			\
2347 		b >>= 1;		\
2348 		b##_fls--;		\
2349 	}				\
2350 } while (0)
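
	/*
	 * Illustration: with nsec_fls = 40 and frequency_fls = 20,
	 * REDUCE_FLS(nsec, frequency) halves @nsec (nsec_fls drops to 39);
	 * repeated application shrinks whichever operand is currently
	 * larger, so the products computed below eventually fit in 64 bits.
	 */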
2351 
2352 	/*
2353 	 * Reduce accuracy until either term fits in a u64, then proceed with
2354 	 * the other, so that finally we can do a u64/u64 division.
2355 	 */
2356 	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2357 		REDUCE_FLS(nsec, frequency);
2358 		REDUCE_FLS(sec, count);
2359 	}
2360 
2361 	if (count_fls + sec_fls > 64) {
2362 		divisor = nsec * frequency;
2363 
2364 		while (count_fls + sec_fls > 64) {
2365 			REDUCE_FLS(count, sec);
2366 			divisor >>= 1;
2367 		}
2368 
2369 		dividend = count * sec;
2370 	} else {
2371 		dividend = count * sec;
2372 
2373 		while (nsec_fls + frequency_fls > 64) {
2374 			REDUCE_FLS(nsec, frequency);
2375 			dividend >>= 1;
2376 		}
2377 
2378 		divisor = nsec * frequency;
2379 	}
2380 
2381 	if (!divisor)
2382 		return dividend;
2383 
2384 	return div64_u64(dividend, divisor);
2385 }
2386 
2387 static DEFINE_PER_CPU(int, perf_throttled_count);
2388 static DEFINE_PER_CPU(u64, perf_throttled_seq);
2389 
2390 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
2391 {
2392 	struct hw_perf_event *hwc = &event->hw;
2393 	s64 period, sample_period;
2394 	s64 delta;
2395 
2396 	period = perf_calculate_period(event, nsec, count);
2397 
2398 	delta = (s64)(period - hwc->sample_period);
2399 	delta = (delta + 7) / 8; /* low pass filter */
2400 
2401 	sample_period = hwc->sample_period + delta;
2402 
2403 	if (!sample_period)
2404 		sample_period = 1;
2405 
2406 	hwc->sample_period = sample_period;
2407 
2408 	if (local64_read(&hwc->period_left) > 8*sample_period) {
2409 		if (disable)
2410 			event->pmu->stop(event, PERF_EF_UPDATE);
2411 
2412 		local64_set(&hwc->period_left, 0);
2413 
2414 		if (disable)
2415 			event->pmu->start(event, PERF_EF_RELOAD);
2416 	}
2417 }
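
/*
 * Worked example (illustrative numbers): if perf_calculate_period() returns
 * 180,000 while hwc->sample_period is 100,000, then delta = 80,000 and the
 * low-pass filter above adds (80,000 + 7) / 8 = 10,000, moving sample_period
 * to 110,000; successive ticks converge on the target without over-reacting
 * to transient spikes.
 */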
2418 
2419 /*
2420  * combine freq adjustment with unthrottling to avoid two passes over the
2421  * events. At the same time, make sure, having freq events does not change
2422  * the rate of unthrottling as that would introduce bias.
2423  */
2424 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2425 					   int needs_unthr)
2426 {
2427 	struct perf_event *event;
2428 	struct hw_perf_event *hwc;
2429 	u64 now, period = TICK_NSEC;
2430 	s64 delta;
2431 
2432 	/*
2433 	 * We only need to iterate over all events if either:
2434 	 * - the context has events in frequency mode (needs freq adjust), or
2435 	 * - there are events to unthrottle on this CPU
2436 	 */
2437 	if (!(ctx->nr_freq || needs_unthr))
2438 		return;
2439 
2440 	raw_spin_lock(&ctx->lock);
2441 	perf_pmu_disable(ctx->pmu);
2442 
2443 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2444 		if (event->state != PERF_EVENT_STATE_ACTIVE)
2445 			continue;
2446 
2447 		if (!event_filter_match(event))
2448 			continue;
2449 
2450 		hwc = &event->hw;
2451 
2452 		if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
2453 			hwc->interrupts = 0;
2454 			perf_log_throttle(event, 1);
2455 			event->pmu->start(event, 0);
2456 		}
2457 
2458 		if (!event->attr.freq || !event->attr.sample_freq)
2459 			continue;
2460 
2461 		/*
2462 		 * stop the event and update event->count
2463 		 */
2464 		event->pmu->stop(event, PERF_EF_UPDATE);
2465 
2466 		now = local64_read(&event->count);
2467 		delta = now - hwc->freq_count_stamp;
2468 		hwc->freq_count_stamp = now;
2469 
2470 		/*
2471 		 * Restart the event; reload only if the value has changed.
2472 		 * We have already stopped the event, so tell
2473 		 * perf_adjust_period() not to stop it a second time.
2476 		 */
2477 		if (delta > 0)
2478 			perf_adjust_period(event, period, delta, false);
2479 
2480 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2481 	}
2482 
2483 	perf_pmu_enable(ctx->pmu);
2484 	raw_spin_unlock(&ctx->lock);
2485 }
2486 
2487 /*
2488  * Round-robin a context's events:
2489  */
2490 static void rotate_ctx(struct perf_event_context *ctx)
2491 {
2492 	/*
2493 	 * Rotate the first entry of the non-pinned groups to the tail.
2494 	 * Rotation might be disabled by the inheritance code.
2495 	 */
2496 	if (!ctx->rotate_disable)
2497 		list_rotate_left(&ctx->flexible_groups);
2498 }
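
/*
 * Illustration: with flexible_groups = [A, B, C] and a PMU that can only
 * host two of them at once, successive rotations yield [B, C, A],
 * [C, A, B], ... so every group gets its turn on the hardware.
 */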
2499 
2500 /*
2501  * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2502  * because they're strictly cpu affine and rotate_start is called with IRQs
2503  * disabled, while rotate_context is called from IRQ context.
2504  */
2505 static void perf_rotate_context(struct perf_cpu_context *cpuctx)
2506 {
2507 	struct perf_event_context *ctx = NULL;
2508 	int rotate = 0, remove = 1;
2509 
2510 	if (cpuctx->ctx.nr_events) {
2511 		remove = 0;
2512 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2513 			rotate = 1;
2514 	}
2515 
2516 	ctx = cpuctx->task_ctx;
2517 	if (ctx && ctx->nr_events) {
2518 		remove = 0;
2519 		if (ctx->nr_events != ctx->nr_active)
2520 			rotate = 1;
2521 	}
2522 
2523 	if (!rotate)
2524 		goto done;
2525 
2526 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2527 	perf_pmu_disable(cpuctx->ctx.pmu);
2528 
2529 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2530 	if (ctx)
2531 		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
2532 
2533 	rotate_ctx(&cpuctx->ctx);
2534 	if (ctx)
2535 		rotate_ctx(ctx);
2536 
2537 	perf_event_sched_in(cpuctx, ctx, current);
2538 
2539 	perf_pmu_enable(cpuctx->ctx.pmu);
2540 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2541 done:
2542 	if (remove)
2543 		list_del_init(&cpuctx->rotation_list);
2544 }
2545 
2546 void perf_event_task_tick(void)
2547 {
2548 	struct list_head *head = &__get_cpu_var(rotation_list);
2549 	struct perf_cpu_context *cpuctx, *tmp;
2550 	struct perf_event_context *ctx;
2551 	int throttled;
2552 
2553 	WARN_ON(!irqs_disabled());
2554 
2555 	__this_cpu_inc(perf_throttled_seq);
2556 	throttled = __this_cpu_xchg(perf_throttled_count, 0);
2557 
2558 	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
2559 		ctx = &cpuctx->ctx;
2560 		perf_adjust_freq_unthr_context(ctx, throttled);
2561 
2562 		ctx = cpuctx->task_ctx;
2563 		if (ctx)
2564 			perf_adjust_freq_unthr_context(ctx, throttled);
2565 
2566 		if (cpuctx->jiffies_interval == 1 ||
2567 				!(jiffies % cpuctx->jiffies_interval))
2568 			perf_rotate_context(cpuctx);
2569 	}
2570 }
2571 
2572 static int event_enable_on_exec(struct perf_event *event,
2573 				struct perf_event_context *ctx)
2574 {
2575 	if (!event->attr.enable_on_exec)
2576 		return 0;
2577 
2578 	event->attr.enable_on_exec = 0;
2579 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
2580 		return 0;
2581 
2582 	__perf_event_mark_enabled(event);
2583 
2584 	return 1;
2585 }
2586 
2587 /*
2588  * Enable all of a task's events that have been marked enable-on-exec.
2589  * This expects task == current.
2590  */
2591 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2592 {
2593 	struct perf_event *event;
2594 	unsigned long flags;
2595 	int enabled = 0;
2596 	int ret;
2597 
2598 	local_irq_save(flags);
2599 	if (!ctx || !ctx->nr_events)
2600 		goto out;
2601 
2602 	/*
2603 	 * We must schedule out cgroup events to avoid a conflict
2604 	 * when invoking perf_event_context_sched_in() later on
2605 	 * in this function. Otherwise we end up trying to
2606 	 * schedule in cgroup events which are already scheduled
2607 	 * in.
2608 	 */
2609 	perf_cgroup_sched_out(current, NULL);
2610 
2611 	raw_spin_lock(&ctx->lock);
2612 	task_ctx_sched_out(ctx);
2613 
2614 	list_for_each_entry(event, &ctx->event_list, event_entry) {
2615 		ret = event_enable_on_exec(event, ctx);
2616 		if (ret)
2617 			enabled = 1;
2618 	}
2619 
2620 	/*
2621 	 * Unclone this context if we enabled any event.
2622 	 */
2623 	if (enabled)
2624 		unclone_ctx(ctx);
2625 
2626 	raw_spin_unlock(&ctx->lock);
2627 
2628 	/*
2629 	 * This also schedules cgroup events back in, if any:
2630 	 */
2631 	perf_event_context_sched_in(ctx, ctx->task);
2632 out:
2633 	local_irq_restore(flags);
2634 }
2635 
2636 /*
2637  * Cross CPU call to read the hardware event
2638  */
2639 static void __perf_event_read(void *info)
2640 {
2641 	struct perf_event *event = info;
2642 	struct perf_event_context *ctx = event->ctx;
2643 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2644 
2645 	/*
2646 	 * If this is a task context, we need to check whether it is
2647 	 * the current task context of this cpu.  If not it has been
2648 	 * scheduled out before the smp call arrived.  In that case
2649 	 * event->count would have been updated to a recent sample
2650 	 * when the event was scheduled out.
2651 	 */
2652 	if (ctx->task && cpuctx->task_ctx != ctx)
2653 		return;
2654 
2655 	raw_spin_lock(&ctx->lock);
2656 	if (ctx->is_active) {
2657 		update_context_time(ctx);
2658 		update_cgrp_time_from_event(event);
2659 	}
2660 	update_event_times(event);
2661 	if (event->state == PERF_EVENT_STATE_ACTIVE)
2662 		event->pmu->read(event);
2663 	raw_spin_unlock(&ctx->lock);
2664 }
2665 
2666 static inline u64 perf_event_count(struct perf_event *event)
2667 {
2668 	return local64_read(&event->count) + atomic64_read(&event->child_count);
2669 }
2670 
2671 static u64 perf_event_read(struct perf_event *event)
2672 {
2673 	/*
2674 	 * If event is enabled and currently active on a CPU, update the
2675 	 * value in the event structure:
2676 	 */
2677 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
2678 		smp_call_function_single(event->oncpu,
2679 					 __perf_event_read, event, 1);
2680 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2681 		struct perf_event_context *ctx = event->ctx;
2682 		unsigned long flags;
2683 
2684 		raw_spin_lock_irqsave(&ctx->lock, flags);
2685 		/*
2686 		 * We may read while the context is not active
2687 		 * (e.g., the thread is blocked); in that case
2688 		 * we cannot update the context time.
2689 		 */
2690 		if (ctx->is_active) {
2691 			update_context_time(ctx);
2692 			update_cgrp_time_from_event(event);
2693 		}
2694 		update_event_times(event);
2695 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
2696 	}
2697 
2698 	return perf_event_count(event);
2699 }
2700 
2701 /*
2702  * Initialize the perf_event context in a task_struct:
2703  */
2704 static void __perf_event_init_context(struct perf_event_context *ctx)
2705 {
2706 	raw_spin_lock_init(&ctx->lock);
2707 	mutex_init(&ctx->mutex);
2708 	INIT_LIST_HEAD(&ctx->pinned_groups);
2709 	INIT_LIST_HEAD(&ctx->flexible_groups);
2710 	INIT_LIST_HEAD(&ctx->event_list);
2711 	atomic_set(&ctx->refcount, 1);
2712 }
2713 
2714 static struct perf_event_context *
2715 alloc_perf_context(struct pmu *pmu, struct task_struct *task)
2716 {
2717 	struct perf_event_context *ctx;
2718 
2719 	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2720 	if (!ctx)
2721 		return NULL;
2722 
2723 	__perf_event_init_context(ctx);
2724 	if (task) {
2725 		ctx->task = task;
2726 		get_task_struct(task);
2727 	}
2728 	ctx->pmu = pmu;
2729 
2730 	return ctx;
2731 }
2732 
2733 static struct task_struct *
2734 find_lively_task_by_vpid(pid_t vpid)
2735 {
2736 	struct task_struct *task;
2737 	int err;
2738 
2739 	rcu_read_lock();
2740 	if (!vpid)
2741 		task = current;
2742 	else
2743 		task = find_task_by_vpid(vpid);
2744 	if (task)
2745 		get_task_struct(task);
2746 	rcu_read_unlock();
2747 
2748 	if (!task)
2749 		return ERR_PTR(-ESRCH);
2750 
2751 	/* Reuse ptrace permission checks for now. */
2752 	err = -EACCES;
2753 	if (!ptrace_may_access(task, PTRACE_MODE_READ))
2754 		goto errout;
2755 
2756 	return task;
2757 errout:
2758 	put_task_struct(task);
2759 	return ERR_PTR(err);
2760 
2761 }
2762 
2763 /*
2764  * Returns a matching context with refcount and pincount.
2765  */
2766 static struct perf_event_context *
2767 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
2768 {
2769 	struct perf_event_context *ctx;
2770 	struct perf_cpu_context *cpuctx;
2771 	unsigned long flags;
2772 	int ctxn, err;
2773 
2774 	if (!task) {
2775 		/* Must be root to operate on a CPU event: */
2776 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2777 			return ERR_PTR(-EACCES);
2778 
2779 		/*
2780 		 * We could be clever and allow attaching an event to an
2781 		 * offline CPU and activate it when the CPU comes up, but
2782 		 * that's for later.
2783 		 */
2784 		if (!cpu_online(cpu))
2785 			return ERR_PTR(-ENODEV);
2786 
2787 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
2788 		ctx = &cpuctx->ctx;
2789 		get_ctx(ctx);
2790 		++ctx->pin_count;
2791 
2792 		return ctx;
2793 	}
2794 
2795 	err = -EINVAL;
2796 	ctxn = pmu->task_ctx_nr;
2797 	if (ctxn < 0)
2798 		goto errout;
2799 
2800 retry:
2801 	ctx = perf_lock_task_context(task, ctxn, &flags);
2802 	if (ctx) {
2803 		unclone_ctx(ctx);
2804 		++ctx->pin_count;
2805 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
2806 	} else {
2807 		ctx = alloc_perf_context(pmu, task);
2808 		err = -ENOMEM;
2809 		if (!ctx)
2810 			goto errout;
2811 
2812 		err = 0;
2813 		mutex_lock(&task->perf_event_mutex);
2814 		/*
2815 		 * If it has already passed perf_event_exit_task(),
2816 		 * we must see PF_EXITING; it takes this mutex too.
2817 		 */
2818 		if (task->flags & PF_EXITING)
2819 			err = -ESRCH;
2820 		else if (task->perf_event_ctxp[ctxn])
2821 			err = -EAGAIN;
2822 		else {
2823 			get_ctx(ctx);
2824 			++ctx->pin_count;
2825 			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
2826 		}
2827 		mutex_unlock(&task->perf_event_mutex);
2828 
2829 		if (unlikely(err)) {
2830 			put_ctx(ctx);
2831 
2832 			if (err == -EAGAIN)
2833 				goto retry;
2834 			goto errout;
2835 		}
2836 	}
2837 
2838 	return ctx;
2839 
2840 errout:
2841 	return ERR_PTR(err);
2842 }
2843 
2844 static void perf_event_free_filter(struct perf_event *event);
2845 
2846 static void free_event_rcu(struct rcu_head *head)
2847 {
2848 	struct perf_event *event;
2849 
2850 	event = container_of(head, struct perf_event, rcu_head);
2851 	if (event->ns)
2852 		put_pid_ns(event->ns);
2853 	perf_event_free_filter(event);
2854 	kfree(event);
2855 }
2856 
2857 static void ring_buffer_put(struct ring_buffer *rb);
2858 
2859 static void free_event(struct perf_event *event)
2860 {
2861 	irq_work_sync(&event->pending);
2862 
2863 	if (!event->parent) {
2864 		if (event->attach_state & PERF_ATTACH_TASK)
2865 			static_key_slow_dec_deferred(&perf_sched_events);
2866 		if (event->attr.mmap || event->attr.mmap_data)
2867 			atomic_dec(&nr_mmap_events);
2868 		if (event->attr.comm)
2869 			atomic_dec(&nr_comm_events);
2870 		if (event->attr.task)
2871 			atomic_dec(&nr_task_events);
2872 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2873 			put_callchain_buffers();
2874 		if (is_cgroup_event(event)) {
2875 			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
2876 			static_key_slow_dec_deferred(&perf_sched_events);
2877 		}
2878 
2879 		if (has_branch_stack(event)) {
2880 			static_key_slow_dec_deferred(&perf_sched_events);
2881 			/* is system-wide event */
2882 			if (!(event->attach_state & PERF_ATTACH_TASK))
2883 				atomic_dec(&per_cpu(perf_branch_stack_events,
2884 						    event->cpu));
2885 		}
2886 	}
2887 
2888 	if (event->rb) {
2889 		ring_buffer_put(event->rb);
2890 		event->rb = NULL;
2891 	}
2892 
2893 	if (is_cgroup_event(event))
2894 		perf_detach_cgroup(event);
2895 
2896 	if (event->destroy)
2897 		event->destroy(event);
2898 
2899 	if (event->ctx)
2900 		put_ctx(event->ctx);
2901 
2902 	call_rcu(&event->rcu_head, free_event_rcu);
2903 }
2904 
2905 int perf_event_release_kernel(struct perf_event *event)
2906 {
2907 	struct perf_event_context *ctx = event->ctx;
2908 
2909 	WARN_ON_ONCE(ctx->parent_ctx);
2910 	/*
2911 	 * There are two ways this annotation is useful:
2912 	 *
2913 	 *  1) there is a lock recursion from perf_event_exit_task
2914 	 *     see the comment there.
2915 	 *
2916 	 *  2) there is a lock-inversion with mmap_sem through
2917 	 *     perf_event_read_group(), which takes faults while
2918 	 *     holding ctx->mutex, however this is called after
2919 	 *     the last filedesc died, so there is no possibility
2920 	 *     to trigger the AB-BA case.
2921 	 */
2922 	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
2923 	raw_spin_lock_irq(&ctx->lock);
2924 	perf_group_detach(event);
2925 	raw_spin_unlock_irq(&ctx->lock);
2926 	perf_remove_from_context(event);
2927 	mutex_unlock(&ctx->mutex);
2928 
2929 	free_event(event);
2930 
2931 	return 0;
2932 }
2933 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
2934 
2935 /*
2936  * Called when the last reference to the file is gone.
2937  */
2938 static int perf_release(struct inode *inode, struct file *file)
2939 {
2940 	struct perf_event *event = file->private_data;
2941 	struct task_struct *owner;
2942 
2943 	file->private_data = NULL;
2944 
2945 	rcu_read_lock();
2946 	owner = ACCESS_ONCE(event->owner);
2947 	/*
2948 	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
2949 	 * !owner it means the list deletion is complete and we can indeed
2950 	 * free this event, otherwise we need to serialize on
2951 	 * owner->perf_event_mutex.
2952 	 */
2953 	smp_read_barrier_depends();
2954 	if (owner) {
2955 		/*
2956 		 * Since delayed_put_task_struct() also drops the last
2957 		 * task reference we can safely take a new reference
2958 		 * while holding the rcu_read_lock().
2959 		 */
2960 		get_task_struct(owner);
2961 	}
2962 	rcu_read_unlock();
2963 
2964 	if (owner) {
2965 		mutex_lock(&owner->perf_event_mutex);
2966 		/*
2967 		 * We have to re-check the event->owner field; if it is
2968 		 * cleared, we raced with perf_event_exit_task(). Acquiring
2969 		 * the mutex ensures they're done, and we can proceed with
2970 		 * freeing the event.
2971 		 */
2972 		if (event->owner)
2973 			list_del_init(&event->owner_entry);
2974 		mutex_unlock(&owner->perf_event_mutex);
2975 		put_task_struct(owner);
2976 	}
2977 
2978 	return perf_event_release_kernel(event);
2979 }
2980 
2981 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
2982 {
2983 	struct perf_event *child;
2984 	u64 total = 0;
2985 
2986 	*enabled = 0;
2987 	*running = 0;
2988 
2989 	mutex_lock(&event->child_mutex);
2990 	total += perf_event_read(event);
2991 	*enabled += event->total_time_enabled +
2992 			atomic64_read(&event->child_total_time_enabled);
2993 	*running += event->total_time_running +
2994 			atomic64_read(&event->child_total_time_running);
2995 
2996 	list_for_each_entry(child, &event->child_list, child_list) {
2997 		total += perf_event_read(child);
2998 		*enabled += child->total_time_enabled;
2999 		*running += child->total_time_running;
3000 	}
3001 	mutex_unlock(&event->child_mutex);
3002 
3003 	return total;
3004 }
3005 EXPORT_SYMBOL_GPL(perf_event_read_value);
3006 
3007 static int perf_event_read_group(struct perf_event *event,
3008 				   u64 read_format, char __user *buf)
3009 {
3010 	struct perf_event *leader = event->group_leader, *sub;
3011 	int n = 0, size = 0, ret = -EFAULT;
3012 	struct perf_event_context *ctx = leader->ctx;
3013 	u64 values[5];
3014 	u64 count, enabled, running;
3015 
3016 	mutex_lock(&ctx->mutex);
3017 	count = perf_event_read_value(leader, &enabled, &running);
3018 
3019 	values[n++] = 1 + leader->nr_siblings;
3020 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3021 		values[n++] = enabled;
3022 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3023 		values[n++] = running;
3024 	values[n++] = count;
3025 	if (read_format & PERF_FORMAT_ID)
3026 		values[n++] = primary_event_id(leader);
3027 
3028 	size = n * sizeof(u64);
3029 
3030 	if (copy_to_user(buf, values, size))
3031 		goto unlock;
3032 
3033 	ret = size;
3034 
3035 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3036 		n = 0;
3037 
3038 		values[n++] = perf_event_read_value(sub, &enabled, &running);
3039 		if (read_format & PERF_FORMAT_ID)
3040 			values[n++] = primary_event_id(sub);
3041 
3042 		size = n * sizeof(u64);
3043 
3044 		if (copy_to_user(buf + ret, values, size)) {
3045 			ret = -EFAULT;
3046 			goto unlock;
3047 		}
3048 
3049 		ret += size;
3050 	}
3051 unlock:
3052 	mutex_unlock(&ctx->mutex);
3053 
3054 	return ret;
3055 }
3056 
3057 static int perf_event_read_one(struct perf_event *event,
3058 				 u64 read_format, char __user *buf)
3059 {
3060 	u64 enabled, running;
3061 	u64 values[4];
3062 	int n = 0;
3063 
3064 	values[n++] = perf_event_read_value(event, &enabled, &running);
3065 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3066 		values[n++] = enabled;
3067 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3068 		values[n++] = running;
3069 	if (read_format & PERF_FORMAT_ID)
3070 		values[n++] = primary_event_id(event);
3071 
3072 	if (copy_to_user(buf, values, n * sizeof(u64)))
3073 		return -EFAULT;
3074 
3075 	return n * sizeof(u64);
3076 }
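
/*
 * The layout produced above mirrors the read(2) format documented in
 * perf_event_open(2); a sketch of the user-visible record (u64 fields,
 * present only when the corresponding read_format bit is set):
 *
 *	struct read_format {
 *		u64 value;
 *		u64 time_enabled;	(PERF_FORMAT_TOTAL_TIME_ENABLED)
 *		u64 time_running;	(PERF_FORMAT_TOTAL_TIME_RUNNING)
 *		u64 id;			(PERF_FORMAT_ID)
 *	};
 *
 * and, with PERF_FORMAT_GROUP (see perf_event_read_group() above):
 *
 *	struct read_format {
 *		u64 nr;			(1 + nr_siblings)
 *		u64 time_enabled;
 *		u64 time_running;
 *		struct { u64 value; u64 id; } values[nr];	(leader first)
 *	};
 */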
3077 
3078 /*
3079  * Read the performance event - simple non-blocking version for now
3080  */
3081 static ssize_t
3082 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
3083 {
3084 	u64 read_format = event->attr.read_format;
3085 	int ret;
3086 
3087 	/*
3088 	 * Return end-of-file for a read on an event that is in
3089 	 * error state (i.e. because it was pinned but it couldn't be
3090 	 * scheduled on to the CPU at some point).
3091 	 */
3092 	if (event->state == PERF_EVENT_STATE_ERROR)
3093 		return 0;
3094 
3095 	if (count < event->read_size)
3096 		return -ENOSPC;
3097 
3098 	WARN_ON_ONCE(event->ctx->parent_ctx);
3099 	if (read_format & PERF_FORMAT_GROUP)
3100 		ret = perf_event_read_group(event, read_format, buf);
3101 	else
3102 		ret = perf_event_read_one(event, read_format, buf);
3103 
3104 	return ret;
3105 }
3106 
3107 static ssize_t
3108 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3109 {
3110 	struct perf_event *event = file->private_data;
3111 
3112 	return perf_read_hw(event, buf, count);
3113 }
3114 
3115 static unsigned int perf_poll(struct file *file, poll_table *wait)
3116 {
3117 	struct perf_event *event = file->private_data;
3118 	struct ring_buffer *rb;
3119 	unsigned int events = POLLHUP;
3120 
3121 	/*
3122 	 * Race between perf_event_set_output() and perf_poll(): perf_poll()
3123 	 * grabs the rb reference but perf_event_set_output() overrides it.
3124 	 * Here is the timeline for two threads T1, T2:
3125 	 * t0: T1, rb = rcu_dereference(event->rb)
3126 	 * t1: T2, old_rb = event->rb
3127 	 * t2: T2, event->rb = new rb
3128 	 * t3: T2, ring_buffer_detach(old_rb)
3129 	 * t4: T1, ring_buffer_attach(rb)
3130 	 * t5: T1, poll_wait(event->waitq)
3131 	 *
3132 	 * To avoid this problem, we grab mmap_mutex in perf_poll()
3133 	 * thereby ensuring that the assignment of the new ring buffer
3134 	 * and the detachment of the old buffer appear atomic to perf_poll()
3135 	 */
3136 	mutex_lock(&event->mmap_mutex);
3137 
3138 	rcu_read_lock();
3139 	rb = rcu_dereference(event->rb);
3140 	if (rb) {
3141 		ring_buffer_attach(event, rb);
3142 		events = atomic_xchg(&rb->poll, 0);
3143 	}
3144 	rcu_read_unlock();
3145 
3146 	mutex_unlock(&event->mmap_mutex);
3147 
3148 	poll_wait(file, &event->waitq, wait);
3149 
3150 	return events;
3151 }
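
/*
 * Usage sketch (userspace, not part of this file): block until the ring
 * buffer signals data or the event goes away; drain_ring_buffer() is a
 * hypothetical consumer:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) == 1 && !(pfd.revents & POLLHUP))
 *		drain_ring_buffer();
 */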
3152 
3153 static void perf_event_reset(struct perf_event *event)
3154 {
3155 	(void)perf_event_read(event);
3156 	local64_set(&event->count, 0);
3157 	perf_event_update_userpage(event);
3158 }
3159 
3160 /*
3161  * Holding the top-level event's child_mutex means that any
3162  * descendant process that has inherited this event will block
3163  * in sync_child_event if it goes to exit, thus satisfying the
3164  * task existence requirements of perf_event_enable/disable.
3165  */
3166 static void perf_event_for_each_child(struct perf_event *event,
3167 					void (*func)(struct perf_event *))
3168 {
3169 	struct perf_event *child;
3170 
3171 	WARN_ON_ONCE(event->ctx->parent_ctx);
3172 	mutex_lock(&event->child_mutex);
3173 	func(event);
3174 	list_for_each_entry(child, &event->child_list, child_list)
3175 		func(child);
3176 	mutex_unlock(&event->child_mutex);
3177 }
3178 
3179 static void perf_event_for_each(struct perf_event *event,
3180 				  void (*func)(struct perf_event *))
3181 {
3182 	struct perf_event_context *ctx = event->ctx;
3183 	struct perf_event *sibling;
3184 
3185 	WARN_ON_ONCE(ctx->parent_ctx);
3186 	mutex_lock(&ctx->mutex);
3187 	event = event->group_leader;
3188 
3189 	perf_event_for_each_child(event, func);
3190 	func(event);
3191 	list_for_each_entry(sibling, &event->sibling_list, group_entry)
3192 		perf_event_for_each_child(sibling, func);
3193 	mutex_unlock(&ctx->mutex);
3194 }
3195 
3196 static int perf_event_period(struct perf_event *event, u64 __user *arg)
3197 {
3198 	struct perf_event_context *ctx = event->ctx;
3199 	int ret = 0;
3200 	u64 value;
3201 
3202 	if (!is_sampling_event(event))
3203 		return -EINVAL;
3204 
3205 	if (copy_from_user(&value, arg, sizeof(value)))
3206 		return -EFAULT;
3207 
3208 	if (!value)
3209 		return -EINVAL;
3210 
3211 	raw_spin_lock_irq(&ctx->lock);
3212 	if (event->attr.freq) {
3213 		if (value > sysctl_perf_event_sample_rate) {
3214 			ret = -EINVAL;
3215 			goto unlock;
3216 		}
3217 
3218 		event->attr.sample_freq = value;
3219 	} else {
3220 		event->attr.sample_period = value;
3221 		event->hw.sample_period = value;
3222 	}
3223 unlock:
3224 	raw_spin_unlock_irq(&ctx->lock);
3225 
3226 	return ret;
3227 }
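
/*
 * Usage sketch (userspace, not part of this file), assuming 'fd' came from
 * perf_event_open(2) for a sampling event:
 *
 *	u64 period = 100000;
 *
 *	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))
 *		err(1, "PERF_EVENT_IOC_PERIOD");
 *
 * For freq-based events the value is taken as a new sample_freq and is
 * rejected when it exceeds sysctl_perf_event_sample_rate, as above.
 */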
3228 
3229 static const struct file_operations perf_fops;
3230 
3231 static struct perf_event *perf_fget_light(int fd, int *fput_needed)
3232 {
3233 	struct file *file;
3234 
3235 	file = fget_light(fd, fput_needed);
3236 	if (!file)
3237 		return ERR_PTR(-EBADF);
3238 
3239 	if (file->f_op != &perf_fops) {
3240 		fput_light(file, *fput_needed);
3241 		*fput_needed = 0;
3242 		return ERR_PTR(-EBADF);
3243 	}
3244 
3245 	return file->private_data;
3246 }
3247 
3248 static int perf_event_set_output(struct perf_event *event,
3249 				 struct perf_event *output_event);
3250 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3251 
3252 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3253 {
3254 	struct perf_event *event = file->private_data;
3255 	void (*func)(struct perf_event *);
3256 	u32 flags = arg;
3257 
3258 	switch (cmd) {
3259 	case PERF_EVENT_IOC_ENABLE:
3260 		func = perf_event_enable;
3261 		break;
3262 	case PERF_EVENT_IOC_DISABLE:
3263 		func = perf_event_disable;
3264 		break;
3265 	case PERF_EVENT_IOC_RESET:
3266 		func = perf_event_reset;
3267 		break;
3268 
3269 	case PERF_EVENT_IOC_REFRESH:
3270 		return perf_event_refresh(event, arg);
3271 
3272 	case PERF_EVENT_IOC_PERIOD:
3273 		return perf_event_period(event, (u64 __user *)arg);
3274 
3275 	case PERF_EVENT_IOC_SET_OUTPUT:
3276 	{
3277 		struct perf_event *output_event = NULL;
3278 		int fput_needed = 0;
3279 		int ret;
3280 
3281 		if (arg != -1) {
3282 			output_event = perf_fget_light(arg, &fput_needed);
3283 			if (IS_ERR(output_event))
3284 				return PTR_ERR(output_event);
3285 		}
3286 
3287 		ret = perf_event_set_output(event, output_event);
3288 		if (output_event)
3289 			fput_light(output_event->filp, fput_needed);
3290 
3291 		return ret;
3292 	}
3293 
3294 	case PERF_EVENT_IOC_SET_FILTER:
3295 		return perf_event_set_filter(event, (void __user *)arg);
3296 
3297 	default:
3298 		return -ENOTTY;
3299 	}
3300 
3301 	if (flags & PERF_IOC_FLAG_GROUP)
3302 		perf_event_for_each(event, func);
3303 	else
3304 		perf_event_for_each_child(event, func);
3305 
3306 	return 0;
3307 }
3308 
3309 int perf_event_task_enable(void)
3310 {
3311 	struct perf_event *event;
3312 
3313 	mutex_lock(&current->perf_event_mutex);
3314 	list_for_each_entry(event, &current->perf_event_list, owner_entry)
3315 		perf_event_for_each_child(event, perf_event_enable);
3316 	mutex_unlock(&current->perf_event_mutex);
3317 
3318 	return 0;
3319 }
3320 
3321 int perf_event_task_disable(void)
3322 {
3323 	struct perf_event *event;
3324 
3325 	mutex_lock(&current->perf_event_mutex);
3326 	list_for_each_entry(event, &current->perf_event_list, owner_entry)
3327 		perf_event_for_each_child(event, perf_event_disable);
3328 	mutex_unlock(&current->perf_event_mutex);
3329 
3330 	return 0;
3331 }
3332 
3333 static int perf_event_index(struct perf_event *event)
3334 {
3335 	if (event->hw.state & PERF_HES_STOPPED)
3336 		return 0;
3337 
3338 	if (event->state != PERF_EVENT_STATE_ACTIVE)
3339 		return 0;
3340 
3341 	return event->pmu->event_idx(event);
3342 }
3343 
3344 static void calc_timer_values(struct perf_event *event,
3345 				u64 *now,
3346 				u64 *enabled,
3347 				u64 *running)
3348 {
3349 	u64 ctx_time;
3350 
3351 	*now = perf_clock();
3352 	ctx_time = event->shadow_ctx_time + *now;
3353 	*enabled = ctx_time - event->tstamp_enabled;
3354 	*running = ctx_time - event->tstamp_running;
3355 }
3356 
3357 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3358 {
3359 }
3360 
3361 /*
3362  * Callers need to ensure there can be no nesting of this function, otherwise
3363  * the seqlock logic goes bad. We cannot serialize this because the arch
3364  * code calls this from NMI context.
3365  */
3366 void perf_event_update_userpage(struct perf_event *event)
3367 {
3368 	struct perf_event_mmap_page *userpg;
3369 	struct ring_buffer *rb;
3370 	u64 enabled, running, now;
3371 
3372 	rcu_read_lock();
3373 	/*
3374 	 * compute total_time_enabled, total_time_running
3375 	 * based on snapshot values taken when the event
3376 	 * was last scheduled in.
3377 	 *
3378 	 * We cannot simply call update_context_time()
3379 	 * because of locking issues, as we can be called in
3380 	 * NMI context.
3381 	 */
3382 	calc_timer_values(event, &now, &enabled, &running);
3383 	rb = rcu_dereference(event->rb);
3384 	if (!rb)
3385 		goto unlock;
3386 
3387 	userpg = rb->user_page;
3388 
3389 	/*
3390 	 * Disable preemption so as to not let the corresponding user-space
3391 	 * spin too long if we get preempted.
3392 	 */
3393 	preempt_disable();
3394 	++userpg->lock;
3395 	barrier();
3396 	userpg->index = perf_event_index(event);
3397 	userpg->offset = perf_event_count(event);
3398 	if (userpg->index)
3399 		userpg->offset -= local64_read(&event->hw.prev_count);
3400 
3401 	userpg->time_enabled = enabled +
3402 			atomic64_read(&event->child_total_time_enabled);
3403 
3404 	userpg->time_running = running +
3405 			atomic64_read(&event->child_total_time_running);
3406 
3407 	arch_perf_update_userpage(userpg, now);
3408 
3409 	barrier();
3410 	++userpg->lock;
3411 	preempt_enable();
3412 unlock:
3413 	rcu_read_unlock();
3414 }
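
/*
 * Userspace counterpart (sketch following the pattern documented in
 * include/uapi/linux/perf_event.h): readers snapshot the fields between two
 * reads of ->lock and retry if the sequence count moved under them:
 *
 *	struct perf_event_mmap_page *pc = base;	(first mmap()ed page)
 *	u32 seq, idx;
 *	s64 offset;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		offset = pc->offset;
 *		barrier();
 *	} while (pc->lock != seq);
 */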
3415 
3416 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3417 {
3418 	struct perf_event *event = vma->vm_file->private_data;
3419 	struct ring_buffer *rb;
3420 	int ret = VM_FAULT_SIGBUS;
3421 
3422 	if (vmf->flags & FAULT_FLAG_MKWRITE) {
3423 		if (vmf->pgoff == 0)
3424 			ret = 0;
3425 		return ret;
3426 	}
3427 
3428 	rcu_read_lock();
3429 	rb = rcu_dereference(event->rb);
3430 	if (!rb)
3431 		goto unlock;
3432 
3433 	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3434 		goto unlock;
3435 
3436 	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
3437 	if (!vmf->page)
3438 		goto unlock;
3439 
3440 	get_page(vmf->page);
3441 	vmf->page->mapping = vma->vm_file->f_mapping;
3442 	vmf->page->index   = vmf->pgoff;
3443 
3444 	ret = 0;
3445 unlock:
3446 	rcu_read_unlock();
3447 
3448 	return ret;
3449 }
3450 
3451 static void ring_buffer_attach(struct perf_event *event,
3452 			       struct ring_buffer *rb)
3453 {
3454 	unsigned long flags;
3455 
3456 	if (!list_empty(&event->rb_entry))
3457 		return;
3458 
3459 	spin_lock_irqsave(&rb->event_lock, flags);
3460 	if (!list_empty(&event->rb_entry))
3461 		goto unlock;
3462 
3463 	list_add(&event->rb_entry, &rb->event_list);
3464 unlock:
3465 	spin_unlock_irqrestore(&rb->event_lock, flags);
3466 }
3467 
3468 static void ring_buffer_detach(struct perf_event *event,
3469 			       struct ring_buffer *rb)
3470 {
3471 	unsigned long flags;
3472 
3473 	if (list_empty(&event->rb_entry))
3474 		return;
3475 
3476 	spin_lock_irqsave(&rb->event_lock, flags);
3477 	list_del_init(&event->rb_entry);
3478 	wake_up_all(&event->waitq);
3479 	spin_unlock_irqrestore(&rb->event_lock, flags);
3480 }
3481 
3482 static void ring_buffer_wakeup(struct perf_event *event)
3483 {
3484 	struct ring_buffer *rb;
3485 
3486 	rcu_read_lock();
3487 	rb = rcu_dereference(event->rb);
3488 	if (!rb)
3489 		goto unlock;
3490 
3491 	list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3492 		wake_up_all(&event->waitq);
3493 
3494 unlock:
3495 	rcu_read_unlock();
3496 }
3497 
3498 static void rb_free_rcu(struct rcu_head *rcu_head)
3499 {
3500 	struct ring_buffer *rb;
3501 
3502 	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3503 	rb_free(rb);
3504 }
3505 
3506 static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3507 {
3508 	struct ring_buffer *rb;
3509 
3510 	rcu_read_lock();
3511 	rb = rcu_dereference(event->rb);
3512 	if (rb) {
3513 		if (!atomic_inc_not_zero(&rb->refcount))
3514 			rb = NULL;
3515 	}
3516 	rcu_read_unlock();
3517 
3518 	return rb;
3519 }
3520 
3521 static void ring_buffer_put(struct ring_buffer *rb)
3522 {
3523 	struct perf_event *event, *n;
3524 	unsigned long flags;
3525 
3526 	if (!atomic_dec_and_test(&rb->refcount))
3527 		return;
3528 
3529 	spin_lock_irqsave(&rb->event_lock, flags);
3530 	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
3531 		list_del_init(&event->rb_entry);
3532 		wake_up_all(&event->waitq);
3533 	}
3534 	spin_unlock_irqrestore(&rb->event_lock, flags);
3535 
3536 	call_rcu(&rb->rcu_head, rb_free_rcu);
3537 }
3538 
3539 static void perf_mmap_open(struct vm_area_struct *vma)
3540 {
3541 	struct perf_event *event = vma->vm_file->private_data;
3542 
3543 	atomic_inc(&event->mmap_count);
3544 }
3545 
3546 static void perf_mmap_close(struct vm_area_struct *vma)
3547 {
3548 	struct perf_event *event = vma->vm_file->private_data;
3549 
3550 	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
3551 		unsigned long size = perf_data_size(event->rb);
3552 		struct user_struct *user = event->mmap_user;
3553 		struct ring_buffer *rb = event->rb;
3554 
3555 		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
3556 		vma->vm_mm->pinned_vm -= event->mmap_locked;
3557 		rcu_assign_pointer(event->rb, NULL);
3558 		ring_buffer_detach(event, rb);
3559 		mutex_unlock(&event->mmap_mutex);
3560 
3561 		ring_buffer_put(rb);
3562 		free_uid(user);
3563 	}
3564 }
3565 
3566 static const struct vm_operations_struct perf_mmap_vmops = {
3567 	.open		= perf_mmap_open,
3568 	.close		= perf_mmap_close,
3569 	.fault		= perf_mmap_fault,
3570 	.page_mkwrite	= perf_mmap_fault,
3571 };
3572 
3573 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3574 {
3575 	struct perf_event *event = file->private_data;
3576 	unsigned long user_locked, user_lock_limit;
3577 	struct user_struct *user = current_user();
3578 	unsigned long locked, lock_limit;
3579 	struct ring_buffer *rb;
3580 	unsigned long vma_size;
3581 	unsigned long nr_pages;
3582 	long user_extra, extra;
3583 	int ret = 0, flags = 0;
3584 
3585 	/*
3586 	 * Don't allow mmap() of inherited per-task counters. This would
3587 	 * create a performance issue due to all children writing to the
3588 	 * same rb.
3589 	 */
3590 	if (event->cpu == -1 && event->attr.inherit)
3591 		return -EINVAL;
3592 
3593 	if (!(vma->vm_flags & VM_SHARED))
3594 		return -EINVAL;
3595 
3596 	vma_size = vma->vm_end - vma->vm_start;
3597 	nr_pages = (vma_size / PAGE_SIZE) - 1;
3598 
3599 	/*
3600 	 * If we have rb pages ensure they're a power-of-two number, so we
3601 	 * can do bitmasks instead of modulo.
3602 	 */
3603 	if (nr_pages != 0 && !is_power_of_2(nr_pages))
3604 		return -EINVAL;
3605 
3606 	if (vma_size != PAGE_SIZE * (1 + nr_pages))
3607 		return -EINVAL;
3608 
3609 	if (vma->vm_pgoff != 0)
3610 		return -EINVAL;
3611 
3612 	WARN_ON_ONCE(event->ctx->parent_ctx);
3613 	mutex_lock(&event->mmap_mutex);
3614 	if (event->rb) {
3615 		if (event->rb->nr_pages == nr_pages)
3616 			atomic_inc(&event->rb->refcount);
3617 		else
3618 			ret = -EINVAL;
3619 		goto unlock;
3620 	}
3621 
3622 	user_extra = nr_pages + 1;
3623 	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3624 
3625 	/*
3626 	 * Increase the limit linearly with more CPUs:
3627 	 */
3628 	user_lock_limit *= num_online_cpus();
3629 
3630 	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3631 
3632 	extra = 0;
3633 	if (user_locked > user_lock_limit)
3634 		extra = user_locked - user_lock_limit;
3635 
3636 	lock_limit = rlimit(RLIMIT_MEMLOCK);
3637 	lock_limit >>= PAGE_SHIFT;
3638 	locked = vma->vm_mm->pinned_vm + extra;
3639 
3640 	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3641 		!capable(CAP_IPC_LOCK)) {
3642 		ret = -EPERM;
3643 		goto unlock;
3644 	}
3645 
3646 	WARN_ON(event->rb);
3647 
3648 	if (vma->vm_flags & VM_WRITE)
3649 		flags |= RING_BUFFER_WRITABLE;
3650 
3651 	rb = rb_alloc(nr_pages,
3652 		event->attr.watermark ? event->attr.wakeup_watermark : 0,
3653 		event->cpu, flags);
3654 
3655 	if (!rb) {
3656 		ret = -ENOMEM;
3657 		goto unlock;
3658 	}
3659 	rcu_assign_pointer(event->rb, rb);
3660 
3661 	atomic_long_add(user_extra, &user->locked_vm);
3662 	event->mmap_locked = extra;
3663 	event->mmap_user = get_current_user();
3664 	vma->vm_mm->pinned_vm += event->mmap_locked;
3665 
3666 	perf_event_update_userpage(event);
3667 
3668 unlock:
3669 	if (!ret)
3670 		atomic_inc(&event->mmap_count);
3671 	mutex_unlock(&event->mmap_mutex);
3672 
3673 	vma->vm_flags |= VM_RESERVED;
3674 	vma->vm_ops = &perf_mmap_vmops;
3675 
3676 	return ret;
3677 }
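
/*
 * Usage sketch (userspace, not part of this file): the mapping must cover
 * one control page plus a power-of-two number of data pages, as enforced
 * above:
 *
 *	size_t page = sysconf(_SC_PAGESIZE);
 *	size_t len  = (1 + 8) * page;		(8 == 2^3 data pages)
 *	void *base  = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, 0);
 *
 * 'base' then points at the struct perf_event_mmap_page control page,
 * followed by the data pages of the ring buffer.
 */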
3678 
3679 static int perf_fasync(int fd, struct file *filp, int on)
3680 {
3681 	struct inode *inode = filp->f_path.dentry->d_inode;
3682 	struct perf_event *event = filp->private_data;
3683 	int retval;
3684 
3685 	mutex_lock(&inode->i_mutex);
3686 	retval = fasync_helper(fd, filp, on, &event->fasync);
3687 	mutex_unlock(&inode->i_mutex);
3688 
3689 	if (retval < 0)
3690 		return retval;
3691 
3692 	return 0;
3693 }
3694 
3695 static const struct file_operations perf_fops = {
3696 	.llseek			= no_llseek,
3697 	.release		= perf_release,
3698 	.read			= perf_read,
3699 	.poll			= perf_poll,
3700 	.unlocked_ioctl		= perf_ioctl,
3701 	.compat_ioctl		= perf_ioctl,
3702 	.mmap			= perf_mmap,
3703 	.fasync			= perf_fasync,
3704 };
3705 
3706 /*
3707  * Perf event wakeup
3708  *
3709  * If there's data, ensure we set the poll() state and publish everything
3710  * to user-space before waking everybody up.
3711  */
3712 
3713 void perf_event_wakeup(struct perf_event *event)
3714 {
3715 	ring_buffer_wakeup(event);
3716 
3717 	if (event->pending_kill) {
3718 		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3719 		event->pending_kill = 0;
3720 	}
3721 }
3722 
3723 static void perf_pending_event(struct irq_work *entry)
3724 {
3725 	struct perf_event *event = container_of(entry,
3726 			struct perf_event, pending);
3727 
3728 	if (event->pending_disable) {
3729 		event->pending_disable = 0;
3730 		__perf_event_disable(event);
3731 	}
3732 
3733 	if (event->pending_wakeup) {
3734 		event->pending_wakeup = 0;
3735 		perf_event_wakeup(event);
3736 	}
3737 }
3738 
3739 /*
3740  * We assume there is only KVM supporting the callbacks.
3741  * Later on, we might change it to a list if there is
3742  * another virtualization implementation supporting the callbacks.
3743  */
3744 struct perf_guest_info_callbacks *perf_guest_cbs;
3745 
3746 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3747 {
3748 	perf_guest_cbs = cbs;
3749 	return 0;
3750 }
3751 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3752 
3753 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3754 {
3755 	perf_guest_cbs = NULL;
3756 	return 0;
3757 }
3758 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
3759 
3760 static void __perf_event_header__init_id(struct perf_event_header *header,
3761 					 struct perf_sample_data *data,
3762 					 struct perf_event *event)
3763 {
3764 	u64 sample_type = event->attr.sample_type;
3765 
3766 	data->type = sample_type;
3767 	header->size += event->id_header_size;
3768 
3769 	if (sample_type & PERF_SAMPLE_TID) {
3770 		/* namespace issues */
3771 		data->tid_entry.pid = perf_event_pid(event, current);
3772 		data->tid_entry.tid = perf_event_tid(event, current);
3773 	}
3774 
3775 	if (sample_type & PERF_SAMPLE_TIME)
3776 		data->time = perf_clock();
3777 
3778 	if (sample_type & PERF_SAMPLE_ID)
3779 		data->id = primary_event_id(event);
3780 
3781 	if (sample_type & PERF_SAMPLE_STREAM_ID)
3782 		data->stream_id = event->id;
3783 
3784 	if (sample_type & PERF_SAMPLE_CPU) {
3785 		data->cpu_entry.cpu	 = raw_smp_processor_id();
3786 		data->cpu_entry.reserved = 0;
3787 	}
3788 }
3789 
3790 void perf_event_header__init_id(struct perf_event_header *header,
3791 				struct perf_sample_data *data,
3792 				struct perf_event *event)
3793 {
3794 	if (event->attr.sample_id_all)
3795 		__perf_event_header__init_id(header, data, event);
3796 }
3797 
3798 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
3799 					   struct perf_sample_data *data)
3800 {
3801 	u64 sample_type = data->type;
3802 
3803 	if (sample_type & PERF_SAMPLE_TID)
3804 		perf_output_put(handle, data->tid_entry);
3805 
3806 	if (sample_type & PERF_SAMPLE_TIME)
3807 		perf_output_put(handle, data->time);
3808 
3809 	if (sample_type & PERF_SAMPLE_ID)
3810 		perf_output_put(handle, data->id);
3811 
3812 	if (sample_type & PERF_SAMPLE_STREAM_ID)
3813 		perf_output_put(handle, data->stream_id);
3814 
3815 	if (sample_type & PERF_SAMPLE_CPU)
3816 		perf_output_put(handle, data->cpu_entry);
3817 }
3818 
3819 void perf_event__output_id_sample(struct perf_event *event,
3820 				  struct perf_output_handle *handle,
3821 				  struct perf_sample_data *sample)
3822 {
3823 	if (event->attr.sample_id_all)
3824 		__perf_event__output_id_sample(handle, sample);
3825 }
3826 
3827 static void perf_output_read_one(struct perf_output_handle *handle,
3828 				 struct perf_event *event,
3829 				 u64 enabled, u64 running)
3830 {
3831 	u64 read_format = event->attr.read_format;
3832 	u64 values[4];
3833 	int n = 0;
3834 
3835 	values[n++] = perf_event_count(event);
3836 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3837 		values[n++] = enabled +
3838 			atomic64_read(&event->child_total_time_enabled);
3839 	}
3840 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3841 		values[n++] = running +
3842 			atomic64_read(&event->child_total_time_running);
3843 	}
3844 	if (read_format & PERF_FORMAT_ID)
3845 		values[n++] = primary_event_id(event);
3846 
3847 	__output_copy(handle, values, n * sizeof(u64));
3848 }
3849 
3850 /*
3851  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3852  */
3853 static void perf_output_read_group(struct perf_output_handle *handle,
3854 			    struct perf_event *event,
3855 			    u64 enabled, u64 running)
3856 {
3857 	struct perf_event *leader = event->group_leader, *sub;
3858 	u64 read_format = event->attr.read_format;
3859 	u64 values[5];
3860 	int n = 0;
3861 
3862 	values[n++] = 1 + leader->nr_siblings;
3863 
3864 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3865 		values[n++] = enabled;
3866 
3867 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3868 		values[n++] = running;
3869 
3870 	if (leader != event)
3871 		leader->pmu->read(leader);
3872 
3873 	values[n++] = perf_event_count(leader);
3874 	if (read_format & PERF_FORMAT_ID)
3875 		values[n++] = primary_event_id(leader);
3876 
3877 	__output_copy(handle, values, n * sizeof(u64));
3878 
3879 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3880 		n = 0;
3881 
3882 		if (sub != event)
3883 			sub->pmu->read(sub);
3884 
3885 		values[n++] = perf_event_count(sub);
3886 		if (read_format & PERF_FORMAT_ID)
3887 			values[n++] = primary_event_id(sub);
3888 
3889 		__output_copy(handle, values, n * sizeof(u64));
3890 	}
3891 }
3892 
3893 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3894 				 PERF_FORMAT_TOTAL_TIME_RUNNING)
3895 
3896 static void perf_output_read(struct perf_output_handle *handle,
3897 			     struct perf_event *event)
3898 {
3899 	u64 enabled = 0, running = 0, now;
3900 	u64 read_format = event->attr.read_format;
3901 
3902 	/*
	 * Compute total_time_enabled and total_time_running from the
	 * snapshot values taken when the event was last scheduled in.
	 *
	 * We cannot simply call update_context_time() because of
	 * locking issues: we may be called in NMI context.
3910 	 */
3911 	if (read_format & PERF_FORMAT_TOTAL_TIMES)
3912 		calc_timer_values(event, &now, &enabled, &running);
3913 
	if (read_format & PERF_FORMAT_GROUP)
3915 		perf_output_read_group(handle, event, enabled, running);
3916 	else
3917 		perf_output_read_one(handle, event, enabled, running);
3918 }
3919 
3920 void perf_output_sample(struct perf_output_handle *handle,
3921 			struct perf_event_header *header,
3922 			struct perf_sample_data *data,
3923 			struct perf_event *event)
3924 {
3925 	u64 sample_type = data->type;
3926 
3927 	perf_output_put(handle, *header);
3928 
3929 	if (sample_type & PERF_SAMPLE_IP)
3930 		perf_output_put(handle, data->ip);
3931 
3932 	if (sample_type & PERF_SAMPLE_TID)
3933 		perf_output_put(handle, data->tid_entry);
3934 
3935 	if (sample_type & PERF_SAMPLE_TIME)
3936 		perf_output_put(handle, data->time);
3937 
3938 	if (sample_type & PERF_SAMPLE_ADDR)
3939 		perf_output_put(handle, data->addr);
3940 
3941 	if (sample_type & PERF_SAMPLE_ID)
3942 		perf_output_put(handle, data->id);
3943 
3944 	if (sample_type & PERF_SAMPLE_STREAM_ID)
3945 		perf_output_put(handle, data->stream_id);
3946 
3947 	if (sample_type & PERF_SAMPLE_CPU)
3948 		perf_output_put(handle, data->cpu_entry);
3949 
3950 	if (sample_type & PERF_SAMPLE_PERIOD)
3951 		perf_output_put(handle, data->period);
3952 
3953 	if (sample_type & PERF_SAMPLE_READ)
3954 		perf_output_read(handle, event);
3955 
3956 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = (1 + data->callchain->nr) * sizeof(u64);

			__output_copy(handle, data->callchain, size);
3966 		} else {
3967 			u64 nr = 0;
3968 			perf_output_put(handle, nr);
3969 		}
3970 	}
3971 
3972 	if (sample_type & PERF_SAMPLE_RAW) {
3973 		if (data->raw) {
3974 			perf_output_put(handle, data->raw->size);
3975 			__output_copy(handle, data->raw->data,
3976 					   data->raw->size);
3977 		} else {
3978 			struct {
3979 				u32	size;
3980 				u32	data;
3981 			} raw = {
3982 				.size = sizeof(u32),
3983 				.data = 0,
3984 			};
3985 			perf_output_put(handle, raw);
3986 		}
3987 	}
3988 
4003 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4004 		if (data->br_stack) {
4005 			size_t size;
4006 
4007 			size = data->br_stack->nr
4008 			     * sizeof(struct perf_branch_entry);
4009 
4010 			perf_output_put(handle, data->br_stack->nr);
4011 			perf_output_copy(handle, data->br_stack->entries, size);
4012 		} else {
4013 			/*
4014 			 * we always store at least the value of nr
4015 			 */
4016 			u64 nr = 0;
4017 			perf_output_put(handle, nr);
4018 		}
4019 	}

	if (!event->attr.watermark) {
		int wakeup_events = event->attr.wakeup_events;

		if (wakeup_events) {
			struct ring_buffer *rb = handle->rb;
			int events = local_inc_return(&rb->events);

			if (events >= wakeup_events) {
				local_sub(wakeup_events, &rb->events);
				local_inc(&rb->wakeup);
			}
		}
	}
}
4021 
4022 void perf_prepare_sample(struct perf_event_header *header,
4023 			 struct perf_sample_data *data,
4024 			 struct perf_event *event,
4025 			 struct pt_regs *regs)
4026 {
4027 	u64 sample_type = event->attr.sample_type;
4028 
4029 	header->type = PERF_RECORD_SAMPLE;
4030 	header->size = sizeof(*header) + event->header_size;
4031 
4032 	header->misc = 0;
4033 	header->misc |= perf_misc_flags(regs);
4034 
4035 	__perf_event_header__init_id(header, data, event);
4036 
4037 	if (sample_type & PERF_SAMPLE_IP)
4038 		data->ip = perf_instruction_pointer(regs);
4039 
4040 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4041 		int size = 1;
4042 
4043 		data->callchain = perf_callchain(regs);
4044 
4045 		if (data->callchain)
4046 			size += data->callchain->nr;
4047 
4048 		header->size += size * sizeof(u64);
4049 	}
4050 
4051 	if (sample_type & PERF_SAMPLE_RAW) {
4052 		int size = sizeof(u32);
4053 
4054 		if (data->raw)
4055 			size += data->raw->size;
4056 		else
4057 			size += sizeof(u32);
4058 
4059 		WARN_ON_ONCE(size & (sizeof(u64)-1));
4060 		header->size += size;
4061 	}
4062 
4063 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4064 		int size = sizeof(u64); /* nr */
4065 		if (data->br_stack) {
4066 			size += data->br_stack->nr
4067 			      * sizeof(struct perf_branch_entry);
4068 		}
4069 		header->size += size;
4070 	}
4071 }
4072 
4073 static void perf_event_output(struct perf_event *event,
4074 				struct perf_sample_data *data,
4075 				struct pt_regs *regs)
4076 {
4077 	struct perf_output_handle handle;
4078 	struct perf_event_header header;
4079 
4080 	/* protect the callchain buffers */
4081 	rcu_read_lock();
4082 
4083 	perf_prepare_sample(&header, data, event, regs);
4084 
4085 	if (perf_output_begin(&handle, event, header.size))
4086 		goto exit;
4087 
4088 	perf_output_sample(&handle, &header, data, event);
4089 
4090 	perf_output_end(&handle);
4091 
4092 exit:
4093 	rcu_read_unlock();
4094 }
4095 
4096 /*
4097  * read event_id
4098  */
4099 
4100 struct perf_read_event {
4101 	struct perf_event_header	header;
4102 
4103 	u32				pid;
4104 	u32				tid;
4105 };
4106 
4107 static void
4108 perf_event_read_event(struct perf_event *event,
4109 			struct task_struct *task)
4110 {
4111 	struct perf_output_handle handle;
4112 	struct perf_sample_data sample;
4113 	struct perf_read_event read_event = {
4114 		.header = {
4115 			.type = PERF_RECORD_READ,
4116 			.misc = 0,
4117 			.size = sizeof(read_event) + event->read_size,
4118 		},
4119 		.pid = perf_event_pid(event, task),
4120 		.tid = perf_event_tid(event, task),
4121 	};
4122 	int ret;
4123 
4124 	perf_event_header__init_id(&read_event.header, &sample, event);
4125 	ret = perf_output_begin(&handle, event, read_event.header.size);
4126 	if (ret)
4127 		return;
4128 
4129 	perf_output_put(&handle, read_event);
4130 	perf_output_read(&handle, event);
4131 	perf_event__output_id_sample(event, &handle, &sample);
4132 
4133 	perf_output_end(&handle);
4134 }
4135 
4136 /*
4137  * task tracking -- fork/exit
4138  *
4139  * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
4140  */
4141 
4142 struct perf_task_event {
4143 	struct task_struct		*task;
4144 	struct perf_event_context	*task_ctx;
4145 
4146 	struct {
4147 		struct perf_event_header	header;
4148 
4149 		u32				pid;
4150 		u32				ppid;
4151 		u32				tid;
4152 		u32				ptid;
4153 		u64				time;
4154 	} event_id;
4155 };
4156 
4157 static void perf_event_task_output(struct perf_event *event,
4158 				     struct perf_task_event *task_event)
4159 {
4160 	struct perf_output_handle handle;
4161 	struct perf_sample_data	sample;
4162 	struct task_struct *task = task_event->task;
4163 	int ret, size = task_event->event_id.header.size;
4164 
4165 	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
4166 
4167 	ret = perf_output_begin(&handle, event,
4168 				task_event->event_id.header.size);
4169 	if (ret)
4170 		goto out;
4171 
4172 	task_event->event_id.pid = perf_event_pid(event, task);
4173 	task_event->event_id.ppid = perf_event_pid(event, current);
4174 
4175 	task_event->event_id.tid = perf_event_tid(event, task);
4176 	task_event->event_id.ptid = perf_event_tid(event, current);
4177 
4178 	perf_output_put(&handle, task_event->event_id);
4179 
4180 	perf_event__output_id_sample(event, &handle, &sample);
4181 
4182 	perf_output_end(&handle);
4183 out:
4184 	task_event->event_id.header.size = size;
4185 }
4186 
4187 static int perf_event_task_match(struct perf_event *event)
4188 {
4189 	if (event->state < PERF_EVENT_STATE_INACTIVE)
4190 		return 0;
4191 
4192 	if (!event_filter_match(event))
4193 		return 0;
4194 
4195 	if (event->attr.comm || event->attr.mmap ||
4196 	    event->attr.mmap_data || event->attr.task)
4197 		return 1;
4198 
4199 	return 0;
4200 }
4201 
4202 static void perf_event_task_ctx(struct perf_event_context *ctx,
4203 				  struct perf_task_event *task_event)
4204 {
4205 	struct perf_event *event;
4206 
4207 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4208 		if (perf_event_task_match(event))
4209 			perf_event_task_output(event, task_event);
4210 	}
4211 }
4212 
4213 static void perf_event_task_event(struct perf_task_event *task_event)
4214 {
4215 	struct perf_cpu_context *cpuctx;
4216 	struct perf_event_context *ctx;
4217 	struct pmu *pmu;
4218 	int ctxn;
4219 
4220 	rcu_read_lock();
4221 	list_for_each_entry_rcu(pmu, &pmus, entry) {
4222 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4223 		if (cpuctx->active_pmu != pmu)
4224 			goto next;
4225 		perf_event_task_ctx(&cpuctx->ctx, task_event);
4226 
4227 		ctx = task_event->task_ctx;
4228 		if (!ctx) {
4229 			ctxn = pmu->task_ctx_nr;
4230 			if (ctxn < 0)
4231 				goto next;
4232 			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4233 		}
4234 		if (ctx)
4235 			perf_event_task_ctx(ctx, task_event);
4236 next:
4237 		put_cpu_ptr(pmu->pmu_cpu_context);
4238 	}
4239 	rcu_read_unlock();
4240 }
4241 
4242 static void perf_event_task(struct task_struct *task,
4243 			      struct perf_event_context *task_ctx,
4244 			      int new)
4245 {
4246 	struct perf_task_event task_event;
4247 
4248 	if (!atomic_read(&nr_comm_events) &&
4249 	    !atomic_read(&nr_mmap_events) &&
4250 	    !atomic_read(&nr_task_events))
4251 		return;
4252 
4253 	task_event = (struct perf_task_event){
4254 		.task	  = task,
4255 		.task_ctx = task_ctx,
4256 		.event_id    = {
4257 			.header = {
4258 				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
4259 				.misc = 0,
4260 				.size = sizeof(task_event.event_id),
4261 			},
4262 			/* .pid  */
4263 			/* .ppid */
4264 			/* .tid  */
4265 			/* .ptid */
4266 			.time = perf_clock(),
4267 		},
4268 	};
4269 
4270 	perf_event_task_event(&task_event);
4271 }
4272 
4273 void perf_event_fork(struct task_struct *task)
4274 {
4275 	perf_event_task(task, NULL, 1);
4276 }
4277 
4278 /*
4279  * comm tracking
4280  */
4281 
4282 struct perf_comm_event {
4283 	struct task_struct	*task;
4284 	char			*comm;
4285 	int			comm_size;
4286 
4287 	struct {
4288 		struct perf_event_header	header;
4289 
4290 		u32				pid;
4291 		u32				tid;
4292 	} event_id;
4293 };
4294 
4295 static void perf_event_comm_output(struct perf_event *event,
4296 				     struct perf_comm_event *comm_event)
4297 {
4298 	struct perf_output_handle handle;
4299 	struct perf_sample_data sample;
4300 	int size = comm_event->event_id.header.size;
4301 	int ret;
4302 
4303 	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4304 	ret = perf_output_begin(&handle, event,
4305 				comm_event->event_id.header.size);
4306 
4307 	if (ret)
4308 		goto out;
4309 
4310 	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4311 	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4312 
4313 	perf_output_put(&handle, comm_event->event_id);
4314 	__output_copy(&handle, comm_event->comm,
4315 				   comm_event->comm_size);
4316 
4317 	perf_event__output_id_sample(event, &handle, &sample);
4318 
4319 	perf_output_end(&handle);
4320 out:
4321 	comm_event->event_id.header.size = size;
4322 }
4323 
4324 static int perf_event_comm_match(struct perf_event *event)
4325 {
4326 	if (event->state < PERF_EVENT_STATE_INACTIVE)
4327 		return 0;
4328 
4329 	if (!event_filter_match(event))
4330 		return 0;
4331 
4332 	if (event->attr.comm)
4333 		return 1;
4334 
4335 	return 0;
4336 }
4337 
4338 static void perf_event_comm_ctx(struct perf_event_context *ctx,
4339 				  struct perf_comm_event *comm_event)
4340 {
4341 	struct perf_event *event;
4342 
4343 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4344 		if (perf_event_comm_match(event))
4345 			perf_event_comm_output(event, comm_event);
4346 	}
4347 }
4348 
4349 static void perf_event_comm_event(struct perf_comm_event *comm_event)
4350 {
4351 	struct perf_cpu_context *cpuctx;
4352 	struct perf_event_context *ctx;
4353 	char comm[TASK_COMM_LEN];
4354 	unsigned int size;
4355 	struct pmu *pmu;
4356 	int ctxn;
4357 
4358 	memset(comm, 0, sizeof(comm));
4359 	strlcpy(comm, comm_event->task->comm, sizeof(comm));
4360 	size = ALIGN(strlen(comm)+1, sizeof(u64));
4361 
4362 	comm_event->comm = comm;
4363 	comm_event->comm_size = size;
4364 
4365 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
4366 	rcu_read_lock();
4367 	list_for_each_entry_rcu(pmu, &pmus, entry) {
4368 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4369 		if (cpuctx->active_pmu != pmu)
4370 			goto next;
4371 		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
4372 
4373 		ctxn = pmu->task_ctx_nr;
4374 		if (ctxn < 0)
4375 			goto next;
4376 
4377 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4378 		if (ctx)
4379 			perf_event_comm_ctx(ctx, comm_event);
4380 next:
4381 		put_cpu_ptr(pmu->pmu_cpu_context);
4382 	}
4383 	rcu_read_unlock();
4384 }
4385 
4386 void perf_event_comm(struct task_struct *task)
4387 {
4388 	struct perf_comm_event comm_event;
4389 	struct perf_event_context *ctx;
4390 	int ctxn;
4391 
4392 	for_each_task_context_nr(ctxn) {
4393 		ctx = task->perf_event_ctxp[ctxn];
4394 		if (!ctx)
4395 			continue;
4396 
4397 		perf_event_enable_on_exec(ctx);
4398 	}
4399 
4400 	if (!atomic_read(&nr_comm_events))
4401 		return;
4402 
4403 	comm_event = (struct perf_comm_event){
4404 		.task	= task,
4405 		/* .comm      */
4406 		/* .comm_size */
4407 		.event_id  = {
4408 			.header = {
4409 				.type = PERF_RECORD_COMM,
4410 				.misc = 0,
4411 				/* .size */
4412 			},
4413 			/* .pid */
4414 			/* .tid */
4415 		},
4416 	};
4417 
4418 	perf_event_comm_event(&comm_event);
4419 }
4420 
4421 /*
4422  * mmap tracking
4423  */
4424 
4425 struct perf_mmap_event {
4426 	struct vm_area_struct	*vma;
4427 
4428 	const char		*file_name;
4429 	int			file_size;
4430 
4431 	struct {
4432 		struct perf_event_header	header;
4433 
4434 		u32				pid;
4435 		u32				tid;
4436 		u64				start;
4437 		u64				len;
4438 		u64				pgoff;
4439 	} event_id;
4440 };
4441 
4442 static void perf_event_mmap_output(struct perf_event *event,
4443 				     struct perf_mmap_event *mmap_event)
4444 {
4445 	struct perf_output_handle handle;
4446 	struct perf_sample_data sample;
4447 	int size = mmap_event->event_id.header.size;
4448 	int ret;
4449 
4450 	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
4451 	ret = perf_output_begin(&handle, event,
4452 				mmap_event->event_id.header.size);
4453 	if (ret)
4454 		goto out;
4455 
4456 	mmap_event->event_id.pid = perf_event_pid(event, current);
4457 	mmap_event->event_id.tid = perf_event_tid(event, current);
4458 
4459 	perf_output_put(&handle, mmap_event->event_id);
4460 	__output_copy(&handle, mmap_event->file_name,
4461 				   mmap_event->file_size);
4462 
4463 	perf_event__output_id_sample(event, &handle, &sample);
4464 
4465 	perf_output_end(&handle);
4466 out:
4467 	mmap_event->event_id.header.size = size;
4468 }
4469 
4470 static int perf_event_mmap_match(struct perf_event *event,
4471 				   struct perf_mmap_event *mmap_event,
4472 				   int executable)
4473 {
4474 	if (event->state < PERF_EVENT_STATE_INACTIVE)
4475 		return 0;
4476 
4477 	if (!event_filter_match(event))
4478 		return 0;
4479 
4480 	if ((!executable && event->attr.mmap_data) ||
4481 	    (executable && event->attr.mmap))
4482 		return 1;
4483 
4484 	return 0;
4485 }
4486 
4487 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
4488 				  struct perf_mmap_event *mmap_event,
4489 				  int executable)
4490 {
4491 	struct perf_event *event;
4492 
4493 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4494 		if (perf_event_mmap_match(event, mmap_event, executable))
4495 			perf_event_mmap_output(event, mmap_event);
4496 	}
4497 }
4498 
4499 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4500 {
4501 	struct perf_cpu_context *cpuctx;
4502 	struct perf_event_context *ctx;
4503 	struct vm_area_struct *vma = mmap_event->vma;
4504 	struct file *file = vma->vm_file;
4505 	unsigned int size;
4506 	char tmp[16];
4507 	char *buf = NULL;
4508 	const char *name;
4509 	struct pmu *pmu;
4510 	int ctxn;
4511 
4512 	memset(tmp, 0, sizeof(tmp));
4513 
4514 	if (file) {
4515 		/*
		 * d_path() works from the end of the buffer backwards, so
		 * we need to add enough zero bytes after the string to
		 * handle the 64bit alignment we do later.
4519 		 */
4520 		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
4521 		if (!buf) {
4522 			name = strncpy(tmp, "//enomem", sizeof(tmp));
4523 			goto got_name;
4524 		}
4525 		name = d_path(&file->f_path, buf, PATH_MAX);
4526 		if (IS_ERR(name)) {
4527 			name = strncpy(tmp, "//toolong", sizeof(tmp));
4528 			goto got_name;
4529 		}
4530 	} else {
4531 		if (arch_vma_name(mmap_event->vma)) {
4532 			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4533 				       sizeof(tmp));
4534 			goto got_name;
4535 		}
4536 
4537 		if (!vma->vm_mm) {
4538 			name = strncpy(tmp, "[vdso]", sizeof(tmp));
4539 			goto got_name;
4540 		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
4541 				vma->vm_end >= vma->vm_mm->brk) {
4542 			name = strncpy(tmp, "[heap]", sizeof(tmp));
4543 			goto got_name;
4544 		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
4545 				vma->vm_end >= vma->vm_mm->start_stack) {
4546 			name = strncpy(tmp, "[stack]", sizeof(tmp));
4547 			goto got_name;
4548 		}
4549 
4550 		name = strncpy(tmp, "//anon", sizeof(tmp));
4551 		goto got_name;
4552 	}
4553 
4554 got_name:
4555 	size = ALIGN(strlen(name)+1, sizeof(u64));
4556 
4557 	mmap_event->file_name = name;
4558 	mmap_event->file_size = size;
4559 
4560 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4561 
4562 	rcu_read_lock();
4563 	list_for_each_entry_rcu(pmu, &pmus, entry) {
4564 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4565 		if (cpuctx->active_pmu != pmu)
4566 			goto next;
4567 		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4568 					vma->vm_flags & VM_EXEC);
4569 
4570 		ctxn = pmu->task_ctx_nr;
4571 		if (ctxn < 0)
4572 			goto next;
4573 
4574 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4575 		if (ctx) {
4576 			perf_event_mmap_ctx(ctx, mmap_event,
4577 					vma->vm_flags & VM_EXEC);
4578 		}
4579 next:
4580 		put_cpu_ptr(pmu->pmu_cpu_context);
4581 	}
4582 	rcu_read_unlock();
4583 
4584 	kfree(buf);
4585 }
4586 
4587 void perf_event_mmap(struct vm_area_struct *vma)
4588 {
4589 	struct perf_mmap_event mmap_event;
4590 
4591 	if (!atomic_read(&nr_mmap_events))
4592 		return;
4593 
4594 	mmap_event = (struct perf_mmap_event){
4595 		.vma	= vma,
4596 		/* .file_name */
4597 		/* .file_size */
4598 		.event_id  = {
4599 			.header = {
4600 				.type = PERF_RECORD_MMAP,
4601 				.misc = PERF_RECORD_MISC_USER,
4602 				/* .size */
4603 			},
4604 			/* .pid */
4605 			/* .tid */
4606 			.start  = vma->vm_start,
4607 			.len    = vma->vm_end - vma->vm_start,
4608 			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
4609 		},
4610 	};
4611 
4612 	perf_event_mmap_event(&mmap_event);
4613 }
4614 
4615 /*
4616  * IRQ throttle logging
4617  */
4618 
4619 static void perf_log_throttle(struct perf_event *event, int enable)
4620 {
4621 	struct perf_output_handle handle;
4622 	struct perf_sample_data sample;
4623 	int ret;
4624 
4625 	struct {
4626 		struct perf_event_header	header;
4627 		u64				time;
4628 		u64				id;
4629 		u64				stream_id;
4630 	} throttle_event = {
4631 		.header = {
4632 			.type = PERF_RECORD_THROTTLE,
4633 			.misc = 0,
4634 			.size = sizeof(throttle_event),
4635 		},
4636 		.time		= perf_clock(),
4637 		.id		= primary_event_id(event),
4638 		.stream_id	= event->id,
4639 	};
4640 
4641 	if (enable)
4642 		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4643 
4644 	perf_event_header__init_id(&throttle_event.header, &sample, event);
4645 
4646 	ret = perf_output_begin(&handle, event,
4647 				throttle_event.header.size);
4648 	if (ret)
4649 		return;
4650 
4651 	perf_output_put(&handle, throttle_event);
4652 	perf_event__output_id_sample(event, &handle, &sample);
4653 	perf_output_end(&handle);
4654 }
4655 
4656 /*
4657  * Generic event overflow handling, sampling.
4658  */
4659 
4660 static int __perf_event_overflow(struct perf_event *event,
4661 				   int throttle, struct perf_sample_data *data,
4662 				   struct pt_regs *regs)
4663 {
4664 	int events = atomic_read(&event->event_limit);
4665 	struct hw_perf_event *hwc = &event->hw;
4666 	u64 seq;
4667 	int ret = 0;
4668 
4669 	/*
4670 	 * Non-sampling counters might still use the PMI to fold short
4671 	 * hardware counters, ignore those.
4672 	 */
4673 	if (unlikely(!is_sampling_event(event)))
4674 		return 0;
4675 
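	/*
	 * perf_throttled_seq is advanced once per timer tick (see
	 * perf_event_task_tick()), so a stale hwc->interrupts_seq means
	 * a new throttle period has begun and the per-event interrupt
	 * count starts over.
	 */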
4676 	seq = __this_cpu_read(perf_throttled_seq);
4677 	if (seq != hwc->interrupts_seq) {
4678 		hwc->interrupts_seq = seq;
4679 		hwc->interrupts = 1;
4680 	} else {
4681 		hwc->interrupts++;
4682 		if (unlikely(throttle
4683 			     && hwc->interrupts >= max_samples_per_tick)) {
4684 			__this_cpu_inc(perf_throttled_count);
4685 			hwc->interrupts = MAX_INTERRUPTS;
4686 			perf_log_throttle(event, 0);
4687 			ret = 1;
4688 		}
4689 	}
4690 
4691 	if (event->attr.freq) {
4692 		u64 now = perf_clock();
4693 		s64 delta = now - hwc->freq_time_stamp;
4694 
4695 		hwc->freq_time_stamp = now;
4696 
4697 		if (delta > 0 && delta < 2*TICK_NSEC)
4698 			perf_adjust_period(event, delta, hwc->last_period, true);
4699 	}
4700 
4701 	/*
4702 	 * XXX event_limit might not quite work as expected on inherited
4703 	 * events
4704 	 */
4705 
4706 	event->pending_kill = POLL_IN;
4707 	if (events && atomic_dec_and_test(&event->event_limit)) {
4708 		ret = 1;
4709 		event->pending_kill = POLL_HUP;
4710 		event->pending_disable = 1;
4711 		irq_work_queue(&event->pending);
4712 	}
4713 
4714 	if (event->overflow_handler)
4715 		event->overflow_handler(event, data, regs);
4716 	else
4717 		perf_event_output(event, data, regs);
4718 
4719 	if (event->fasync && event->pending_kill) {
4720 		event->pending_wakeup = 1;
4721 		irq_work_queue(&event->pending);
4722 	}
4723 
4724 	return ret;
4725 }
4726 
4727 int perf_event_overflow(struct perf_event *event,
4728 			  struct perf_sample_data *data,
4729 			  struct pt_regs *regs)
4730 {
4731 	return __perf_event_overflow(event, 1, data, regs);
4732 }
4733 
4734 /*
4735  * Generic software event infrastructure
4736  */
4737 
4738 struct swevent_htable {
4739 	struct swevent_hlist		*swevent_hlist;
4740 	struct mutex			hlist_mutex;
4741 	int				hlist_refcount;
4742 
	/* Recursion avoidance in each context */
4744 	int				recursion[PERF_NR_CONTEXTS];
4745 };
4746 
4747 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
4748 
4749 /*
4750  * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period counter
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as the overflow trigger.
4754  */
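
/*
 * Worked example (illustrative): with sample_period = 4 and
 * period_left having climbed to +5, perf_swevent_set_period() below
 * reports nr = (4 + 5) / 4 = 2 overflows and rewinds period_left to
 * 5 - 2*4 = -3, back inside [-sample_period, 0].
 */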
4755 
4756 static u64 perf_swevent_set_period(struct perf_event *event)
4757 {
4758 	struct hw_perf_event *hwc = &event->hw;
4759 	u64 period = hwc->last_period;
4760 	u64 nr, offset;
4761 	s64 old, val;
4762 
4763 	hwc->last_period = hwc->sample_period;
4764 
4765 again:
4766 	old = val = local64_read(&hwc->period_left);
4767 	if (val < 0)
4768 		return 0;
4769 
4770 	nr = div64_u64(period + val, period);
4771 	offset = nr * period;
4772 	val -= offset;
4773 	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
4774 		goto again;
4775 
4776 	return nr;
4777 }
4778 
4779 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
4780 				    struct perf_sample_data *data,
4781 				    struct pt_regs *regs)
4782 {
4783 	struct hw_perf_event *hwc = &event->hw;
4784 	int throttle = 0;
4785 
4786 	if (!overflow)
4787 		overflow = perf_swevent_set_period(event);
4788 
4789 	if (hwc->interrupts == MAX_INTERRUPTS)
4790 		return;
4791 
4792 	for (; overflow; overflow--) {
4793 		if (__perf_event_overflow(event, throttle,
4794 					    data, regs)) {
4795 			/*
4796 			 * We inhibit the overflow from happening when
4797 			 * hwc->interrupts == MAX_INTERRUPTS.
4798 			 */
4799 			break;
4800 		}
4801 		throttle = 1;
4802 	}
4803 }
4804 
4805 static void perf_swevent_event(struct perf_event *event, u64 nr,
4806 			       struct perf_sample_data *data,
4807 			       struct pt_regs *regs)
4808 {
4809 	struct hw_perf_event *hwc = &event->hw;
4810 
4811 	local64_add(nr, &event->count);
4812 
4813 	if (!regs)
4814 		return;
4815 
4816 	if (!is_sampling_event(event))
4817 		return;
4818 
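	/*
	 * Events that sample the period, and plain period == 1 events,
	 * overflow on every increment; everything else accumulates @nr
	 * in period_left and only overflows once it turns non-negative.
	 */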
4819 	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
4820 		data->period = nr;
4821 		return perf_swevent_overflow(event, 1, data, regs);
4822 	} else
4823 		data->period = event->hw.last_period;
4824 
4825 	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4826 		return perf_swevent_overflow(event, 1, data, regs);
4827 
4828 	if (local64_add_negative(nr, &hwc->period_left))
4829 		return;
4830 
4831 	perf_swevent_overflow(event, 0, data, regs);
4832 }
4833 
4834 static int perf_exclude_event(struct perf_event *event,
4835 			      struct pt_regs *regs)
4836 {
4837 	if (event->hw.state & PERF_HES_STOPPED)
4838 		return 1;
4839 
4840 	if (regs) {
4841 		if (event->attr.exclude_user && user_mode(regs))
4842 			return 1;
4843 
4844 		if (event->attr.exclude_kernel && !user_mode(regs))
4845 			return 1;
4846 	}
4847 
4848 	return 0;
4849 }
4850 
4851 static int perf_swevent_match(struct perf_event *event,
4852 				enum perf_type_id type,
4853 				u32 event_id,
4854 				struct perf_sample_data *data,
4855 				struct pt_regs *regs)
4856 {
4857 	if (event->attr.type != type)
4858 		return 0;
4859 
4860 	if (event->attr.config != event_id)
4861 		return 0;
4862 
4863 	if (perf_exclude_event(event, regs))
4864 		return 0;
4865 
4866 	return 1;
4867 }
4868 
4869 static inline u64 swevent_hash(u64 type, u32 event_id)
4870 {
4871 	u64 val = event_id | (type << 32);
4872 
4873 	return hash_64(val, SWEVENT_HLIST_BITS);
4874 }
4875 
4876 static inline struct hlist_head *
4877 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
4878 {
4879 	u64 hash = swevent_hash(type, event_id);
4880 
4881 	return &hlist->heads[hash];
4882 }
4883 
/* For the read side: hlist lookup when events trigger */
4885 static inline struct hlist_head *
4886 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
4887 {
4888 	struct swevent_hlist *hlist;
4889 
4890 	hlist = rcu_dereference(swhash->swevent_hlist);
4891 	if (!hlist)
4892 		return NULL;
4893 
4894 	return __find_swevent_head(hlist, type, event_id);
4895 }
4896 
4897 /* For the event head insertion and removal in the hlist */
4898 static inline struct hlist_head *
4899 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
4900 {
4901 	struct swevent_hlist *hlist;
4902 	u32 event_id = event->attr.config;
4903 	u64 type = event->attr.type;
4904 
4905 	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release (the context lock guarantees that), which makes
	 * the protected version suitable here.
4909 	 */
4910 	hlist = rcu_dereference_protected(swhash->swevent_hlist,
4911 					  lockdep_is_held(&event->ctx->lock));
4912 	if (!hlist)
4913 		return NULL;
4914 
4915 	return __find_swevent_head(hlist, type, event_id);
4916 }
4917 
4918 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4919 				    u64 nr,
4920 				    struct perf_sample_data *data,
4921 				    struct pt_regs *regs)
4922 {
4923 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4924 	struct perf_event *event;
4925 	struct hlist_node *node;
4926 	struct hlist_head *head;
4927 
4928 	rcu_read_lock();
4929 	head = find_swevent_head_rcu(swhash, type, event_id);
4930 	if (!head)
4931 		goto end;
4932 
4933 	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4934 		if (perf_swevent_match(event, type, event_id, data, regs))
4935 			perf_swevent_event(event, nr, data, regs);
4936 	}
4937 end:
4938 	rcu_read_unlock();
4939 }
4940 
4941 int perf_swevent_get_recursion_context(void)
4942 {
4943 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4944 
4945 	return get_recursion_context(swhash->recursion);
4946 }
4947 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
4948 
4949 inline void perf_swevent_put_recursion_context(int rctx)
4950 {
4951 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4952 
4953 	put_recursion_context(swhash->recursion, rctx);
4954 }
4955 
4956 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
4957 {
4958 	struct perf_sample_data data;
4959 	int rctx;
4960 
4961 	preempt_disable_notrace();
4962 	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto fail;
4965 
4966 	perf_sample_data_init(&data, addr, 0);
4967 
4968 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
4969 
4970 	perf_swevent_put_recursion_context(rctx);
fail:
	preempt_enable_notrace();
4972 }
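
/*
 * A typical call site sketch (illustrative; real callers use the
 * perf_sw_event() wrapper from <linux/perf_event.h>, which checks the
 * perf_swevent_enabled static key before calling here):
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * arrives here with event_id = PERF_COUNT_SW_PAGE_FAULTS and nr = 1.
 */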
4973 
4974 static void perf_swevent_read(struct perf_event *event)
4975 {
4976 }
4977 
4978 static int perf_swevent_add(struct perf_event *event, int flags)
4979 {
4980 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4981 	struct hw_perf_event *hwc = &event->hw;
4982 	struct hlist_head *head;
4983 
4984 	if (is_sampling_event(event)) {
4985 		hwc->last_period = hwc->sample_period;
4986 		perf_swevent_set_period(event);
4987 	}
4988 
4989 	hwc->state = !(flags & PERF_EF_START);
4990 
4991 	head = find_swevent_head(swhash, event);
4992 	if (WARN_ON_ONCE(!head))
4993 		return -EINVAL;
4994 
4995 	hlist_add_head_rcu(&event->hlist_entry, head);
4996 
4997 	return 0;
4998 }
4999 
5000 static void perf_swevent_del(struct perf_event *event, int flags)
5001 {
5002 	hlist_del_rcu(&event->hlist_entry);
5003 }
5004 
5005 static void perf_swevent_start(struct perf_event *event, int flags)
5006 {
5007 	event->hw.state = 0;
5008 }
5009 
5010 static void perf_swevent_stop(struct perf_event *event, int flags)
5011 {
5012 	event->hw.state = PERF_HES_STOPPED;
5013 }
5014 
5015 /* Deref the hlist from the update side */
5016 static inline struct swevent_hlist *
5017 swevent_hlist_deref(struct swevent_htable *swhash)
5018 {
5019 	return rcu_dereference_protected(swhash->swevent_hlist,
5020 					 lockdep_is_held(&swhash->hlist_mutex));
5021 }
5022 
5023 static void swevent_hlist_release(struct swevent_htable *swhash)
5024 {
5025 	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
5026 
5027 	if (!hlist)
5028 		return;
5029 
5030 	rcu_assign_pointer(swhash->swevent_hlist, NULL);
5031 	kfree_rcu(hlist, rcu_head);
5032 }
5033 
5034 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5035 {
5036 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5037 
5038 	mutex_lock(&swhash->hlist_mutex);
5039 
5040 	if (!--swhash->hlist_refcount)
5041 		swevent_hlist_release(swhash);
5042 
5043 	mutex_unlock(&swhash->hlist_mutex);
5044 }
5045 
5046 static void swevent_hlist_put(struct perf_event *event)
5047 {
5048 	int cpu;
5049 
5050 	if (event->cpu != -1) {
5051 		swevent_hlist_put_cpu(event, event->cpu);
5052 		return;
5053 	}
5054 
5055 	for_each_possible_cpu(cpu)
5056 		swevent_hlist_put_cpu(event, cpu);
5057 }
5058 
5059 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5060 {
5061 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5062 	int err = 0;
5063 
5064 	mutex_lock(&swhash->hlist_mutex);
5065 
5066 	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
5067 		struct swevent_hlist *hlist;
5068 
5069 		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5070 		if (!hlist) {
5071 			err = -ENOMEM;
5072 			goto exit;
5073 		}
5074 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
5075 	}
5076 	swhash->hlist_refcount++;
5077 exit:
5078 	mutex_unlock(&swhash->hlist_mutex);
5079 
5080 	return err;
5081 }
5082 
5083 static int swevent_hlist_get(struct perf_event *event)
5084 {
5085 	int err;
5086 	int cpu, failed_cpu;
5087 
5088 	if (event->cpu != -1)
5089 		return swevent_hlist_get_cpu(event, event->cpu);
5090 
5091 	get_online_cpus();
5092 	for_each_possible_cpu(cpu) {
5093 		err = swevent_hlist_get_cpu(event, cpu);
5094 		if (err) {
5095 			failed_cpu = cpu;
5096 			goto fail;
5097 		}
5098 	}
5099 	put_online_cpus();
5100 
5101 	return 0;
5102 fail:
5103 	for_each_possible_cpu(cpu) {
5104 		if (cpu == failed_cpu)
5105 			break;
5106 		swevent_hlist_put_cpu(event, cpu);
5107 	}
5108 
5109 	put_online_cpus();
5110 	return err;
5111 }
5112 
5113 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
5114 
5115 static void sw_perf_event_destroy(struct perf_event *event)
5116 {
5117 	u64 event_id = event->attr.config;
5118 
5119 	WARN_ON(event->parent);
5120 
5121 	static_key_slow_dec(&perf_swevent_enabled[event_id]);
5122 	swevent_hlist_put(event);
5123 }
5124 
5125 static int perf_swevent_init(struct perf_event *event)
5126 {
	u64 event_id = event->attr.config;
5128 
5129 	if (event->attr.type != PERF_TYPE_SOFTWARE)
5130 		return -ENOENT;
5131 
5132 	/*
5133 	 * no branch sampling for software events
5134 	 */
5135 	if (has_branch_stack(event))
5136 		return -EOPNOTSUPP;
5137 
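	/*
	 * CPU and task clock events are served by their own pmus
	 * (perf_cpu_clock and perf_task_clock below), so reject them
	 * here and let perf_init_event() keep looking.
	 */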
5138 	switch (event_id) {
5139 	case PERF_COUNT_SW_CPU_CLOCK:
5140 	case PERF_COUNT_SW_TASK_CLOCK:
5141 		return -ENOENT;
5142 
5143 	default:
5144 		break;
5145 	}
5146 
5147 	if (event_id >= PERF_COUNT_SW_MAX)
5148 		return -ENOENT;
5149 
5150 	if (!event->parent) {
5151 		int err;
5152 
5153 		err = swevent_hlist_get(event);
5154 		if (err)
5155 			return err;
5156 
5157 		static_key_slow_inc(&perf_swevent_enabled[event_id]);
5158 		event->destroy = sw_perf_event_destroy;
5159 	}
5160 
5161 	return 0;
5162 }
5163 
5164 static int perf_swevent_event_idx(struct perf_event *event)
5165 {
5166 	return 0;
5167 }
5168 
5169 static struct pmu perf_swevent = {
5170 	.task_ctx_nr	= perf_sw_context,
5171 
5172 	.event_init	= perf_swevent_init,
5173 	.add		= perf_swevent_add,
5174 	.del		= perf_swevent_del,
5175 	.start		= perf_swevent_start,
5176 	.stop		= perf_swevent_stop,
5177 	.read		= perf_swevent_read,
5178 
5179 	.event_idx	= perf_swevent_event_idx,
5180 };
5181 
5182 #ifdef CONFIG_EVENT_TRACING
5183 
5184 static int perf_tp_filter_match(struct perf_event *event,
5185 				struct perf_sample_data *data)
5186 {
5187 	void *record = data->raw->data;
5188 
5189 	if (likely(!event->filter) || filter_match_preds(event->filter, record))
5190 		return 1;
5191 	return 0;
5192 }
5193 
5194 static int perf_tp_event_match(struct perf_event *event,
5195 				struct perf_sample_data *data,
5196 				struct pt_regs *regs)
5197 {
5198 	if (event->hw.state & PERF_HES_STOPPED)
5199 		return 0;
5200 	/*
5201 	 * All tracepoints are from kernel-space.
5202 	 */
5203 	if (event->attr.exclude_kernel)
5204 		return 0;
5205 
5206 	if (!perf_tp_filter_match(event, data))
5207 		return 0;
5208 
5209 	return 1;
5210 }
5211 
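/*
 * Entry point for the tracepoint glue: perf_trace_buf_submit() in
 * <linux/ftrace_event.h> hands us the raw trace record together with
 * the recursion context it obtained when preparing the buffer.
 */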
5212 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5213 		   struct pt_regs *regs, struct hlist_head *head, int rctx)
5214 {
5215 	struct perf_sample_data data;
5216 	struct perf_event *event;
5217 	struct hlist_node *node;
5218 
5219 	struct perf_raw_record raw = {
5220 		.size = entry_size,
5221 		.data = record,
5222 	};
5223 
5224 	perf_sample_data_init(&data, addr, 0);
5225 	data.raw = &raw;
5226 
5227 	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
5228 		if (perf_tp_event_match(event, &data, regs))
5229 			perf_swevent_event(event, count, &data, regs);
5230 	}
5231 
5232 	perf_swevent_put_recursion_context(rctx);
5233 }
5234 EXPORT_SYMBOL_GPL(perf_tp_event);
5235 
5236 static void tp_perf_event_destroy(struct perf_event *event)
5237 {
5238 	perf_trace_destroy(event);
5239 }
5240 
5241 static int perf_tp_event_init(struct perf_event *event)
5242 {
5243 	int err;
5244 
5245 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
5246 		return -ENOENT;
5247 
5248 	/*
5249 	 * no branch sampling for tracepoint events
5250 	 */
5251 	if (has_branch_stack(event))
5252 		return -EOPNOTSUPP;
5253 
5254 	err = perf_trace_init(event);
5255 	if (err)
5256 		return err;
5257 
5258 	event->destroy = tp_perf_event_destroy;
5259 
5260 	return 0;
5261 }
5262 
5263 static struct pmu perf_tracepoint = {
5264 	.task_ctx_nr	= perf_sw_context,
5265 
5266 	.event_init	= perf_tp_event_init,
5267 	.add		= perf_trace_add,
5268 	.del		= perf_trace_del,
5269 	.start		= perf_swevent_start,
5270 	.stop		= perf_swevent_stop,
5271 	.read		= perf_swevent_read,
5272 
5273 	.event_idx	= perf_swevent_event_idx,
5274 };
5275 
5276 static inline void perf_tp_register(void)
5277 {
5278 	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
5279 }
5280 
5281 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5282 {
5283 	char *filter_str;
5284 	int ret;
5285 
5286 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
5287 		return -EINVAL;
5288 
5289 	filter_str = strndup_user(arg, PAGE_SIZE);
5290 	if (IS_ERR(filter_str))
5291 		return PTR_ERR(filter_str);
5292 
5293 	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5294 
5295 	kfree(filter_str);
5296 	return ret;
5297 }
5298 
5299 static void perf_event_free_filter(struct perf_event *event)
5300 {
5301 	ftrace_profile_free_filter(event);
5302 }
5303 
5304 #else
5305 
5306 static inline void perf_tp_register(void)
5307 {
5308 }
5309 
5310 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5311 {
5312 	return -ENOENT;
5313 }
5314 
5315 static void perf_event_free_filter(struct perf_event *event)
5316 {
5317 }
5318 
5319 #endif /* CONFIG_EVENT_TRACING */
5320 
5321 #ifdef CONFIG_HAVE_HW_BREAKPOINT
5322 void perf_bp_event(struct perf_event *bp, void *data)
5323 {
5324 	struct perf_sample_data sample;
5325 	struct pt_regs *regs = data;
5326 
5327 	perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
5328 
5329 	if (!bp->hw.state && !perf_exclude_event(bp, regs))
5330 		perf_swevent_event(bp, 1, &sample, regs);
5331 }
5332 #endif
5333 
5334 /*
5335  * hrtimer based swevent callback
5336  */
5337 
5338 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
5339 {
5340 	enum hrtimer_restart ret = HRTIMER_RESTART;
5341 	struct perf_sample_data data;
5342 	struct pt_regs *regs;
5343 	struct perf_event *event;
5344 	u64 period;
5345 
5346 	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
5347 
5348 	if (event->state != PERF_EVENT_STATE_ACTIVE)
5349 		return HRTIMER_NORESTART;
5350 
5351 	event->pmu->read(event);
5352 
5353 	perf_sample_data_init(&data, 0, event->hw.last_period);
5354 	regs = get_irq_regs();
5355 
5356 	if (regs && !perf_exclude_event(event, regs)) {
5357 		if (!(event->attr.exclude_idle && is_idle_task(current)))
5358 			if (__perf_event_overflow(event, 1, &data, regs))
5359 				ret = HRTIMER_NORESTART;
5360 	}
5361 
5362 	period = max_t(u64, 10000, event->hw.sample_period);
5363 	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
5364 
5365 	return ret;
5366 }
5367 
5368 static void perf_swevent_start_hrtimer(struct perf_event *event)
5369 {
5370 	struct hw_perf_event *hwc = &event->hw;
5371 	s64 period;
5372 
5373 	if (!is_sampling_event(event))
5374 		return;
5375 
5376 	period = local64_read(&hwc->period_left);
5377 	if (period) {
5378 		if (period < 0)
5379 			period = 10000;
5380 
5381 		local64_set(&hwc->period_left, 0);
5382 	} else {
5383 		period = max_t(u64, 10000, hwc->sample_period);
5384 	}
5385 	__hrtimer_start_range_ns(&hwc->hrtimer,
5386 				ns_to_ktime(period), 0,
5387 				HRTIMER_MODE_REL_PINNED, 0);
5388 }
5389 
5390 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
5391 {
5392 	struct hw_perf_event *hwc = &event->hw;
5393 
5394 	if (is_sampling_event(event)) {
5395 		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
5396 		local64_set(&hwc->period_left, ktime_to_ns(remaining));
5397 
5398 		hrtimer_cancel(&hwc->hrtimer);
5399 	}
5400 }
5401 
5402 static void perf_swevent_init_hrtimer(struct perf_event *event)
5403 {
5404 	struct hw_perf_event *hwc = &event->hw;
5405 
5406 	if (!is_sampling_event(event))
5407 		return;
5408 
5409 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5410 	hwc->hrtimer.function = perf_swevent_hrtimer;
5411 
5412 	/*
5413 	 * Since hrtimers have a fixed rate, we can do a static freq->period
5414 	 * mapping and avoid the whole period adjust feedback stuff.
5415 	 */
5416 	if (event->attr.freq) {
5417 		long freq = event->attr.sample_freq;
5418 
5419 		event->attr.sample_period = NSEC_PER_SEC / freq;
5420 		hwc->sample_period = event->attr.sample_period;
5421 		local64_set(&hwc->period_left, hwc->sample_period);
5422 		event->attr.freq = 0;
5423 	}
5424 }
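
/*
 * E.g. (illustrative): attr.freq = 1 with attr.sample_freq = 4000 maps
 * to a fixed period of NSEC_PER_SEC / 4000 = 250000ns, so the hrtimer
 * simply fires every 250us with no feedback adjustment.
 */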
5425 
5426 /*
5427  * Software event: cpu wall time clock
5428  */
5429 
5430 static void cpu_clock_event_update(struct perf_event *event)
5431 {
5432 	s64 prev;
5433 	u64 now;
5434 
5435 	now = local_clock();
5436 	prev = local64_xchg(&event->hw.prev_count, now);
5437 	local64_add(now - prev, &event->count);
5438 }
5439 
5440 static void cpu_clock_event_start(struct perf_event *event, int flags)
5441 {
5442 	local64_set(&event->hw.prev_count, local_clock());
5443 	perf_swevent_start_hrtimer(event);
5444 }
5445 
5446 static void cpu_clock_event_stop(struct perf_event *event, int flags)
5447 {
5448 	perf_swevent_cancel_hrtimer(event);
5449 	cpu_clock_event_update(event);
5450 }
5451 
5452 static int cpu_clock_event_add(struct perf_event *event, int flags)
5453 {
5454 	if (flags & PERF_EF_START)
5455 		cpu_clock_event_start(event, flags);
5456 
5457 	return 0;
5458 }
5459 
5460 static void cpu_clock_event_del(struct perf_event *event, int flags)
5461 {
5462 	cpu_clock_event_stop(event, flags);
5463 }
5464 
5465 static void cpu_clock_event_read(struct perf_event *event)
5466 {
5467 	cpu_clock_event_update(event);
5468 }
5469 
5470 static int cpu_clock_event_init(struct perf_event *event)
5471 {
5472 	if (event->attr.type != PERF_TYPE_SOFTWARE)
5473 		return -ENOENT;
5474 
5475 	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5476 		return -ENOENT;
5477 
5478 	/*
5479 	 * no branch sampling for software events
5480 	 */
5481 	if (has_branch_stack(event))
5482 		return -EOPNOTSUPP;
5483 
5484 	perf_swevent_init_hrtimer(event);
5485 
5486 	return 0;
5487 }
5488 
5489 static struct pmu perf_cpu_clock = {
5490 	.task_ctx_nr	= perf_sw_context,
5491 
5492 	.event_init	= cpu_clock_event_init,
5493 	.add		= cpu_clock_event_add,
5494 	.del		= cpu_clock_event_del,
5495 	.start		= cpu_clock_event_start,
5496 	.stop		= cpu_clock_event_stop,
5497 	.read		= cpu_clock_event_read,
5498 
5499 	.event_idx	= perf_swevent_event_idx,
5500 };
5501 
5502 /*
5503  * Software event: task time clock
5504  */
5505 
5506 static void task_clock_event_update(struct perf_event *event, u64 now)
5507 {
5508 	u64 prev;
5509 	s64 delta;
5510 
5511 	prev = local64_xchg(&event->hw.prev_count, now);
5512 	delta = now - prev;
5513 	local64_add(delta, &event->count);
5514 }
5515 
5516 static void task_clock_event_start(struct perf_event *event, int flags)
5517 {
5518 	local64_set(&event->hw.prev_count, event->ctx->time);
5519 	perf_swevent_start_hrtimer(event);
5520 }
5521 
5522 static void task_clock_event_stop(struct perf_event *event, int flags)
5523 {
5524 	perf_swevent_cancel_hrtimer(event);
5525 	task_clock_event_update(event, event->ctx->time);
5526 }
5527 
5528 static int task_clock_event_add(struct perf_event *event, int flags)
5529 {
5530 	if (flags & PERF_EF_START)
5531 		task_clock_event_start(event, flags);
5532 
5533 	return 0;
5534 }
5535 
5536 static void task_clock_event_del(struct perf_event *event, int flags)
5537 {
5538 	task_clock_event_stop(event, PERF_EF_UPDATE);
5539 }
5540 
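/*
 * ctx->time was last folded up at ctx->timestamp; add the wall-clock
 * delta since then so a read between context-time updates does not
 * appear stale.
 */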
5541 static void task_clock_event_read(struct perf_event *event)
5542 {
5543 	u64 now = perf_clock();
5544 	u64 delta = now - event->ctx->timestamp;
5545 	u64 time = event->ctx->time + delta;
5546 
5547 	task_clock_event_update(event, time);
5548 }
5549 
5550 static int task_clock_event_init(struct perf_event *event)
5551 {
5552 	if (event->attr.type != PERF_TYPE_SOFTWARE)
5553 		return -ENOENT;
5554 
5555 	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5556 		return -ENOENT;
5557 
5558 	/*
5559 	 * no branch sampling for software events
5560 	 */
5561 	if (has_branch_stack(event))
5562 		return -EOPNOTSUPP;
5563 
5564 	perf_swevent_init_hrtimer(event);
5565 
5566 	return 0;
5567 }
5568 
5569 static struct pmu perf_task_clock = {
5570 	.task_ctx_nr	= perf_sw_context,
5571 
5572 	.event_init	= task_clock_event_init,
5573 	.add		= task_clock_event_add,
5574 	.del		= task_clock_event_del,
5575 	.start		= task_clock_event_start,
5576 	.stop		= task_clock_event_stop,
5577 	.read		= task_clock_event_read,
5578 
5579 	.event_idx	= perf_swevent_event_idx,
5580 };
5581 
5582 static void perf_pmu_nop_void(struct pmu *pmu)
5583 {
5584 }
5585 
5586 static int perf_pmu_nop_int(struct pmu *pmu)
5587 {
5588 	return 0;
5589 }
5590 
5591 static void perf_pmu_start_txn(struct pmu *pmu)
5592 {
5593 	perf_pmu_disable(pmu);
5594 }
5595 
5596 static int perf_pmu_commit_txn(struct pmu *pmu)
5597 {
5598 	perf_pmu_enable(pmu);
5599 	return 0;
5600 }
5601 
5602 static void perf_pmu_cancel_txn(struct pmu *pmu)
5603 {
5604 	perf_pmu_enable(pmu);
5605 }
5606 
5607 static int perf_event_idx_default(struct perf_event *event)
5608 {
5609 	return event->hw.idx + 1;
5610 }
5611 
5612 /*
5613  * Ensures all contexts with the same task_ctx_nr have the same
5614  * pmu_cpu_context too.
5615  */
5616 static void *find_pmu_context(int ctxn)
5617 {
5618 	struct pmu *pmu;
5619 
5620 	if (ctxn < 0)
5621 		return NULL;
5622 
5623 	list_for_each_entry(pmu, &pmus, entry) {
5624 		if (pmu->task_ctx_nr == ctxn)
5625 			return pmu->pmu_cpu_context;
5626 	}
5627 
5628 	return NULL;
5629 }
5630 
5631 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
5632 {
5633 	int cpu;
5634 
5635 	for_each_possible_cpu(cpu) {
5636 		struct perf_cpu_context *cpuctx;
5637 
5638 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5639 
5640 		if (cpuctx->active_pmu == old_pmu)
5641 			cpuctx->active_pmu = pmu;
5642 	}
5643 }
5644 
5645 static void free_pmu_context(struct pmu *pmu)
5646 {
5647 	struct pmu *i;
5648 
5649 	mutex_lock(&pmus_lock);
5650 	/*
	 * Like a real lame refcount: the percpu context is shared by all
	 * pmus with the same task_ctx_nr, so only free it once nobody
	 * else references it.
5652 	 */
5653 	list_for_each_entry(i, &pmus, entry) {
5654 		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
5655 			update_pmu_context(i, pmu);
5656 			goto out;
5657 		}
5658 	}
5659 
5660 	free_percpu(pmu->pmu_cpu_context);
5661 out:
5662 	mutex_unlock(&pmus_lock);
5663 }
5664 static struct idr pmu_idr;
5665 
5666 static ssize_t
5667 type_show(struct device *dev, struct device_attribute *attr, char *page)
5668 {
5669 	struct pmu *pmu = dev_get_drvdata(dev);
5670 
5671 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
5672 }
5673 
5674 static struct device_attribute pmu_dev_attrs[] = {
5675        __ATTR_RO(type),
5676        __ATTR_NULL,
5677 };
5678 
5679 static int pmu_bus_running;
5680 static struct bus_type pmu_bus = {
5681 	.name		= "event_source",
5682 	.dev_attrs	= pmu_dev_attrs,
5683 };
5684 
5685 static void pmu_dev_release(struct device *dev)
5686 {
5687 	kfree(dev);
5688 }
5689 
5690 static int pmu_dev_alloc(struct pmu *pmu)
5691 {
5692 	int ret = -ENOMEM;
5693 
5694 	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
5695 	if (!pmu->dev)
5696 		goto out;
5697 
5698 	pmu->dev->groups = pmu->attr_groups;
5699 	device_initialize(pmu->dev);
5700 	ret = dev_set_name(pmu->dev, "%s", pmu->name);
5701 	if (ret)
5702 		goto free_dev;
5703 
5704 	dev_set_drvdata(pmu->dev, pmu);
5705 	pmu->dev->bus = &pmu_bus;
5706 	pmu->dev->release = pmu_dev_release;
5707 	ret = device_add(pmu->dev);
5708 	if (ret)
5709 		goto free_dev;
5710 
5711 out:
5712 	return ret;
5713 
5714 free_dev:
5715 	put_device(pmu->dev);
5716 	goto out;
5717 }
5718 
5719 static struct lock_class_key cpuctx_mutex;
5720 static struct lock_class_key cpuctx_lock;
5721 
5722 int perf_pmu_register(struct pmu *pmu, char *name, int type)
5723 {
5724 	int cpu, ret;
5725 
5726 	mutex_lock(&pmus_lock);
5727 	ret = -ENOMEM;
5728 	pmu->pmu_disable_count = alloc_percpu(int);
5729 	if (!pmu->pmu_disable_count)
5730 		goto unlock;
5731 
5732 	pmu->type = -1;
5733 	if (!name)
5734 		goto skip_type;
5735 	pmu->name = name;
5736 
5737 	if (type < 0) {
5738 		int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
5739 		if (!err)
5740 			goto free_pdc;
5741 
5742 		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
5743 		if (err) {
5744 			ret = err;
5745 			goto free_pdc;
5746 		}
5747 	}
5748 	pmu->type = type;
5749 
5750 	if (pmu_bus_running) {
5751 		ret = pmu_dev_alloc(pmu);
5752 		if (ret)
5753 			goto free_idr;
5754 	}
5755 
5756 skip_type:
5757 	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
5758 	if (pmu->pmu_cpu_context)
5759 		goto got_cpu_context;
5760 
5761 	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5762 	if (!pmu->pmu_cpu_context)
5763 		goto free_dev;
5764 
5765 	for_each_possible_cpu(cpu) {
5766 		struct perf_cpu_context *cpuctx;
5767 
5768 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5769 		__perf_event_init_context(&cpuctx->ctx);
5770 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
5771 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
5772 		cpuctx->ctx.type = cpu_context;
5773 		cpuctx->ctx.pmu = pmu;
5774 		cpuctx->jiffies_interval = 1;
5775 		INIT_LIST_HEAD(&cpuctx->rotation_list);
5776 		cpuctx->active_pmu = pmu;
5777 	}
5778 
5779 got_cpu_context:
5780 	if (!pmu->start_txn) {
5781 		if (pmu->pmu_enable) {
5782 			/*
5783 			 * If we have pmu_enable/pmu_disable calls, install
5784 			 * transaction stubs that use that to try and batch
5785 			 * hardware accesses.
5786 			 */
5787 			pmu->start_txn  = perf_pmu_start_txn;
5788 			pmu->commit_txn = perf_pmu_commit_txn;
5789 			pmu->cancel_txn = perf_pmu_cancel_txn;
5790 		} else {
5791 			pmu->start_txn  = perf_pmu_nop_void;
5792 			pmu->commit_txn = perf_pmu_nop_int;
5793 			pmu->cancel_txn = perf_pmu_nop_void;
5794 		}
5795 	}
5796 
5797 	if (!pmu->pmu_enable) {
5798 		pmu->pmu_enable  = perf_pmu_nop_void;
5799 		pmu->pmu_disable = perf_pmu_nop_void;
5800 	}
5801 
5802 	if (!pmu->event_idx)
5803 		pmu->event_idx = perf_event_idx_default;
5804 
5805 	list_add_rcu(&pmu->entry, &pmus);
5806 	ret = 0;
5807 unlock:
5808 	mutex_unlock(&pmus_lock);
5809 
5810 	return ret;
5811 
5812 free_dev:
5813 	device_del(pmu->dev);
5814 	put_device(pmu->dev);
5815 
5816 free_idr:
5817 	if (pmu->type >= PERF_TYPE_MAX)
5818 		idr_remove(&pmu_idr, pmu->type);
5819 
5820 free_pdc:
5821 	free_percpu(pmu->pmu_disable_count);
5822 	goto unlock;
5823 }
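
/*
 * A minimal registration sketch (hypothetical driver code: the my_*
 * callbacks are made up, but the struct pmu fields and the call
 * signature are the ones handled above):
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *
 * Passing type == -1 requests a dynamic id from pmu_idr via the
 * idr_get_new_above() path above.
 */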
5824 
5825 void perf_pmu_unregister(struct pmu *pmu)
5826 {
5827 	mutex_lock(&pmus_lock);
5828 	list_del_rcu(&pmu->entry);
5829 	mutex_unlock(&pmus_lock);
5830 
5831 	/*
5832 	 * We dereference the pmu list under both SRCU and regular RCU, so
5833 	 * synchronize against both of those.
5834 	 */
5835 	synchronize_srcu(&pmus_srcu);
5836 	synchronize_rcu();
5837 
5838 	free_percpu(pmu->pmu_disable_count);
5839 	if (pmu->type >= PERF_TYPE_MAX)
5840 		idr_remove(&pmu_idr, pmu->type);
5841 	device_del(pmu->dev);
5842 	put_device(pmu->dev);
5843 	free_pmu_context(pmu);
5844 }
5845 
5846 struct pmu *perf_init_event(struct perf_event *event)
5847 {
5848 	struct pmu *pmu = NULL;
5849 	int idx;
5850 	int ret;
5851 
5852 	idx = srcu_read_lock(&pmus_srcu);
5853 
5854 	rcu_read_lock();
5855 	pmu = idr_find(&pmu_idr, event->attr.type);
5856 	rcu_read_unlock();
5857 	if (pmu) {
5858 		event->pmu = pmu;
5859 		ret = pmu->event_init(event);
5860 		if (ret)
5861 			pmu = ERR_PTR(ret);
5862 		goto unlock;
5863 	}
5864 
5865 	list_for_each_entry_rcu(pmu, &pmus, entry) {
5866 		event->pmu = pmu;
5867 		ret = pmu->event_init(event);
5868 		if (!ret)
5869 			goto unlock;
5870 
5871 		if (ret != -ENOENT) {
5872 			pmu = ERR_PTR(ret);
5873 			goto unlock;
5874 		}
5875 	}
5876 	pmu = ERR_PTR(-ENOENT);
5877 unlock:
5878 	srcu_read_unlock(&pmus_srcu, idx);
5879 
5880 	return pmu;
5881 }
5882 
5883 /*
 * Allocate and initialize an event structure
5885  */
5886 static struct perf_event *
5887 perf_event_alloc(struct perf_event_attr *attr, int cpu,
5888 		 struct task_struct *task,
5889 		 struct perf_event *group_leader,
5890 		 struct perf_event *parent_event,
5891 		 perf_overflow_handler_t overflow_handler,
5892 		 void *context)
5893 {
5894 	struct pmu *pmu;
5895 	struct perf_event *event;
5896 	struct hw_perf_event *hwc;
5897 	long err;
5898 
5899 	if ((unsigned)cpu >= nr_cpu_ids) {
5900 		if (!task || cpu != -1)
5901 			return ERR_PTR(-EINVAL);
5902 	}
5903 
5904 	event = kzalloc(sizeof(*event), GFP_KERNEL);
5905 	if (!event)
5906 		return ERR_PTR(-ENOMEM);
5907 
5908 	/*
5909 	 * Single events are their own group leaders, with an
5910 	 * empty sibling list:
5911 	 */
5912 	if (!group_leader)
5913 		group_leader = event;
5914 
5915 	mutex_init(&event->child_mutex);
5916 	INIT_LIST_HEAD(&event->child_list);
5917 
5918 	INIT_LIST_HEAD(&event->group_entry);
5919 	INIT_LIST_HEAD(&event->event_entry);
5920 	INIT_LIST_HEAD(&event->sibling_list);
5921 	INIT_LIST_HEAD(&event->rb_entry);
5922 
5923 	init_waitqueue_head(&event->waitq);
5924 	init_irq_work(&event->pending, perf_pending_event);
5925 
5926 	mutex_init(&event->mmap_mutex);
5927 
5928 	event->cpu		= cpu;
5929 	event->attr		= *attr;
5930 	event->group_leader	= group_leader;
5931 	event->pmu		= NULL;
5932 	event->oncpu		= -1;
5933 
5934 	event->parent		= parent_event;
5935 
5936 	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
5937 	event->id		= atomic64_inc_return(&perf_event_id);
5938 
5939 	event->state		= PERF_EVENT_STATE_INACTIVE;
5940 
5941 	if (task) {
5942 		event->attach_state = PERF_ATTACH_TASK;
5943 #ifdef CONFIG_HAVE_HW_BREAKPOINT
5944 		/*
5945 		 * hw_breakpoint is a bit difficult here..
5946 		 */
5947 		if (attr->type == PERF_TYPE_BREAKPOINT)
5948 			event->hw.bp_target = task;
5949 #endif
5950 	}
5951 
5952 	if (!overflow_handler && parent_event) {
5953 		overflow_handler = parent_event->overflow_handler;
5954 		context = parent_event->overflow_handler_context;
5955 	}
5956 
5957 	event->overflow_handler	= overflow_handler;
5958 	event->overflow_handler_context = context;
5959 
5960 	if (attr->disabled)
5961 		event->state = PERF_EVENT_STATE_OFF;
5962 
5963 	pmu = NULL;
5964 
5965 	hwc = &event->hw;
5966 	hwc->sample_period = attr->sample_period;
5967 	if (attr->freq && attr->sample_freq)
5968 		hwc->sample_period = 1;
5969 	hwc->last_period = hwc->sample_period;
5970 
5971 	local64_set(&hwc->period_left, hwc->sample_period);
5972 
5973 	/*
5974 	 * we currently do not support PERF_FORMAT_GROUP on inherited events
5975 	 */
5976 	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5977 		goto done;
5978 
5979 	pmu = perf_init_event(event);
5980 
5981 done:
5982 	err = 0;
5983 	if (!pmu)
5984 		err = -EINVAL;
5985 	else if (IS_ERR(pmu))
5986 		err = PTR_ERR(pmu);
5987 
5988 	if (err) {
5989 		if (event->ns)
5990 			put_pid_ns(event->ns);
5991 		kfree(event);
5992 		return ERR_PTR(err);
5993 	}
5994 
5995 	if (!event->parent) {
5996 		if (event->attach_state & PERF_ATTACH_TASK)
5997 			static_key_slow_inc(&perf_sched_events.key);
5998 		if (event->attr.mmap || event->attr.mmap_data)
5999 			atomic_inc(&nr_mmap_events);
6000 		if (event->attr.comm)
6001 			atomic_inc(&nr_comm_events);
6002 		if (event->attr.task)
6003 			atomic_inc(&nr_task_events);
6004 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6005 			err = get_callchain_buffers();
6006 			if (err) {
6007 				free_event(event);
6008 				return ERR_PTR(err);
6009 			}
6010 		}
6011 		if (has_branch_stack(event)) {
6012 			static_key_slow_inc(&perf_sched_events.key);
6013 			if (!(event->attach_state & PERF_ATTACH_TASK))
6014 				atomic_inc(&per_cpu(perf_branch_stack_events,
6015 						    event->cpu));
6016 		}
6017 	}
6018 
6019 	return event;
6020 }
6021 
6022 static int perf_copy_attr(struct perf_event_attr __user *uattr,
6023 			  struct perf_event_attr *attr)
6024 {
6025 	u32 size;
6026 	int ret;
6027 
6028 	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6029 		return -EFAULT;
6030 
6031 	/*
6032 	 * zero the full structure, so that a short copy leaves the remaining fields zeroed.
6033 	 */
6034 	memset(attr, 0, sizeof(*attr));
6035 
6036 	ret = get_user(size, &uattr->size);
6037 	if (ret)
6038 		return ret;
6039 
6040 	if (size > PAGE_SIZE)	/* silly large */
6041 		goto err_size;
6042 
6043 	if (!size)		/* abi compat */
6044 		size = PERF_ATTR_SIZE_VER0;
6045 
6046 	if (size < PERF_ATTR_SIZE_VER0)
6047 		goto err_size;
6048 
6049 	/*
6050 	 * If we're handed a bigger struct than we know of,
6051 	 * ensure all the unknown bits are 0 - i.e. new
6052 	 * user-space does not rely on any kernel feature
6053 	 * extensions we don't know about yet.
6054 	 */
6055 	if (size > sizeof(*attr)) {
6056 		unsigned char __user *addr;
6057 		unsigned char __user *end;
6058 		unsigned char val;
6059 
6060 		addr = (void __user *)uattr + sizeof(*attr);
6061 		end  = (void __user *)uattr + size;
6062 
6063 		for (; addr < end; addr++) {
6064 			ret = get_user(val, addr);
6065 			if (ret)
6066 				return ret;
6067 			if (val)
6068 				goto err_size;
6069 		}
6070 		size = sizeof(*attr);
6071 	}
6072 
6073 	ret = copy_from_user(attr, uattr, size);
6074 	if (ret)
6075 		return -EFAULT;
6076 
6077 	if (attr->__reserved_1)
6078 		return -EINVAL;
6079 
6080 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
6081 		return -EINVAL;
6082 
6083 	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
6084 		return -EINVAL;
6085 
6086 	if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
6087 		u64 mask = attr->branch_sample_type;
6088 
6089 		/* only using defined bits */
6090 		if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
6091 			return -EINVAL;
6092 
6093 		/* at least one branch bit must be set */
6094 		if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
6095 			return -EINVAL;
6096 
6097 		/* kernel level capture: check permissions */
6098 		if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
6099 		    && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6100 			return -EACCES;
6101 
6102 		/* propagate the priv level when it is not set for branches */
6103 		if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
6104 
6105 			/* exclude_kernel checked on syscall entry */
6106 			if (!attr->exclude_kernel)
6107 				mask |= PERF_SAMPLE_BRANCH_KERNEL;
6108 
6109 			if (!attr->exclude_user)
6110 				mask |= PERF_SAMPLE_BRANCH_USER;
6111 
6112 			if (!attr->exclude_hv)
6113 				mask |= PERF_SAMPLE_BRANCH_HV;
6114 			/*
6115 			 * adjust user setting (for HW filter setup)
6116 			 */
6117 			attr->branch_sample_type = mask;
6118 		}
6119 	}
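	/*
	 * Worked example of the propagation above (illustrative): with
	 * exclude_kernel = 1, exclude_user = 0, exclude_hv = 0 and
	 * branch_sample_type = PERF_SAMPLE_BRANCH_ANY, no priv level is
	 * set in the mask, so it becomes PERF_SAMPLE_BRANCH_ANY |
	 * PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_HV, mirroring
	 * the event's own priv levels.
	 */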
6120 out:
6121 	return ret;
6122 
6123 err_size:
6124 	put_user(sizeof(*attr), &uattr->size);
6125 	ret = -E2BIG;
6126 	goto out;
6127 }
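/*
 * A minimal user-space sketch of the size handshake above (illustrative,
 * not kernel code): the caller advertises the attr size it was compiled
 * against; an older kernel that finds non-zero bytes past the size it
 * knows about fails with E2BIG and writes the size it supports back into
 * uattr->size, so the caller can drop the newer fields and retry.
 *
 *	struct perf_event_attr attr;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size = sizeof(attr);
 *	... fill in type/config/etc ...
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	if (fd < 0 && errno == E2BIG) {
 *		... attr.size now holds the kernel's supported size;
 *		    clear the unsupported trailing fields and retry ...
 *	}
 */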
6128 
6129 static int
6130 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
6131 {
6132 	struct ring_buffer *rb = NULL, *old_rb = NULL;
6133 	int ret = -EINVAL;
6134 
6135 	if (!output_event)
6136 		goto set;
6137 
6138 	/* don't allow circular references */
6139 	if (event == output_event)
6140 		goto out;
6141 
6142 	/*
6143 	 * Don't allow cross-cpu buffers
6144 	 */
6145 	if (output_event->cpu != event->cpu)
6146 		goto out;
6147 
6148 	/*
6149 	 * If it's not a per-cpu rb, it must be the same task.
6150 	 */
6151 	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
6152 		goto out;
6153 
6154 set:
6155 	mutex_lock(&event->mmap_mutex);
6156 	/* Can't redirect output if we've got an active mmap() */
6157 	if (atomic_read(&event->mmap_count))
6158 		goto unlock;
6159 
6160 	if (output_event) {
6161 		/* get the rb we want to redirect to */
6162 		rb = ring_buffer_get(output_event);
6163 		if (!rb)
6164 			goto unlock;
6165 	}
6166 
6167 	old_rb = event->rb;
6168 	rcu_assign_pointer(event->rb, rb);
6169 	if (old_rb)
6170 		ring_buffer_detach(event, old_rb);
6171 	ret = 0;
6172 unlock:
6173 	mutex_unlock(&event->mmap_mutex);
6174 
6175 	if (old_rb)
6176 		ring_buffer_put(old_rb);
6177 out:
6178 	return ret;
6179 }
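/*
 * A minimal sketch of how user space reaches this function (illustrative,
 * not kernel code): two events are opened on the same CPU, the first
 * event's buffer is mmap()ed, and the second event's output is redirected
 * into it so a single reader drains both streams. Note the ordering: the
 * target must already have a ring buffer, and the redirected event must
 * not be mmap()ed itself.
 *
 *	fd_a = syscall(__NR_perf_event_open, &attr_a, pid, cpu, -1, 0);
 *	fd_b = syscall(__NR_perf_event_open, &attr_b, pid, cpu, -1, 0);
 *	base = mmap(NULL, (1 + 8) * page_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd_a, 0);
 *	ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a);
 */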
6180 
6181 /**
6182  * sys_perf_event_open - open a performance event, associate it to a task/cpu
6183  *
6184  * @attr_uptr:	event_id type attributes for monitoring/sampling
6185  * @pid:		target pid
6186  * @cpu:		target cpu
6187  * @group_fd:		group leader event fd
 * @flags:		perf event open flags (PERF_FLAG_*)
6188  */
6189 SYSCALL_DEFINE5(perf_event_open,
6190 		struct perf_event_attr __user *, attr_uptr,
6191 		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
6192 {
6193 	struct perf_event *group_leader = NULL, *output_event = NULL;
6194 	struct perf_event *event, *sibling;
6195 	struct perf_event_attr attr;
6196 	struct perf_event_context *ctx;
6197 	struct file *event_file = NULL;
6198 	struct file *group_file = NULL;
6199 	struct task_struct *task = NULL;
6200 	struct pmu *pmu;
6201 	int event_fd;
6202 	int move_group = 0;
6203 	int fput_needed = 0;
6204 	int err;
6205 
6206 	/* for future expandability... */
6207 	if (flags & ~PERF_FLAG_ALL)
6208 		return -EINVAL;
6209 
6210 	err = perf_copy_attr(attr_uptr, &attr);
6211 	if (err)
6212 		return err;
6213 
6214 	if (!attr.exclude_kernel) {
6215 		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6216 			return -EACCES;
6217 	}
6218 
6219 	if (attr.freq) {
6220 		if (attr.sample_freq > sysctl_perf_event_sample_rate)
6221 			return -EINVAL;
6222 	}
6223 
6224 	/*
6225 	 * In cgroup mode, the pid argument is used to pass the fd
6226 	 * opened to the cgroup directory in cgroupfs. The cpu argument
6227 	 * designates the cpu on which to monitor threads from that
6228 	 * cgroup.
6229 	 */
6230 	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
6231 		return -EINVAL;
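	/*
	 * For example (illustrative; the mount path and group name are
	 * made up):
	 *
	 *	cgrp_fd = open("/sys/fs/cgroup/perf_event/mygrp", O_RDONLY);
	 *	fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, 3, -1,
	 *		     PERF_FLAG_PID_CGROUP);
	 *
	 * monitors all threads of cgroup "mygrp" while they run on CPU 3.
	 */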
6232 
6233 	event_fd = get_unused_fd_flags(O_RDWR);
6234 	if (event_fd < 0)
6235 		return event_fd;
6236 
6237 	if (group_fd != -1) {
6238 		group_leader = perf_fget_light(group_fd, &fput_needed);
6239 		if (IS_ERR(group_leader)) {
6240 			err = PTR_ERR(group_leader);
6241 			goto err_fd;
6242 		}
6243 		group_file = group_leader->filp;
6244 		if (flags & PERF_FLAG_FD_OUTPUT)
6245 			output_event = group_leader;
6246 		if (flags & PERF_FLAG_FD_NO_GROUP)
6247 			group_leader = NULL;
6248 	}
6249 
6250 	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
6251 		task = find_lively_task_by_vpid(pid);
6252 		if (IS_ERR(task)) {
6253 			err = PTR_ERR(task);
6254 			goto err_group_fd;
6255 		}
6256 	}
6257 
6258 	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
6259 				 NULL, NULL);
6260 	if (IS_ERR(event)) {
6261 		err = PTR_ERR(event);
6262 		goto err_task;
6263 	}
6264 
6265 	if (flags & PERF_FLAG_PID_CGROUP) {
6266 		err = perf_cgroup_connect(pid, event, &attr, group_leader);
6267 		if (err)
6268 			goto err_alloc;
6269 		/*
6270 		 * one more event:
6271 		 * - that has a cgroup constraint on event->cpu
6272 		 * - that may need work at context switch time
6273 		 */
6274 		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
6275 		static_key_slow_inc(&perf_sched_events.key);
6276 	}
6277 
6278 	/*
6279 	 * Special case software events and allow them to be part of
6280 	 * any hardware group.
6281 	 */
6282 	pmu = event->pmu;
6283 
6284 	if (group_leader &&
6285 	    (is_software_event(event) != is_software_event(group_leader))) {
6286 		if (is_software_event(event)) {
6287 			/*
6288 			 * If event and group_leader are not both software
6289 			 * events and event is one, the group leader is not.
6290 			 *
6291 			 * Allow the addition of software events to !software
6292 			 * groups; this is safe because software events never
6293 			 * fail to schedule.
6294 			 */
6295 			pmu = group_leader->pmu;
6296 		} else if (is_software_event(group_leader) &&
6297 			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
6298 			/*
6299 			 * In case the group is a pure software group, and we
6300 			 * try to add a hardware event, move the whole group to
6301 			 * the hardware context.
6302 			 */
6303 			move_group = 1;
6304 		}
6305 	}
6306 
6307 	/*
6308 	 * Get the target context (task or percpu):
6309 	 */
6310 	ctx = find_get_context(pmu, task, cpu);
6311 	if (IS_ERR(ctx)) {
6312 		err = PTR_ERR(ctx);
6313 		goto err_alloc;
6314 	}
6315 
6316 	if (task) {
6317 		put_task_struct(task);
6318 		task = NULL;
6319 	}
6320 
6321 	/*
6322 	 * Look up the group leader (we will attach this event to it):
6323 	 */
6324 	if (group_leader) {
6325 		err = -EINVAL;
6326 
6327 		/*
6328 		 * Do not allow a recursive hierarchy (this new sibling
6329 		 * becoming part of another group-sibling):
6330 		 */
6331 		if (group_leader->group_leader != group_leader)
6332 			goto err_context;
6333 		/*
6334 		 * Do not allow attaching to a group in a different
6335 		 * task or CPU context:
6336 		 */
6337 		if (move_group) {
6338 			if (group_leader->ctx->type != ctx->type)
6339 				goto err_context;
6340 		} else {
6341 			if (group_leader->ctx != ctx)
6342 				goto err_context;
6343 		}
6344 
6345 		/*
6346 		 * Only a group leader can be exclusive or pinned
6347 		 */
6348 		if (attr.exclusive || attr.pinned)
6349 			goto err_context;
6350 	}
6351 
6352 	if (output_event) {
6353 		err = perf_event_set_output(event, output_event);
6354 		if (err)
6355 			goto err_context;
6356 	}
6357 
6358 	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
6359 	if (IS_ERR(event_file)) {
6360 		err = PTR_ERR(event_file);
6361 		goto err_context;
6362 	}
6363 
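	/*
	 * Moving a pure-software group under a hardware PMU: pull the
	 * leader and all its siblings out of the old (software) context
	 * before re-installing them below, dropping the context
	 * references they held.
	 */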
6364 	if (move_group) {
6365 		struct perf_event_context *gctx = group_leader->ctx;
6366 
6367 		mutex_lock(&gctx->mutex);
6368 		perf_remove_from_context(group_leader);
6369 		list_for_each_entry(sibling, &group_leader->sibling_list,
6370 				    group_entry) {
6371 			perf_remove_from_context(sibling);
6372 			put_ctx(gctx);
6373 		}
6374 		mutex_unlock(&gctx->mutex);
6375 		put_ctx(gctx);
6376 	}
6377 
6378 	event->filp = event_file;
6379 	WARN_ON_ONCE(ctx->parent_ctx);
6380 	mutex_lock(&ctx->mutex);
6381 
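	/*
	 * If the group was moved, install the leader and its siblings in
	 * the new context, taking one context reference per installed
	 * event to balance the references dropped from the old context.
	 */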
6382 	if (move_group) {
6383 		perf_install_in_context(ctx, group_leader, cpu);
6384 		get_ctx(ctx);
6385 		list_for_each_entry(sibling, &group_leader->sibling_list,
6386 				    group_entry) {
6387 			perf_install_in_context(ctx, sibling, cpu);
6388 			get_ctx(ctx);
6389 		}
6390 	}
6391 
6392 	perf_install_in_context(ctx, event, cpu);
6393 	++ctx->generation;
6394 	perf_unpin_context(ctx);
6395 	mutex_unlock(&ctx->mutex);
6396 
6397 	event->owner = current;
6398 
6399 	mutex_lock(&current->perf_event_mutex);
6400 	list_add_tail(&event->owner_entry, &current->perf_event_list);
6401 	mutex_unlock(&current->perf_event_mutex);
6402 
6403 	/*
6404 	 * Precalculate sample_data sizes
6405 	 */
6406 	perf_event__header_size(event);
6407 	perf_event__id_header_size(event);
6408 
6409 	/*
6410 	 * Drop the reference on the group_event after placing the
6411 	 * new event on the sibling_list. This ensures destruction
6412 	 * of the group leader will find the pointer to itself in
6413 	 * perf_group_detach().
6414 	 */
6415 	fput_light(group_file, fput_needed);
6416 	fd_install(event_fd, event_file);
6417 	return event_fd;
6418 
6419 err_context:
6420 	perf_unpin_context(ctx);
6421 	put_ctx(ctx);
6422 err_alloc:
6423 	free_event(event);
6424 err_task:
6425 	if (task)
6426 		put_task_struct(task);
6427 err_group_fd:
6428 	fput_light(group_file, fput_needed);
6429 err_fd:
6430 	put_unused_fd(event_fd);
6431 	return err;
6432 }
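/*
 * A minimal user-space usage sketch (illustrative, not kernel code):
 * count user-level instructions of the calling thread; error handling
 * is omitted.
 *
 *	struct perf_event_attr attr;
 *	long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size = sizeof(attr);
 *	attr.type = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled = 1;
 *	attr.exclude_kernel = 1;
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	... measured code ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 */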
6433 
6434 /**
6435  * perf_event_create_kernel_counter
6436  *
6437  * @attr: attributes of the counter to create
6438  * @cpu: cpu on which the counter is bound
6439  * @task: task to profile (NULL for percpu)
 * @overflow_handler: callback invoked when the counter overflows
 * @context: opaque context passed back to @overflow_handler
6440  */
6441 struct perf_event *
6442 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
6443 				 struct task_struct *task,
6444 				 perf_overflow_handler_t overflow_handler,
6445 				 void *context)
6446 {
6447 	struct perf_event_context *ctx;
6448 	struct perf_event *event;
6449 	int err;
6450 
6455 	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
6456 				 overflow_handler, context);
6457 	if (IS_ERR(event)) {
6458 		err = PTR_ERR(event);
6459 		goto err;
6460 	}
6461 
	/*
	 * Get the target context (task or percpu):
	 */
6462 	ctx = find_get_context(event->pmu, task, cpu);
6463 	if (IS_ERR(ctx)) {
6464 		err = PTR_ERR(ctx);
6465 		goto err_free;
6466 	}
6467 
6468 	event->filp = NULL;
6469 	WARN_ON_ONCE(ctx->parent_ctx);
6470 	mutex_lock(&ctx->mutex);
6471 	perf_install_in_context(ctx, event, cpu);
6472 	++ctx->generation;
6473 	perf_unpin_context(ctx);
6474 	mutex_unlock(&ctx->mutex);
6475 
6476 	return event;
6477 
6478 err_free:
6479 	free_event(event);
6480 err:
6481 	return ERR_PTR(err);
6482 }
6483 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
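/*
 * A minimal in-kernel usage sketch (illustrative; my_overflow is a
 * made-up callback), in the style of the hard-lockup watchdog: a per-CPU
 * cycle counter that fires my_overflow every sample_period cycles.
 *
 *	static void my_overflow(struct perf_event *event,
 *				struct perf_sample_data *data,
 *				struct pt_regs *regs)
 *	{
 *		... react to the overflow ...
 *	}
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */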
6484 
6485 static void sync_child_event(struct perf_event *child_event,
6486 			       struct task_struct *child)
6487 {
6488 	struct perf_event *parent_event = child_event->parent;
6489 	u64 child_val;
6490 
6491 	if (child_event->attr.inherit_stat)
6492 		perf_event_read_event(child_event, child);
6493 
6494 	child_val = perf_event_count(child_event);
6495 
6496 	/*
6497 	 * Add back the child's count to the parent's count:
6498 	 */
6499 	atomic64_add(child_val, &parent_event->child_count);
6500 	atomic64_add(child_event->total_time_enabled,
6501 		     &parent_event->child_total_time_enabled);
6502 	atomic64_add(child_event->total_time_running,
6503 		     &parent_event->child_total_time_running);
6504 
6505 	/*
6506 	 * Remove this event from the parent's list
6507 	 */
6508 	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6509 	mutex_lock(&parent_event->child_mutex);
6510 	list_del_init(&child_event->child_list);
6511 	mutex_unlock(&parent_event->child_mutex);
6512 
6513 	/*
6514 	 * Release the parent event, if this was the last
6515 	 * reference to it.
6516 	 */
6517 	fput(parent_event->filp);
6518 }
6519 
6520 static void
6521 __perf_event_exit_task(struct perf_event *child_event,
6522 			 struct perf_event_context *child_ctx,
6523 			 struct task_struct *child)
6524 {
6525 	if (child_event->parent) {
6526 		raw_spin_lock_irq(&child_ctx->lock);
6527 		perf_group_detach(child_event);
6528 		raw_spin_unlock_irq(&child_ctx->lock);
6529 	}
6530 
6531 	perf_remove_from_context(child_event);
6532 
6533 	/*
6534 	 * It can happen that the parent exits first, and has events
6535 	 * that are still around due to the child reference. These
6536 	 * events need to be zapped.
6537 	 */
6538 	if (child_event->parent) {
6539 		sync_child_event(child_event, child);
6540 		free_event(child_event);
6541 	}
6542 }
6543 
6544 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
6545 {
6546 	struct perf_event *child_event, *tmp;
6547 	struct perf_event_context *child_ctx;
6548 	unsigned long flags;
6549 
6550 	if (likely(!child->perf_event_ctxp[ctxn])) {
6551 		perf_event_task(child, NULL, 0);
6552 		return;
6553 	}
6554 
6555 	local_irq_save(flags);
6556 	/*
6557 	 * We can't reschedule here because interrupts are disabled,
6558 	 * and either child is current or it is a task that can't be
6559 	 * scheduled, so we are now safe from a reschedule changing
6560 	 * our context out from under us.
6561 	 */
6562 	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
6563 
6564 	/*
6565 	 * Take the context lock here so that if find_get_context is
6566 	 * reading child->perf_event_ctxp, we wait until it has
6567 	 * incremented the context's refcount before we do put_ctx below.
6568 	 */
6569 	raw_spin_lock(&child_ctx->lock);
6570 	task_ctx_sched_out(child_ctx);
6571 	child->perf_event_ctxp[ctxn] = NULL;
6572 	/*
6573 	 * If this context is a clone, unclone it so it can't get
6574 	 * swapped to another process while we're removing all
6575 	 * the events from it.
6576 	 */
6577 	unclone_ctx(child_ctx);
6578 	update_context_time(child_ctx);
6579 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6580 
6581 	/*
6582 	 * Report the task dead after unscheduling the events so that we
6583 	 * won't get any samples after PERF_RECORD_EXIT. We can however still
6584 	 * get a few PERF_RECORD_READ events.
6585 	 */
6586 	perf_event_task(child, child_ctx, 0);
6587 
6588 	/*
6589 	 * We can recurse on the same lock type through:
6590 	 *
6591 	 *   __perf_event_exit_task()
6592 	 *     sync_child_event()
6593 	 *       fput(parent_event->filp)
6594 	 *         perf_release()
6595 	 *           mutex_lock(&ctx->mutex)
6596 	 *
6597 	 * But since it's the parent context it won't be the same instance.
6598 	 */
6599 	mutex_lock(&child_ctx->mutex);
6600 
6601 again:
6602 	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
6603 				 group_entry)
6604 		__perf_event_exit_task(child_event, child_ctx, child);
6605 
6606 	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
6607 				 group_entry)
6608 		__perf_event_exit_task(child_event, child_ctx, child);
6609 
6610 	/*
6611 	 * If the last event was a group event, it will have appended all
6612 	 * its siblings to the list, but 'tmp' was obtained before that and
6613 	 * may still point to the list head, terminating the iteration early.
6614 	 */
6615 	if (!list_empty(&child_ctx->pinned_groups) ||
6616 	    !list_empty(&child_ctx->flexible_groups))
6617 		goto again;
6618 
6619 	mutex_unlock(&child_ctx->mutex);
6620 
6621 	put_ctx(child_ctx);
6622 }
6623 
6624 /*
6625  * When a child task exits, feed back event values to parent events.
6626  */
6627 void perf_event_exit_task(struct task_struct *child)
6628 {
6629 	struct perf_event *event, *tmp;
6630 	int ctxn;
6631 
6632 	mutex_lock(&child->perf_event_mutex);
6633 	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
6634 				 owner_entry) {
6635 		list_del_init(&event->owner_entry);
6636 
6637 		/*
6638 		 * Ensure the list deletion is visible before we clear
6639 		 * the owner; this closes a race against perf_release() where
6640 		 * we need to serialize on the owner->perf_event_mutex.
6641 		 */
6642 		smp_wmb();
6643 		event->owner = NULL;
6644 	}
6645 	mutex_unlock(&child->perf_event_mutex);
6646 
6647 	for_each_task_context_nr(ctxn)
6648 		perf_event_exit_task_context(child, ctxn);
6649 }
6650 
6651 static void perf_free_event(struct perf_event *event,
6652 			    struct perf_event_context *ctx)
6653 {
6654 	struct perf_event *parent = event->parent;
6655 
6656 	if (WARN_ON_ONCE(!parent))
6657 		return;
6658 
6659 	mutex_lock(&parent->child_mutex);
6660 	list_del_init(&event->child_list);
6661 	mutex_unlock(&parent->child_mutex);
6662 
6663 	fput(parent->filp);
6664 
6665 	perf_group_detach(event);
6666 	list_del_event(event, ctx);
6667 	free_event(event);
6668 }
6669 
6670 /*
6671  * Free an unexposed, unused context, as created by inheritance in
6672  * perf_event_init_task() below; used by fork() in its failure path.
6673  */
6674 void perf_event_free_task(struct task_struct *task)
6675 {
6676 	struct perf_event_context *ctx;
6677 	struct perf_event *event, *tmp;
6678 	int ctxn;
6679 
6680 	for_each_task_context_nr(ctxn) {
6681 		ctx = task->perf_event_ctxp[ctxn];
6682 		if (!ctx)
6683 			continue;
6684 
6685 		mutex_lock(&ctx->mutex);
6686 again:
6687 		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
6688 				group_entry)
6689 			perf_free_event(event, ctx);
6690 
6691 		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
6692 				group_entry)
6693 			perf_free_event(event, ctx);
6694 
6695 		if (!list_empty(&ctx->pinned_groups) ||
6696 				!list_empty(&ctx->flexible_groups))
6697 			goto again;
6698 
6699 		mutex_unlock(&ctx->mutex);
6700 
6701 		put_ctx(ctx);
6702 	}
6703 }
6704 
6705 void perf_event_delayed_put(struct task_struct *task)
6706 {
6707 	int ctxn;
6708 
6709 	for_each_task_context_nr(ctxn)
6710 		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
6711 }
6712 
6713 /*
6714  * Inherit an event from the parent task into the child task:
6715  */
6716 static struct perf_event *
6717 inherit_event(struct perf_event *parent_event,
6718 	      struct task_struct *parent,
6719 	      struct perf_event_context *parent_ctx,
6720 	      struct task_struct *child,
6721 	      struct perf_event *group_leader,
6722 	      struct perf_event_context *child_ctx)
6723 {
6724 	struct perf_event *child_event;
6725 	unsigned long flags;
6726 
6727 	/*
6728 	 * Instead of creating recursive hierarchies of events,
6729 	 * we link inherited events back to the original parent,
6730 	 * which is guaranteed to have a filp that we use as the
6731 	 * reference count:
6732 	 */
6733 	if (parent_event->parent)
6734 		parent_event = parent_event->parent;
6735 
6736 	child_event = perf_event_alloc(&parent_event->attr,
6737 					   parent_event->cpu,
6738 					   child,
6739 					   group_leader, parent_event,
6740 				           NULL, NULL);
6741 	if (IS_ERR(child_event))
6742 		return child_event;
6743 	get_ctx(child_ctx);
6744 
6745 	/*
6746 	 * Make the child state follow the state of the parent event,
6747 	 * not its attr.disabled bit.  We hold the parent's mutex,
6748 	 * so we won't race with perf_event_{en, dis}able_family.
6749 	 */
6750 	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
6751 		child_event->state = PERF_EVENT_STATE_INACTIVE;
6752 	else
6753 		child_event->state = PERF_EVENT_STATE_OFF;
6754 
6755 	if (parent_event->attr.freq) {
6756 		u64 sample_period = parent_event->hw.sample_period;
6757 		struct hw_perf_event *hwc = &child_event->hw;
6758 
6759 		hwc->sample_period = sample_period;
6760 		hwc->last_period   = sample_period;
6761 
6762 		local64_set(&hwc->period_left, sample_period);
6763 	}
6764 
6765 	child_event->ctx = child_ctx;
6766 	child_event->overflow_handler = parent_event->overflow_handler;
6767 	child_event->overflow_handler_context
6768 		= parent_event->overflow_handler_context;
6769 
6770 	/*
6771 	 * Precalculate sample_data sizes
6772 	 */
6773 	perf_event__header_size(child_event);
6774 	perf_event__id_header_size(child_event);
6775 
6776 	/*
6777 	 * Link it up in the child's context:
6778 	 */
6779 	raw_spin_lock_irqsave(&child_ctx->lock, flags);
6780 	add_event_to_ctx(child_event, child_ctx);
6781 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6782 
6783 	/*
6784 	 * Get a reference to the parent filp - we will fput it
6785 	 * when the child event exits. This is safe to do because
6786 	 * we are in the parent and we know that the filp still
6787 	 * exists and has a nonzero count:
6788 	 */
6789 	atomic_long_inc(&parent_event->filp->f_count);
6790 
6791 	/*
6792 	 * Link this into the parent event's child list
6793 	 */
6794 	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6795 	mutex_lock(&parent_event->child_mutex);
6796 	list_add_tail(&child_event->child_list, &parent_event->child_list);
6797 	mutex_unlock(&parent_event->child_mutex);
6798 
6799 	return child_event;
6800 }
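/*
 * For orientation, the inheritance call chain during fork() looks like:
 *
 *	copy_process()
 *	  perf_event_init_task()
 *	    perf_event_init_context()		(one per task context nr)
 *	      inherit_task_group()
 *	        inherit_group()			(leader plus siblings)
 *	          inherit_event()		(links child to original parent)
 */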
6801 
6802 static int inherit_group(struct perf_event *parent_event,
6803 	      struct task_struct *parent,
6804 	      struct perf_event_context *parent_ctx,
6805 	      struct task_struct *child,
6806 	      struct perf_event_context *child_ctx)
6807 {
6808 	struct perf_event *leader;
6809 	struct perf_event *sub;
6810 	struct perf_event *child_ctr;
6811 
6812 	leader = inherit_event(parent_event, parent, parent_ctx,
6813 				 child, NULL, child_ctx);
6814 	if (IS_ERR(leader))
6815 		return PTR_ERR(leader);
6816 	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
6817 		child_ctr = inherit_event(sub, parent, parent_ctx,
6818 					    child, leader, child_ctx);
6819 		if (IS_ERR(child_ctr))
6820 			return PTR_ERR(child_ctr);
6821 	}
6822 	return 0;
6823 }
6824 
6825 static int
6826 inherit_task_group(struct perf_event *event, struct task_struct *parent,
6827 		   struct perf_event_context *parent_ctx,
6828 		   struct task_struct *child, int ctxn,
6829 		   int *inherited_all)
6830 {
6831 	int ret;
6832 	struct perf_event_context *child_ctx;
6833 
6834 	if (!event->attr.inherit) {
6835 		*inherited_all = 0;
6836 		return 0;
6837 	}
6838 
6839 	child_ctx = child->perf_event_ctxp[ctxn];
6840 	if (!child_ctx) {
6841 		/*
6842 		 * This is executed from the parent task context, so
6843 		 * inherit events that have been marked for cloning.
6844 		 * First allocate and initialize a context for the
6845 		 * child.
6846 		 */
6847 
6848 		child_ctx = alloc_perf_context(event->pmu, child);
6849 		if (!child_ctx)
6850 			return -ENOMEM;
6851 
6852 		child->perf_event_ctxp[ctxn] = child_ctx;
6853 	}
6854 
6855 	ret = inherit_group(event, parent, parent_ctx,
6856 			    child, child_ctx);
6857 
6858 	if (ret)
6859 		*inherited_all = 0;
6860 
6861 	return ret;
6862 }
6863 
6864 /*
6865  * Initialize the perf_event context in task_struct
6866  */
6867 int perf_event_init_context(struct task_struct *child, int ctxn)
6868 {
6869 	struct perf_event_context *child_ctx, *parent_ctx;
6870 	struct perf_event_context *cloned_ctx;
6871 	struct perf_event *event;
6872 	struct task_struct *parent = current;
6873 	int inherited_all = 1;
6874 	unsigned long flags;
6875 	int ret = 0;
6876 
6877 	if (likely(!parent->perf_event_ctxp[ctxn]))
6878 		return 0;
6879 
6880 	/*
6881 	 * If the parent's context is a clone, pin it so it won't get
6882 	 * swapped under us.
6883 	 */
6884 	parent_ctx = perf_pin_task_context(parent, ctxn);
6885 
6886 	/*
6887 	 * No need to check if parent_ctx != NULL here; since we saw
6888 	 * it non-NULL earlier, the only reason for it to become NULL
6889 	 * is if we exit, and since we're currently in the middle of
6890 	 * a fork we can't be exiting at the same time.
6891 	 */
6892 
6893 	/*
6894 	 * Lock the parent list. No need to lock the child - not PID
6895 	 * hashed yet and not running, so nobody can access it.
6896 	 */
6897 	mutex_lock(&parent_ctx->mutex);
6898 
6899 	/*
6900 	 * We don't have to disable NMIs - we are only looking at
6901 	 * the list, not manipulating it:
6902 	 */
6903 	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
6904 		ret = inherit_task_group(event, parent, parent_ctx,
6905 					 child, ctxn, &inherited_all);
6906 		if (ret)
6907 			break;
6908 	}
6909 
6910 	/*
6911 	 * We can't hold ctx->lock when iterating the ->flexible_groups list due
6912 	 * to allocations, but we need to prevent rotation because
6913 	 * rotate_ctx() will change the list from interrupt context.
6914 	 */
6915 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6916 	parent_ctx->rotate_disable = 1;
6917 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6918 
6919 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
6920 		ret = inherit_task_group(event, parent, parent_ctx,
6921 					 child, ctxn, &inherited_all);
6922 		if (ret)
6923 			break;
6924 	}
6925 
6926 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6927 	parent_ctx->rotate_disable = 0;
6928 
6929 	child_ctx = child->perf_event_ctxp[ctxn];
6930 
6931 	if (child_ctx && inherited_all) {
6932 		/*
6933 		 * Mark the child context as a clone of the parent
6934 		 * context, or of whatever the parent is a clone of.
6935 		 *
6936 		 * Note that if the parent is a clone, holding
6937 		 * parent_ctx->lock prevents it from being uncloned.
6938 		 */
6939 		cloned_ctx = parent_ctx->parent_ctx;
6940 		if (cloned_ctx) {
6941 			child_ctx->parent_ctx = cloned_ctx;
6942 			child_ctx->parent_gen = parent_ctx->parent_gen;
6943 		} else {
6944 			child_ctx->parent_ctx = parent_ctx;
6945 			child_ctx->parent_gen = parent_ctx->generation;
6946 		}
6947 		get_ctx(child_ctx->parent_ctx);
6948 	}
6949 
6950 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6951 	mutex_unlock(&parent_ctx->mutex);
6952 
6953 	perf_unpin_context(parent_ctx);
6954 	put_ctx(parent_ctx);
6955 
6956 	return ret;
6957 }
6958 
6959 /*
6960  * Initialize the perf_event context in task_struct
6961  */
6962 int perf_event_init_task(struct task_struct *child)
6963 {
6964 	int ctxn, ret;
6965 
6966 	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
6967 	mutex_init(&child->perf_event_mutex);
6968 	INIT_LIST_HEAD(&child->perf_event_list);
6969 
6970 	for_each_task_context_nr(ctxn) {
6971 		ret = perf_event_init_context(child, ctxn);
6972 		if (ret)
6973 			return ret;
6974 	}
6975 
6976 	return 0;
6977 }
6978 
6979 static void __init perf_event_init_all_cpus(void)
6980 {
6981 	struct swevent_htable *swhash;
6982 	int cpu;
6983 
6984 	for_each_possible_cpu(cpu) {
6985 		swhash = &per_cpu(swevent_htable, cpu);
6986 		mutex_init(&swhash->hlist_mutex);
6987 		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
6988 	}
6989 }
6990 
6991 static void __cpuinit perf_event_init_cpu(int cpu)
6992 {
6993 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6994 
6995 	mutex_lock(&swhash->hlist_mutex);
6996 	if (swhash->hlist_refcount > 0) {
6997 		struct swevent_hlist *hlist;
6998 
6999 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
7000 		WARN_ON(!hlist);
7001 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
7002 	}
7003 	mutex_unlock(&swhash->hlist_mutex);
7004 }
7005 
7006 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
7007 static void perf_pmu_rotate_stop(struct pmu *pmu)
7008 {
7009 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
7010 
7011 	WARN_ON(!irqs_disabled());
7012 
7013 	list_del_init(&cpuctx->rotation_list);
7014 }
7015 
7016 static void __perf_event_exit_context(void *__info)
7017 {
7018 	struct perf_event_context *ctx = __info;
7019 	struct perf_event *event, *tmp;
7020 
7021 	perf_pmu_rotate_stop(ctx->pmu);
7022 
7023 	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
7024 		__perf_remove_from_context(event);
7025 	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
7026 		__perf_remove_from_context(event);
7027 }
7028 
7029 static void perf_event_exit_cpu_context(int cpu)
7030 {
7031 	struct perf_event_context *ctx;
7032 	struct pmu *pmu;
7033 	int idx;
7034 
7035 	idx = srcu_read_lock(&pmus_srcu);
7036 	list_for_each_entry_rcu(pmu, &pmus, entry) {
7037 		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
7038 
7039 		mutex_lock(&ctx->mutex);
7040 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
7041 		mutex_unlock(&ctx->mutex);
7042 	}
7043 	srcu_read_unlock(&pmus_srcu, idx);
7044 }
7045 
7046 static void perf_event_exit_cpu(int cpu)
7047 {
7048 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7049 
7050 	mutex_lock(&swhash->hlist_mutex);
7051 	swevent_hlist_release(swhash);
7052 	mutex_unlock(&swhash->hlist_mutex);
7053 
7054 	perf_event_exit_cpu_context(cpu);
7055 }
7056 #else
7057 static inline void perf_event_exit_cpu(int cpu) { }
7058 #endif
7059 
7060 static int
7061 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
7062 {
7063 	int cpu;
7064 
7065 	for_each_online_cpu(cpu)
7066 		perf_event_exit_cpu(cpu);
7067 
7068 	return NOTIFY_OK;
7069 }
7070 
7071 /*
7072  * Run the perf reboot notifier at the very last possible moment so that
7073  * the generic watchdog code runs as long as possible.
7074  */
7075 static struct notifier_block perf_reboot_notifier = {
7076 	.notifier_call = perf_reboot,
7077 	.priority = INT_MIN,
7078 };
7079 
7080 static int __cpuinit
7081 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
7082 {
7083 	unsigned int cpu = (long)hcpu;
7084 
7085 	switch (action & ~CPU_TASKS_FROZEN) {
7086 
7087 	case CPU_UP_PREPARE:
7088 	case CPU_DOWN_FAILED:
7089 		perf_event_init_cpu(cpu);
7090 		break;
7091 
7092 	case CPU_UP_CANCELED:
7093 	case CPU_DOWN_PREPARE:
7094 		perf_event_exit_cpu(cpu);
7095 		break;
7096 
7097 	default:
7098 		break;
7099 	}
7100 
7101 	return NOTIFY_OK;
7102 }
7103 
7104 void __init perf_event_init(void)
7105 {
7106 	int ret;
7107 
7108 	idr_init(&pmu_idr);
7109 
7110 	perf_event_init_all_cpus();
7111 	init_srcu_struct(&pmus_srcu);
7112 	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
7113 	perf_pmu_register(&perf_cpu_clock, NULL, -1);
7114 	perf_pmu_register(&perf_task_clock, NULL, -1);
7115 	perf_tp_register();
7116 	perf_cpu_notifier(perf_cpu_notify);
7117 	register_reboot_notifier(&perf_reboot_notifier);
7118 
7119 	ret = init_hw_breakpoint();
7120 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
7121 
7122 	/* do not patch jump label more than once per second */
7123 	jump_label_rate_limit(&perf_sched_events, HZ);
7124 
7125 	/*
7126 	 * Build time assertion that we keep the data_head at the intended
7127 	 * location.  IOW, validate that we got the __reserved[] size right.
7128 	 */
7129 	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
7130 		     != 1024);
7131 }
7132 
7133 static int __init perf_event_sysfs_init(void)
7134 {
7135 	struct pmu *pmu;
7136 	int ret;
7137 
7138 	mutex_lock(&pmus_lock);
7139 
7140 	ret = bus_register(&pmu_bus);
7141 	if (ret)
7142 		goto unlock;
7143 
7144 	list_for_each_entry(pmu, &pmus, entry) {
7145 		if (!pmu->name || pmu->type < 0)
7146 			continue;
7147 
7148 		ret = pmu_dev_alloc(pmu);
7149 		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
7150 	}
7151 	pmu_bus_running = 1;
7152 	ret = 0;
7153 
7154 unlock:
7155 	mutex_unlock(&pmus_lock);
7156 
7157 	return ret;
7158 }
7159 device_initcall(perf_event_sysfs_init);
7160 
7161 #ifdef CONFIG_CGROUP_PERF
7162 static struct cgroup_subsys_state *perf_cgroup_create(struct cgroup *cont)
7163 {
7164 	struct perf_cgroup *jc;
7165 
7166 	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
7167 	if (!jc)
7168 		return ERR_PTR(-ENOMEM);
7169 
7170 	jc->info = alloc_percpu(struct perf_cgroup_info);
7171 	if (!jc->info) {
7172 		kfree(jc);
7173 		return ERR_PTR(-ENOMEM);
7174 	}
7175 
7176 	return &jc->css;
7177 }
7178 
7179 static void perf_cgroup_destroy(struct cgroup *cont)
7180 {
7181 	struct perf_cgroup *jc;
7182 	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
7183 			  struct perf_cgroup, css);
7184 	free_percpu(jc->info);
7185 	kfree(jc);
7186 }
7187 
7188 static int __perf_cgroup_move(void *info)
7189 {
7190 	struct task_struct *task = info;
7191 	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
7192 	return 0;
7193 }
7194 
7195 static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
7196 {
7197 	struct task_struct *task;
7198 
7199 	cgroup_taskset_for_each(task, cgrp, tset)
7200 		task_function_call(task, __perf_cgroup_move, task);
7201 }
7202 
7203 static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
7204 			     struct task_struct *task)
7205 {
7206 	/*
7207 	 * cgroup_exit() is called in the copy_process() failure path.
7208 	 * Ignore this case since the task hasn't run yet; this avoids
7209 	 * poking at half-freed task state from generic code.
7210 	 */
7211 	if (!(task->flags & PF_EXITING))
7212 		return;
7213 
7214 	task_function_call(task, __perf_cgroup_move, task);
7215 }
7216 
7217 struct cgroup_subsys perf_subsys = {
7218 	.name		= "perf_event",
7219 	.subsys_id	= perf_subsys_id,
7220 	.create		= perf_cgroup_create,
7221 	.destroy	= perf_cgroup_destroy,
7222 	.exit		= perf_cgroup_exit,
7223 	.attach		= perf_cgroup_attach,
7224 };
7225 #endif /* CONFIG_CGROUP_PERF */
7226