/* xref: /linux/kernel/events/core.c (revision eecb20720f1b29019725515051e41bc7c079f91f) */
/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include "internal.h"

#include <asm/irq_regs.h>

struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, in which case the function is called directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}

/**
 * cpu_function_call - call a function on a given cpu
 * @cpu:	the cpu to run on
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}
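
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * minimal callback for cpu_function_call()/task_function_call(). The
 * callback runs on the target CPU with interrupts disabled, so it must
 * not sleep. All "example_*" names below are hypothetical.
 */
static int example_read_cpu(void *info)
{
	*(int *)info = smp_processor_id();	/* we run on the target CPU */
	return 0;
}

static int __maybe_unused example_query_cpu(int cpu)
{
	int id = -1;

	/* returns 0 from example_read_cpu(), or -ENXIO if cpu is offline */
	if (cpu_function_call(cpu, example_read_cpu, &id))
		return -1;

	return id;
}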

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct jump_label_key perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;
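
/*
 * Illustrative sketch (not part of the original file): the paranoia
 * level is normally consulted through small helpers; compare the
 * perf_paranoid_*() helpers in <linux/perf_event.h>, which the
 * hypothetical check below approximates for level 1.
 */
static inline bool example_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0; /* unpriv cpu events denied */
}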

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE 100000
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly =
	DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);

	return 0;
}
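
/*
 * Worked example (illustrative, assuming HZ == 1000): writing the
 * default rate of 100000 to the sysctl gives max_samples_per_tick =
 * DIV_ROUND_UP(100000, 1000) = 100 samples per timer tick.
 */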

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

static void ring_buffer_attach(struct perf_event *event,
			       struct ring_buffer *rb);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}
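
/*
 * Illustrative sketch (not part of the original file): the helpers
 * above encode the lock order used throughout -- the per-cpu context
 * lock always nests outside the task context lock, so a section that
 * needs both stable can simply bracket itself like this.
 */
static void __maybe_unused example_ctx_critical(struct perf_cpu_context *cpuctx)
{
	struct perf_event_context *task_ctx = cpuctx->task_ctx;

	perf_ctx_lock(cpuctx, task_ctx);
	/* both cpuctx->ctx and task_ctx (if any) are stable here */
	perf_ctx_unlock(cpuctx, task_ctx);
}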

#ifdef CONFIG_CGROUP_PERF

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_subsys_state(task, perf_subsys_id),
			struct perf_cgroup, css);
}

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	return !event->cgrp || event->cgrp == cpuctx->cgrp;
}

static inline void perf_get_cgroup(struct perf_event *event)
{
	css_get(&event->cgrp->css);
}

static inline void perf_put_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	perf_put_cgroup(event);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to
				 * allow event_filter_match() to not
				 * have to pass task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * which will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out during ctxsw-out if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct file *file;
	int ret = 0, fput_needed;

	file = fget_light(fd, &fput_needed);
	if (!file)
		return -EBADF;

	css = cgroup_css_from_dir(file, perf_subsys_id);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/* must be done before we fput() the file */
	perf_get_cgroup(event);

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fput_light(file, fput_needed);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_cgroup_mark_enabled() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}
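
/*
 * Illustrative sketch (not part of the original file): disable/enable
 * nest by way of the per-cpu pmu_disable_count, so a section can wrap
 * itself in the pair without caring whether the caller already
 * disabled the pmu.
 */
static void __maybe_unused example_pmu_section(struct pmu *pmu)
{
	perf_pmu_disable(pmu);	/* count 0 -> 1: hardware gets disabled */
	perf_pmu_disable(pmu);	/* count 1 -> 2: no further hardware access */
	/* ... reprogram events ... */
	perf_pmu_enable(pmu);	/* count 2 -> 1: hardware stays disabled */
	perf_pmu_enable(pmu);	/* count 1 -> 0: hardware re-enabled */
}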

static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		kfree_rcu(ctx, rcu_head);
	}
}

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}
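
/*
 * Worked example (illustrative): if ctx->timestamp was 1000 and
 * perf_clock() now reads 1250, ctx->time advances by 250 and
 * ctx->timestamp becomes 1250, so back-to-back calls only ever
 * accumulate time that actually elapsed.
 */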

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand-alone event or group leader, we go to the context
	 * list; group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}
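
/*
 * Worked example (illustrative): a leader with two siblings and
 * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 * PERF_FORMAT_TOTAL_TIME_ENABLED reads back as
 *
 *	{ u64 nr;			// 3
 *	  u64 time_enabled;
 *	  { u64 value; u64 id; } cnt[3]; }
 *
 * giving read_size = 2*8 + 3 * (2*8) = 64 bytes.
 */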

static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}
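
/*
 * E.g. (illustrative): sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME
 * yields id_header_size = sizeof(tid_entry) + sizeof(time) = 8 + 8 =
 * 16 bytes of identifying data carried by each record.
 */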

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid a stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with an smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event; our
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
static int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *    tstamp - ctx->timestamp
	 * is equivalent to
	 *    tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the same time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events: since we just scheduled in, we might have
	 * missed several ticks already, and for a heavily scheduling task
	 * there is little guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	perf_set_shadow_time(event, ctx, tstamp);

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone through event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	return -EAGAIN;
}
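
/*
 * Illustrative sketch (not part of the original file): the pmu
 * transaction protocol group_sched_in() relies on, reduced to two
 * events. Every successful ->add() is undone with ->del() before the
 * transaction is cancelled, mirroring event_sched_out() above.
 */
static int __maybe_unused example_group_txn(struct pmu *pmu,
					    struct perf_event *a,
					    struct perf_event *b)
{
	pmu->start_txn(pmu);

	if (pmu->add(a, PERF_EF_START)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	if (pmu->add(b, PERF_EF_START)) {
		pmu->del(a, 0);			/* roll back the partial group */
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	if (pmu->commit_txn(pmu)) {
		pmu->del(b, 0);			/* commit failed: undo both */
		pmu->del(a, 0);
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	return 0;
}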

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task);

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}

/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static int __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

	perf_ctx_lock(cpuctx, task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	/*
	 * If there was an active task_ctx schedule it out.
	 */
	if (task_ctx)
		task_ctx_sched_out(task_ctx);

	/*
	 * If the context we're installing events in is not the
	 * active task_ctx, flip them.
	 */
	if (ctx->task && task_ctx != ctx) {
		if (task_ctx)
			raw_spin_unlock(&task_ctx->lock);
		raw_spin_lock(&ctx->lock);
		task_ctx = ctx;
	}

	if (task_ctx) {
		cpuctx->task_ctx = task_ctx;
		task = task_ctx->task;
	}

	cpu_ctx_sched_out(cpuctx, EVENT_ALL);

	update_context_time(ctx);
	/*
	 * update cgrp time only if current cgrp
	 * matches event->cgrp. Must be done before
	 * calling add_event_to_ctx()
	 */
	update_cgrp_time_from_event(event);

	add_event_to_ctx(event, ctx);

	/*
	 * Schedule everything back in
	 */
	perf_event_sched_in(cpuctx, task_ctx, task);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, task_ctx);

	return 0;
}

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use an smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	event->ctx = ctx;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		cpu_function_call(cpu, __perf_install_in_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_install_in_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to add the event; our
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event,
					struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
	}
}

/*
 * Cross CPU call to enable a performance event
 */
static int __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	if (WARN_ON_ONCE(!ctx->is_active))
		return -EINVAL;

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;

	/*
	 * set current task's cgroup time reference point
	 */
	perf_cgroup_set_timestamp(current, ctx);

	__perf_event_mark_enabled(event, ctx);

	if (!event_filter_match(event)) {
		if (is_cgroup_event(event))
			perf_cgroup_defer_enabled(event);
		goto unlock;
	}

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_enable, event);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	if (!ctx->is_active) {
		__perf_event_mark_enabled(event, ctx);
		goto out;
	}

	raw_spin_unlock_irq(&ctx->lock);

	if (!task_function_call(task, __perf_event_enable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
		/*
		 * task could have been flipped by a concurrent
		 * perf_event_context_sched_out()
		 */
		task = ctx->task;
		goto retry;
	}

out:
	raw_spin_unlock_irq(&ctx->lock);
}

int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit || !is_sampling_event(event))
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_refresh);
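
/*
 * Usage note (illustrative): userspace reaches perf_event_refresh()
 * via ioctl(fd, PERF_EVENT_IOC_REFRESH, n). Each counter overflow then
 * consumes one unit of event_limit and the event disables itself once
 * n overflows have been delivered, so n == 1 yields a single sample.
 */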
1827 
1828 static void ctx_sched_out(struct perf_event_context *ctx,
1829 			  struct perf_cpu_context *cpuctx,
1830 			  enum event_type_t event_type)
1831 {
1832 	struct perf_event *event;
1833 	int is_active = ctx->is_active;
1834 
1835 	ctx->is_active &= ~event_type;
1836 	if (likely(!ctx->nr_events))
1837 		return;
1838 
1839 	update_context_time(ctx);
1840 	update_cgrp_time_from_cpuctx(cpuctx);
1841 	if (!ctx->nr_active)
1842 		return;
1843 
1844 	perf_pmu_disable(ctx->pmu);
1845 	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
1846 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1847 			group_sched_out(event, cpuctx, ctx);
1848 	}
1849 
1850 	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
1851 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1852 			group_sched_out(event, cpuctx, ctx);
1853 	}
1854 	perf_pmu_enable(ctx->pmu);
1855 }
1856 
1857 /*
1858  * Test whether two contexts are equivalent, i.e. whether they
1859  * have both been cloned from the same version of the same context
1860  * and they both have the same number of enabled events.
1861  * If the number of enabled events is the same, then the set
1862  * of enabled events should be the same, because these are both
1863  * inherited contexts, therefore we can't access individual events
1864  * in them directly with an fd; we can only enable/disable all
1865  * events via prctl, or enable/disable all events in a family
1866  * via ioctl, which will have the same effect on both contexts.
1867  */
1868 static int context_equiv(struct perf_event_context *ctx1,
1869 			 struct perf_event_context *ctx2)
1870 {
1871 	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1872 		&& ctx1->parent_gen == ctx2->parent_gen
1873 		&& !ctx1->pin_count && !ctx2->pin_count;
1874 }
1875 
1876 static void __perf_event_sync_stat(struct perf_event *event,
1877 				     struct perf_event *next_event)
1878 {
1879 	u64 value;
1880 
1881 	if (!event->attr.inherit_stat)
1882 		return;
1883 
1884 	/*
1885 	 * Update the event value, we cannot use perf_event_read()
1886 	 * because we're in the middle of a context switch and have IRQs
1887 	 * disabled, which upsets smp_call_function_single(), however
1888 	 * we know the event must be on the current CPU, therefore we
1889 	 * don't need to use it.
1890 	 */
1891 	switch (event->state) {
1892 	case PERF_EVENT_STATE_ACTIVE:
1893 		event->pmu->read(event);
1894 		/* fall-through */
1895 
1896 	case PERF_EVENT_STATE_INACTIVE:
1897 		update_event_times(event);
1898 		break;
1899 
1900 	default:
1901 		break;
1902 	}
1903 
1904 	/*
1905 	 * In order to keep per-task stats reliable we need to flip the event
1906 	 * values when we flip the contexts.
1907 	 */
1908 	value = local64_read(&next_event->count);
1909 	value = local64_xchg(&event->count, value);
1910 	local64_set(&next_event->count, value);
1911 
1912 	swap(event->total_time_enabled, next_event->total_time_enabled);
1913 	swap(event->total_time_running, next_event->total_time_running);
1914 
1915 	/*
1916 	 * Since we swizzled the values, update the user visible data too.
1917 	 */
1918 	perf_event_update_userpage(event);
1919 	perf_event_update_userpage(next_event);
1920 }
1921 
1922 #define list_next_entry(pos, member) \
1923 	list_entry(pos->member.next, typeof(*pos), member)
1924 
1925 static void perf_event_sync_stat(struct perf_event_context *ctx,
1926 				   struct perf_event_context *next_ctx)
1927 {
1928 	struct perf_event *event, *next_event;
1929 
1930 	if (!ctx->nr_stat)
1931 		return;
1932 
1933 	update_context_time(ctx);
1934 
1935 	event = list_first_entry(&ctx->event_list,
1936 				   struct perf_event, event_entry);
1937 
1938 	next_event = list_first_entry(&next_ctx->event_list,
1939 					struct perf_event, event_entry);
1940 
1941 	while (&event->event_entry != &ctx->event_list &&
1942 	       &next_event->event_entry != &next_ctx->event_list) {
1943 
1944 		__perf_event_sync_stat(event, next_event);
1945 
1946 		event = list_next_entry(event, event_entry);
1947 		next_event = list_next_entry(next_event, event_entry);
1948 	}
1949 }
1950 
1951 static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1952 					 struct task_struct *next)
1953 {
1954 	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
1955 	struct perf_event_context *next_ctx;
1956 	struct perf_event_context *parent;
1957 	struct perf_cpu_context *cpuctx;
1958 	int do_switch = 1;
1959 
1960 	if (likely(!ctx))
1961 		return;
1962 
1963 	cpuctx = __get_cpu_context(ctx);
1964 	if (!cpuctx->task_ctx)
1965 		return;
1966 
1967 	rcu_read_lock();
1968 	parent = rcu_dereference(ctx->parent_ctx);
1969 	next_ctx = next->perf_event_ctxp[ctxn];
1970 	if (parent && next_ctx &&
1971 	    rcu_dereference(next_ctx->parent_ctx) == parent) {
1972 		/*
1973 		 * Looks like the two contexts are clones, so we might be
1974 		 * able to optimize the context switch.  We lock both
1975 		 * contexts and check that they are clones under the
1976 		 * lock (including re-checking that neither has been
1977 		 * uncloned in the meantime).  It doesn't matter which
1978 		 * order we take the locks because no other cpu could
1979 		 * be trying to lock both of these tasks.
1980 		 */
1981 		raw_spin_lock(&ctx->lock);
1982 		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1983 		if (context_equiv(ctx, next_ctx)) {
1984 			/*
1985 			 * XXX do we need a memory barrier of sorts
1986 			 * wrt to rcu_dereference() of perf_event_ctxp
1987 			 */
1988 			task->perf_event_ctxp[ctxn] = next_ctx;
1989 			next->perf_event_ctxp[ctxn] = ctx;
1990 			ctx->task = next;
1991 			next_ctx->task = task;
1992 			do_switch = 0;
1993 
1994 			perf_event_sync_stat(ctx, next_ctx);
1995 		}
1996 		raw_spin_unlock(&next_ctx->lock);
1997 		raw_spin_unlock(&ctx->lock);
1998 	}
1999 	rcu_read_unlock();
2000 
2001 	if (do_switch) {
2002 		raw_spin_lock(&ctx->lock);
2003 		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2004 		cpuctx->task_ctx = NULL;
2005 		raw_spin_unlock(&ctx->lock);
2006 	}
2007 }
2008 
2009 #define for_each_task_context_nr(ctxn)					\
2010 	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2011 
2012 /*
2013  * Called from scheduler to remove the events of the current task,
2014  * with interrupts disabled.
2015  *
2016  * We stop each event and update the event value in event->count.
2017  *
2018  * This does not protect us against NMI, but disable()
2019  * sets the disabled bit in the control field of the event _before_
2020  * accessing the event control register. If an NMI hits, then it will
2021  * not restart the event.
2022  */
2023 void __perf_event_task_sched_out(struct task_struct *task,
2024 				 struct task_struct *next)
2025 {
2026 	int ctxn;
2027 
2028 	for_each_task_context_nr(ctxn)
2029 		perf_event_context_sched_out(task, ctxn, next);
2030 
2031 	/*
2032 	 * If cgroup events exist on this CPU, then we need
2033 	 * to check if we have to switch out PMU state;
2034 	 * cgroup events are system-wide mode only.
2035 	 */
2036 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2037 		perf_cgroup_sched_out(task, next);
2038 }
2039 
2040 static void task_ctx_sched_out(struct perf_event_context *ctx)
2041 {
2042 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2043 
2044 	if (!cpuctx->task_ctx)
2045 		return;
2046 
2047 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2048 		return;
2049 
2050 	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2051 	cpuctx->task_ctx = NULL;
2052 }
2053 
2054 /*
2055  * Called with IRQs disabled
2056  */
2057 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2058 			      enum event_type_t event_type)
2059 {
2060 	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2061 }
2062 
2063 static void
2064 ctx_pinned_sched_in(struct perf_event_context *ctx,
2065 		    struct perf_cpu_context *cpuctx)
2066 {
2067 	struct perf_event *event;
2068 
2069 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2070 		if (event->state <= PERF_EVENT_STATE_OFF)
2071 			continue;
2072 		if (!event_filter_match(event))
2073 			continue;
2074 
2075 		/* may need to reset tstamp_enabled */
2076 		if (is_cgroup_event(event))
2077 			perf_cgroup_mark_enabled(event, ctx);
2078 
2079 		if (group_can_go_on(event, cpuctx, 1))
2080 			group_sched_in(event, cpuctx, ctx);
2081 
2082 		/*
2083 		 * If this pinned group hasn't been scheduled,
2084 		 * put it in error state.
2085 		 */
2086 		if (event->state == PERF_EVENT_STATE_INACTIVE) {
2087 			update_group_times(event);
2088 			event->state = PERF_EVENT_STATE_ERROR;
2089 		}
2090 	}
2091 }
2092 
2093 static void
2094 ctx_flexible_sched_in(struct perf_event_context *ctx,
2095 		      struct perf_cpu_context *cpuctx)
2096 {
2097 	struct perf_event *event;
2098 	int can_add_hw = 1;
2099 
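	/*
	 * Once one hardware group fails to schedule, stop trying to add
	 * further hardware groups; software-only groups may still go on.
	 * This keeps hardware groups scheduled in list order.
	 */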
2100 	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2101 		/* Ignore events in OFF or ERROR state */
2102 		if (event->state <= PERF_EVENT_STATE_OFF)
2103 			continue;
2104 		/*
2105 		 * Listen to the 'cpu' scheduling filter constraint
2106 		 * of events:
2107 		 */
2108 		if (!event_filter_match(event))
2109 			continue;
2110 
2111 		/* may need to reset tstamp_enabled */
2112 		if (is_cgroup_event(event))
2113 			perf_cgroup_mark_enabled(event, ctx);
2114 
2115 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
2116 			if (group_sched_in(event, cpuctx, ctx))
2117 				can_add_hw = 0;
2118 		}
2119 	}
2120 }
2121 
2122 static void
2123 ctx_sched_in(struct perf_event_context *ctx,
2124 	     struct perf_cpu_context *cpuctx,
2125 	     enum event_type_t event_type,
2126 	     struct task_struct *task)
2127 {
2128 	u64 now;
2129 	int is_active = ctx->is_active;
2130 
2131 	ctx->is_active |= event_type;
2132 	if (likely(!ctx->nr_events))
2133 		return;
2134 
2135 	now = perf_clock();
2136 	ctx->timestamp = now;
2137 	perf_cgroup_set_timestamp(task, ctx);
2138 	/*
2139 	 * First go through the list and put on any pinned groups
2140 	 * in order to give them the best chance of going on.
2141 	 */
2142 	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
2143 		ctx_pinned_sched_in(ctx, cpuctx);
2144 
2145 	/* Then walk through the lower prio flexible groups */
2146 	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
2147 		ctx_flexible_sched_in(ctx, cpuctx);
2148 }
2149 
2150 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2151 			     enum event_type_t event_type,
2152 			     struct task_struct *task)
2153 {
2154 	struct perf_event_context *ctx = &cpuctx->ctx;
2155 
2156 	ctx_sched_in(ctx, cpuctx, event_type, task);
2157 }
2158 
2159 static void perf_event_context_sched_in(struct perf_event_context *ctx,
2160 					struct task_struct *task)
2161 {
2162 	struct perf_cpu_context *cpuctx;
2163 
2164 	cpuctx = __get_cpu_context(ctx);
2165 	if (cpuctx->task_ctx == ctx)
2166 		return;
2167 
2168 	perf_ctx_lock(cpuctx, ctx);
2169 	perf_pmu_disable(ctx->pmu);
2170 	/*
2171 	 * We want to keep the following priority order:
2172 	 * cpu pinned (that don't need to move), task pinned,
2173 	 * cpu flexible, task flexible.
2174 	 */
2175 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2176 
2177 	if (ctx->nr_events)
2178 		cpuctx->task_ctx = ctx;
2179 
2180 	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2181 
2182 	perf_pmu_enable(ctx->pmu);
2183 	perf_ctx_unlock(cpuctx, ctx);
2184 
2185 	/*
2186 	 * Since these rotations are per-cpu, we need to ensure the
2187 	 * cpu-context we got scheduled on is actually rotating.
2188 	 */
2189 	perf_pmu_rotate_start(ctx->pmu);
2190 }
2191 
2192 /*
2193  * Called from scheduler to add the events of the current task
2194  * with interrupts disabled.
2195  *
2196  * We restore the event value and then enable it.
2197  *
2198  * This does not protect us against NMI, but enable()
2199  * sets the enabled bit in the control field of the event _before_
2200  * accessing the event control register. If an NMI hits, then it will
2201  * keep the event running.
2202  */
2203 void __perf_event_task_sched_in(struct task_struct *prev,
2204 				struct task_struct *task)
2205 {
2206 	struct perf_event_context *ctx;
2207 	int ctxn;
2208 
2209 	for_each_task_context_nr(ctxn) {
2210 		ctx = task->perf_event_ctxp[ctxn];
2211 		if (likely(!ctx))
2212 			continue;
2213 
2214 		perf_event_context_sched_in(ctx, task);
2215 	}
2216 	/*
2217 	 * If cgroup events exist on this CPU, then we need
2218 	 * to check if we have to switch in PMU state;
2219 	 * cgroup events are system-wide mode only.
2220 	 */
2221 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2222 		perf_cgroup_sched_in(prev, task);
2223 }
2224 
2225 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2226 {
2227 	u64 frequency = event->attr.sample_freq;
2228 	u64 sec = NSEC_PER_SEC;
2229 	u64 divisor, dividend;
2230 
2231 	int count_fls, nsec_fls, frequency_fls, sec_fls;
2232 
2233 	count_fls = fls64(count);
2234 	nsec_fls = fls64(nsec);
2235 	frequency_fls = fls64(frequency);
2236 	sec_fls = 30;
2237 
2238 	/*
2239 	 * We got @count in @nsec, with a target of sample_freq HZ
2240 	 * the target period becomes:
2241 	 *
2242 	 *             @count * 10^9
2243 	 * period = -------------------
2244 	 *          @nsec * sample_freq
2245 	 *
2246 	 */
2247 
2248 	/*
2249 	 * Reduce accuracy by one bit such that @a and @b converge
2250 	 * to a similar magnitude.
2251 	 */
2252 #define REDUCE_FLS(a, b)		\
2253 do {					\
2254 	if (a##_fls > b##_fls) {	\
2255 		a >>= 1;		\
2256 		a##_fls--;		\
2257 	} else {			\
2258 		b >>= 1;		\
2259 		b##_fls--;		\
2260 	}				\
2261 } while (0)
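/*
 * E.g. one REDUCE_FLS(nsec, frequency) halves whichever of the two
 * currently needs more bits, trading one bit of precision in the
 * larger term for headroom in the upcoming multiplication.
 */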
2262 
2263 	/*
2264 	 * Reduce accuracy until either term fits in a u64, then proceed with
2265 	 * the other, so that finally we can do a u64/u64 division.
2266 	 */
2267 	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2268 		REDUCE_FLS(nsec, frequency);
2269 		REDUCE_FLS(sec, count);
2270 	}
2271 
2272 	if (count_fls + sec_fls > 64) {
2273 		divisor = nsec * frequency;
2274 
2275 		while (count_fls + sec_fls > 64) {
2276 			REDUCE_FLS(count, sec);
2277 			divisor >>= 1;
2278 		}
2279 
2280 		dividend = count * sec;
2281 	} else {
2282 		dividend = count * sec;
2283 
2284 		while (nsec_fls + frequency_fls > 64) {
2285 			REDUCE_FLS(nsec, frequency);
2286 			dividend >>= 1;
2287 		}
2288 
2289 		divisor = nsec * frequency;
2290 	}
2291 
2292 	if (!divisor)
2293 		return dividend;
2294 
2295 	return div64_u64(dividend, divisor);
2296 }
2297 
2298 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
2299 {
2300 	struct hw_perf_event *hwc = &event->hw;
2301 	s64 period, sample_period;
2302 	s64 delta;
2303 
2304 	period = perf_calculate_period(event, nsec, count);
2305 
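	/*
	 * Step only 1/8th of the way toward the newly computed period;
	 * this acts as a simple low-pass filter that damps oscillation
	 * when the observed event rate is noisy. The +7 rounds positive
	 * steps up so small corrections still make progress.
	 */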
2306 	delta = (s64)(period - hwc->sample_period);
2307 	delta = (delta + 7) / 8; /* low pass filter */
2308 
2309 	sample_period = hwc->sample_period + delta;
2310 
2311 	if (!sample_period)
2312 		sample_period = 1;
2313 
2314 	hwc->sample_period = sample_period;
2315 
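	/*
	 * If more than eight new periods' worth of count is still
	 * outstanding under the old setting, restart the event so the
	 * new period takes effect now rather than when the old, larger
	 * period expires.
	 */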
2316 	if (local64_read(&hwc->period_left) > 8*sample_period) {
2317 		event->pmu->stop(event, PERF_EF_UPDATE);
2318 		local64_set(&hwc->period_left, 0);
2319 		event->pmu->start(event, PERF_EF_RELOAD);
2320 	}
2321 }
2322 
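/*
 * Called once per timer tick: unthrottle events that hit their
 * interrupt limit during the last tick and, for frequency-based
 * events, re-estimate the sample period from the rate observed
 * since the previous tick.
 */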
2323 static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
2324 {
2325 	struct perf_event *event;
2326 	struct hw_perf_event *hwc;
2327 	u64 interrupts, now;
2328 	s64 delta;
2329 
2330 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2331 		if (event->state != PERF_EVENT_STATE_ACTIVE)
2332 			continue;
2333 
2334 		if (!event_filter_match(event))
2335 			continue;
2336 
2337 		hwc = &event->hw;
2338 
2339 		interrupts = hwc->interrupts;
2340 		hwc->interrupts = 0;
2341 
2342 		/*
2343 		 * unthrottle events on the tick
2344 		 */
2345 		if (interrupts == MAX_INTERRUPTS) {
2346 			perf_log_throttle(event, 1);
2347 			event->pmu->start(event, 0);
2348 		}
2349 
2350 		if (!event->attr.freq || !event->attr.sample_freq)
2351 			continue;
2352 
2353 		event->pmu->read(event);
2354 		now = local64_read(&event->count);
2355 		delta = now - hwc->freq_count_stamp;
2356 		hwc->freq_count_stamp = now;
2357 
2358 		if (delta > 0)
2359 			perf_adjust_period(event, period, delta);
2360 	}
2361 }
2362 
2363 /*
2364  * Round-robin a context's events:
2365  */
2366 static void rotate_ctx(struct perf_event_context *ctx)
2367 {
2368 	/*
2369 	 * Rotate the first entry of the non-pinned groups to the end of
2370 	 * the list. Rotation might be disabled by the inheritance code.
2371 	 */
2372 	if (!ctx->rotate_disable)
2373 		list_rotate_left(&ctx->flexible_groups);
2374 }
2375 
2376 /*
2377  * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2378  * because they're strictly cpu affine and rotate_start is called with IRQs
2379  * disabled, while rotate_context is called from IRQ context.
2380  */
2381 static void perf_rotate_context(struct perf_cpu_context *cpuctx)
2382 {
2383 	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
2384 	struct perf_event_context *ctx = NULL;
2385 	int rotate = 0, remove = 1;
2386 
2387 	if (cpuctx->ctx.nr_events) {
2388 		remove = 0;
2389 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2390 			rotate = 1;
2391 	}
2392 
2393 	ctx = cpuctx->task_ctx;
2394 	if (ctx && ctx->nr_events) {
2395 		remove = 0;
2396 		if (ctx->nr_events != ctx->nr_active)
2397 			rotate = 1;
2398 	}
2399 
2400 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2401 	perf_pmu_disable(cpuctx->ctx.pmu);
2402 	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
2403 	if (ctx)
2404 		perf_ctx_adjust_freq(ctx, interval);
2405 
2406 	if (!rotate)
2407 		goto done;
2408 
2409 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2410 	if (ctx)
2411 		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
2412 
2413 	rotate_ctx(&cpuctx->ctx);
2414 	if (ctx)
2415 		rotate_ctx(ctx);
2416 
2417 	perf_event_sched_in(cpuctx, ctx, current);
2418 
2419 done:
2420 	if (remove)
2421 		list_del_init(&cpuctx->rotation_list);
2422 
2423 	perf_pmu_enable(cpuctx->ctx.pmu);
2424 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2425 }
2426 
2427 void perf_event_task_tick(void)
2428 {
2429 	struct list_head *head = &__get_cpu_var(rotation_list);
2430 	struct perf_cpu_context *cpuctx, *tmp;
2431 
2432 	WARN_ON(!irqs_disabled());
2433 
2434 	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
2435 		if (cpuctx->jiffies_interval == 1 ||
2436 				!(jiffies % cpuctx->jiffies_interval))
2437 			perf_rotate_context(cpuctx);
2438 	}
2439 }
2440 
2441 static int event_enable_on_exec(struct perf_event *event,
2442 				struct perf_event_context *ctx)
2443 {
2444 	if (!event->attr.enable_on_exec)
2445 		return 0;
2446 
2447 	event->attr.enable_on_exec = 0;
2448 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
2449 		return 0;
2450 
2451 	__perf_event_mark_enabled(event, ctx);
2452 
2453 	return 1;
2454 }
2455 
2456 /*
2457  * Enable all of a task's events that have been marked enable-on-exec.
2458  * This expects task == current.
2459  */
2460 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2461 {
2462 	struct perf_event *event;
2463 	unsigned long flags;
2464 	int enabled = 0;
2465 	int ret;
2466 
2467 	local_irq_save(flags);
2468 	if (!ctx || !ctx->nr_events)
2469 		goto out;
2470 
2471 	/*
2472 	 * We must context-switch out cgroup events to avoid a conflict
2473 	 * when invoking perf_event_context_sched_in() later on
2474 	 * in this function. Otherwise we end up trying to
2475 	 * switch in cgroup events which are already scheduled
2476 	 * in.
2477 	 */
2478 	perf_cgroup_sched_out(current, NULL);
2479 
2480 	raw_spin_lock(&ctx->lock);
2481 	task_ctx_sched_out(ctx);
2482 
2483 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2484 		ret = event_enable_on_exec(event, ctx);
2485 		if (ret)
2486 			enabled = 1;
2487 	}
2488 
2489 	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2490 		ret = event_enable_on_exec(event, ctx);
2491 		if (ret)
2492 			enabled = 1;
2493 	}
2494 
2495 	/*
2496 	 * Unclone this context if we enabled any event.
2497 	 */
2498 	if (enabled)
2499 		unclone_ctx(ctx);
2500 
2501 	raw_spin_unlock(&ctx->lock);
2502 
2503 	/*
2504 	 * Also calls ctxswin for cgroup events, if any:
2505 	 */
2506 	perf_event_context_sched_in(ctx, ctx->task);
2507 out:
2508 	local_irq_restore(flags);
2509 }
2510 
2511 /*
2512  * Cross CPU call to read the hardware event
2513  */
2514 static void __perf_event_read(void *info)
2515 {
2516 	struct perf_event *event = info;
2517 	struct perf_event_context *ctx = event->ctx;
2518 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2519 
2520 	/*
2521 	 * If this is a task context, we need to check whether it is
2522 	 * the current task context of this cpu.  If not it has been
2523 	 * scheduled out before the smp call arrived.  In that case
2524 	 * event->count would have been updated to a recent sample
2525 	 * when the event was scheduled out.
2526 	 */
2527 	if (ctx->task && cpuctx->task_ctx != ctx)
2528 		return;
2529 
2530 	raw_spin_lock(&ctx->lock);
2531 	if (ctx->is_active) {
2532 		update_context_time(ctx);
2533 		update_cgrp_time_from_event(event);
2534 	}
2535 	update_event_times(event);
2536 	if (event->state == PERF_EVENT_STATE_ACTIVE)
2537 		event->pmu->read(event);
2538 	raw_spin_unlock(&ctx->lock);
2539 }
2540 
2541 static inline u64 perf_event_count(struct perf_event *event)
2542 {
2543 	return local64_read(&event->count) + atomic64_read(&event->child_count);
2544 }
2545 
2546 static u64 perf_event_read(struct perf_event *event)
2547 {
2548 	/*
2549 	 * If event is enabled and currently active on a CPU, update the
2550 	 * value in the event structure:
2551 	 */
2552 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
2553 		smp_call_function_single(event->oncpu,
2554 					 __perf_event_read, event, 1);
2555 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2556 		struct perf_event_context *ctx = event->ctx;
2557 		unsigned long flags;
2558 
2559 		raw_spin_lock_irqsave(&ctx->lock, flags);
2560 		/*
2561 		 * We may read while the context is not active
2562 		 * (e.g., the thread is blocked); in that case
2563 		 * we cannot update the context time.
2564 		 */
2565 		if (ctx->is_active) {
2566 			update_context_time(ctx);
2567 			update_cgrp_time_from_event(event);
2568 		}
2569 		update_event_times(event);
2570 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
2571 	}
2572 
2573 	return perf_event_count(event);
2574 }
2575 
2576 /*
2577  * Callchain support
2578  */
2579 
2580 struct callchain_cpus_entries {
2581 	struct rcu_head			rcu_head;
2582 	struct perf_callchain_entry	*cpu_entries[0];
2583 };
2584 
2585 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
2586 static atomic_t nr_callchain_events;
2587 static DEFINE_MUTEX(callchain_mutex);
2588 static struct callchain_cpus_entries *callchain_cpus_entries;
2589 
2591 __weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
2592 				  struct pt_regs *regs)
2593 {
2594 }
2595 
2596 __weak void perf_callchain_user(struct perf_callchain_entry *entry,
2597 				struct pt_regs *regs)
2598 {
2599 }
2600 
2601 static void release_callchain_buffers_rcu(struct rcu_head *head)
2602 {
2603 	struct callchain_cpus_entries *entries;
2604 	int cpu;
2605 
2606 	entries = container_of(head, struct callchain_cpus_entries, rcu_head);
2607 
2608 	for_each_possible_cpu(cpu)
2609 		kfree(entries->cpu_entries[cpu]);
2610 
2611 	kfree(entries);
2612 }
2613 
2614 static void release_callchain_buffers(void)
2615 {
2616 	struct callchain_cpus_entries *entries;
2617 
2618 	entries = callchain_cpus_entries;
2619 	rcu_assign_pointer(callchain_cpus_entries, NULL);
2620 	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
2621 }
2622 
2623 static int alloc_callchain_buffers(void)
2624 {
2625 	int cpu;
2626 	int size;
2627 	struct callchain_cpus_entries *entries;
2628 
2629 	/*
2630 	 * We can't use the percpu allocation API for data that can be
2631 	 * accessed from NMI. Use a temporary manual per cpu allocation
2632 	 * until that gets sorted out.
2633 	 */
2634 	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
2635 
2636 	entries = kzalloc(size, GFP_KERNEL);
2637 	if (!entries)
2638 		return -ENOMEM;
2639 
2640 	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
2641 
2642 	for_each_possible_cpu(cpu) {
2643 		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
2644 							 cpu_to_node(cpu));
2645 		if (!entries->cpu_entries[cpu])
2646 			goto fail;
2647 	}
2648 
2649 	rcu_assign_pointer(callchain_cpus_entries, entries);
2650 
2651 	return 0;
2652 
2653 fail:
2654 	for_each_possible_cpu(cpu)
2655 		kfree(entries->cpu_entries[cpu]);
2656 	kfree(entries);
2657 
2658 	return -ENOMEM;
2659 }
2660 
2661 static int get_callchain_buffers(void)
2662 {
2663 	int err = 0;
2664 	int count;
2665 
2666 	mutex_lock(&callchain_mutex);
2667 
2668 	count = atomic_inc_return(&nr_callchain_events);
2669 	if (WARN_ON_ONCE(count < 1)) {
2670 		err = -EINVAL;
2671 		goto exit;
2672 	}
2673 
2674 	if (count > 1) {
2675 		/* If the allocation failed, give up */
2676 		if (!callchain_cpus_entries)
2677 			err = -ENOMEM;
2678 		goto exit;
2679 	}
2680 
2681 	err = alloc_callchain_buffers();
2682 	if (err)
2683 		release_callchain_buffers();
2684 exit:
2685 	mutex_unlock(&callchain_mutex);
2686 
2687 	return err;
2688 }
2689 
2690 static void put_callchain_buffers(void)
2691 {
2692 	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
2693 		release_callchain_buffers();
2694 		mutex_unlock(&callchain_mutex);
2695 	}
2696 }
2697 
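/*
 * Callchain recursion is tracked per execution context, so that e.g.
 * an NMI hitting while we record a callchain from IRQ context gets
 * its own entry: 0 = task, 1 = softirq, 2 = hardirq, 3 = NMI, one
 * slot per PERF_NR_CONTEXTS level on each CPU.
 */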
2698 static int get_recursion_context(int *recursion)
2699 {
2700 	int rctx;
2701 
2702 	if (in_nmi())
2703 		rctx = 3;
2704 	else if (in_irq())
2705 		rctx = 2;
2706 	else if (in_softirq())
2707 		rctx = 1;
2708 	else
2709 		rctx = 0;
2710 
2711 	if (recursion[rctx])
2712 		return -1;
2713 
2714 	recursion[rctx]++;
2715 	barrier();
2716 
2717 	return rctx;
2718 }
2719 
2720 static inline void put_recursion_context(int *recursion, int rctx)
2721 {
2722 	barrier();
2723 	recursion[rctx]--;
2724 }
2725 
2726 static struct perf_callchain_entry *get_callchain_entry(int *rctx)
2727 {
2728 	int cpu;
2729 	struct callchain_cpus_entries *entries;
2730 
2731 	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
2732 	if (*rctx == -1)
2733 		return NULL;
2734 
2735 	entries = rcu_dereference(callchain_cpus_entries);
2736 	if (!entries)
2737 		return NULL;
2738 
2739 	cpu = smp_processor_id();
2740 
2741 	return &entries->cpu_entries[cpu][*rctx];
2742 }
2743 
2744 static void
2745 put_callchain_entry(int rctx)
2746 {
2747 	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
2748 }
2749 
2750 static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2751 {
2752 	int rctx;
2753 	struct perf_callchain_entry *entry;
2754 
2756 	entry = get_callchain_entry(&rctx);
2757 	if (rctx == -1)
2758 		return NULL;
2759 
2760 	if (!entry)
2761 		goto exit_put;
2762 
2763 	entry->nr = 0;
2764 
2765 	if (!user_mode(regs)) {
2766 		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
2767 		perf_callchain_kernel(entry, regs);
2768 		if (current->mm)
2769 			regs = task_pt_regs(current);
2770 		else
2771 			regs = NULL;
2772 	}
2773 
2774 	if (regs) {
2775 		perf_callchain_store(entry, PERF_CONTEXT_USER);
2776 		perf_callchain_user(entry, regs);
2777 	}
2778 
2779 exit_put:
2780 	put_callchain_entry(rctx);
2781 
2782 	return entry;
2783 }
2784 
2785 /*
2786  * Initialize the perf_event context in a task_struct:
2787  */
2788 static void __perf_event_init_context(struct perf_event_context *ctx)
2789 {
2790 	raw_spin_lock_init(&ctx->lock);
2791 	mutex_init(&ctx->mutex);
2792 	INIT_LIST_HEAD(&ctx->pinned_groups);
2793 	INIT_LIST_HEAD(&ctx->flexible_groups);
2794 	INIT_LIST_HEAD(&ctx->event_list);
2795 	atomic_set(&ctx->refcount, 1);
2796 }
2797 
2798 static struct perf_event_context *
2799 alloc_perf_context(struct pmu *pmu, struct task_struct *task)
2800 {
2801 	struct perf_event_context *ctx;
2802 
2803 	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2804 	if (!ctx)
2805 		return NULL;
2806 
2807 	__perf_event_init_context(ctx);
2808 	if (task) {
2809 		ctx->task = task;
2810 		get_task_struct(task);
2811 	}
2812 	ctx->pmu = pmu;
2813 
2814 	return ctx;
2815 }
2816 
2817 static struct task_struct *
2818 find_lively_task_by_vpid(pid_t vpid)
2819 {
2820 	struct task_struct *task;
2821 	int err;
2822 
2823 	rcu_read_lock();
2824 	if (!vpid)
2825 		task = current;
2826 	else
2827 		task = find_task_by_vpid(vpid);
2828 	if (task)
2829 		get_task_struct(task);
2830 	rcu_read_unlock();
2831 
2832 	if (!task)
2833 		return ERR_PTR(-ESRCH);
2834 
2835 	/* Reuse ptrace permission checks for now. */
2836 	err = -EACCES;
2837 	if (!ptrace_may_access(task, PTRACE_MODE_READ))
2838 		goto errout;
2839 
2840 	return task;
2841 errout:
2842 	put_task_struct(task);
2843 	return ERR_PTR(err);
2844 }
2846 
2847 /*
2848  * Returns a matching context with refcount and pincount.
2849  */
2850 static struct perf_event_context *
2851 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
2852 {
2853 	struct perf_event_context *ctx;
2854 	struct perf_cpu_context *cpuctx;
2855 	unsigned long flags;
2856 	int ctxn, err;
2857 
2858 	if (!task) {
2859 		/* Must be root to operate on a CPU event: */
2860 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2861 			return ERR_PTR(-EACCES);
2862 
2863 		/*
2864 		 * We could be clever and allow attaching an event to an
2865 		 * offline CPU and activate it when the CPU comes up, but
2866 		 * that's for later.
2867 		 */
2868 		if (!cpu_online(cpu))
2869 			return ERR_PTR(-ENODEV);
2870 
2871 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
2872 		ctx = &cpuctx->ctx;
2873 		get_ctx(ctx);
2874 		++ctx->pin_count;
2875 
2876 		return ctx;
2877 	}
2878 
2879 	err = -EINVAL;
2880 	ctxn = pmu->task_ctx_nr;
2881 	if (ctxn < 0)
2882 		goto errout;
2883 
2884 retry:
2885 	ctx = perf_lock_task_context(task, ctxn, &flags);
2886 	if (ctx) {
2887 		unclone_ctx(ctx);
2888 		++ctx->pin_count;
2889 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
2890 	} else {
2891 		ctx = alloc_perf_context(pmu, task);
2892 		err = -ENOMEM;
2893 		if (!ctx)
2894 			goto errout;
2895 
2896 		err = 0;
2897 		mutex_lock(&task->perf_event_mutex);
2898 		/*
2899 		 * If the task has already passed perf_event_exit_task(),
2900 		 * we must see PF_EXITING; that path takes this mutex too.
2901 		 */
2902 		if (task->flags & PF_EXITING)
2903 			err = -ESRCH;
2904 		else if (task->perf_event_ctxp[ctxn])
2905 			err = -EAGAIN;
2906 		else {
2907 			get_ctx(ctx);
2908 			++ctx->pin_count;
2909 			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
2910 		}
2911 		mutex_unlock(&task->perf_event_mutex);
2912 
2913 		if (unlikely(err)) {
2914 			put_ctx(ctx);
2915 
2916 			if (err == -EAGAIN)
2917 				goto retry;
2918 			goto errout;
2919 		}
2920 	}
2921 
2922 	return ctx;
2923 
2924 errout:
2925 	return ERR_PTR(err);
2926 }
2927 
2928 static void perf_event_free_filter(struct perf_event *event);
2929 
2930 static void free_event_rcu(struct rcu_head *head)
2931 {
2932 	struct perf_event *event;
2933 
2934 	event = container_of(head, struct perf_event, rcu_head);
2935 	if (event->ns)
2936 		put_pid_ns(event->ns);
2937 	perf_event_free_filter(event);
2938 	kfree(event);
2939 }
2940 
2941 static void ring_buffer_put(struct ring_buffer *rb);
2942 
2943 static void free_event(struct perf_event *event)
2944 {
2945 	irq_work_sync(&event->pending);
2946 
2947 	if (!event->parent) {
2948 		if (event->attach_state & PERF_ATTACH_TASK)
2949 			jump_label_dec(&perf_sched_events);
2950 		if (event->attr.mmap || event->attr.mmap_data)
2951 			atomic_dec(&nr_mmap_events);
2952 		if (event->attr.comm)
2953 			atomic_dec(&nr_comm_events);
2954 		if (event->attr.task)
2955 			atomic_dec(&nr_task_events);
2956 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2957 			put_callchain_buffers();
2958 		if (is_cgroup_event(event)) {
2959 			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
2960 			jump_label_dec(&perf_sched_events);
2961 		}
2962 	}
2963 
2964 	if (event->rb) {
2965 		ring_buffer_put(event->rb);
2966 		event->rb = NULL;
2967 	}
2968 
2969 	if (is_cgroup_event(event))
2970 		perf_detach_cgroup(event);
2971 
2972 	if (event->destroy)
2973 		event->destroy(event);
2974 
2975 	if (event->ctx)
2976 		put_ctx(event->ctx);
2977 
2978 	call_rcu(&event->rcu_head, free_event_rcu);
2979 }
2980 
2981 int perf_event_release_kernel(struct perf_event *event)
2982 {
2983 	struct perf_event_context *ctx = event->ctx;
2984 
2985 	WARN_ON_ONCE(ctx->parent_ctx);
2986 	/*
2987 	 * There are two ways this annotation is useful:
2988 	 *
2989 	 *  1) there is a lock recursion from perf_event_exit_task
2990 	 *     see the comment there.
2991 	 *
2992 	 *  2) there is a lock-inversion with mmap_sem through
2993 	 *     perf_event_read_group(), which takes faults while
2994 	 *     holding ctx->mutex, however this is called after
2995 	 *     the last filedesc died, so there is no possibility
2996 	 *     to trigger the AB-BA case.
2997 	 */
2998 	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
2999 	raw_spin_lock_irq(&ctx->lock);
3000 	perf_group_detach(event);
3001 	raw_spin_unlock_irq(&ctx->lock);
3002 	perf_remove_from_context(event);
3003 	mutex_unlock(&ctx->mutex);
3004 
3005 	free_event(event);
3006 
3007 	return 0;
3008 }
3009 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
3010 
3011 /*
3012  * Called when the last reference to the file is gone.
3013  */
3014 static int perf_release(struct inode *inode, struct file *file)
3015 {
3016 	struct perf_event *event = file->private_data;
3017 	struct task_struct *owner;
3018 
3019 	file->private_data = NULL;
3020 
3021 	rcu_read_lock();
3022 	owner = ACCESS_ONCE(event->owner);
3023 	/*
3024 	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
3025 	 * !owner it means the list deletion is complete and we can indeed
3026 	 * free this event, otherwise we need to serialize on
3027 	 * owner->perf_event_mutex.
3028 	 */
3029 	smp_read_barrier_depends();
3030 	if (owner) {
3031 		/*
3032 		 * Since delayed_put_task_struct() also drops the last
3033 		 * task reference we can safely take a new reference
3034 		 * while holding the rcu_read_lock().
3035 		 */
3036 		get_task_struct(owner);
3037 	}
3038 	rcu_read_unlock();
3039 
3040 	if (owner) {
3041 		mutex_lock(&owner->perf_event_mutex);
3042 		/*
3043 		 * We have to re-check the event->owner field; if it is cleared
3044 		 * we raced with perf_event_exit_task(). Acquiring the mutex
3045 		 * ensured they're done, and we can proceed with freeing the
3046 		 * event.
3047 		 */
3048 		if (event->owner)
3049 			list_del_init(&event->owner_entry);
3050 		mutex_unlock(&owner->perf_event_mutex);
3051 		put_task_struct(owner);
3052 	}
3053 
3054 	return perf_event_release_kernel(event);
3055 }
3056 
3057 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
3058 {
3059 	struct perf_event *child;
3060 	u64 total = 0;
3061 
3062 	*enabled = 0;
3063 	*running = 0;
3064 
3065 	mutex_lock(&event->child_mutex);
3066 	total += perf_event_read(event);
3067 	*enabled += event->total_time_enabled +
3068 			atomic64_read(&event->child_total_time_enabled);
3069 	*running += event->total_time_running +
3070 			atomic64_read(&event->child_total_time_running);
3071 
3072 	list_for_each_entry(child, &event->child_list, child_list) {
3073 		total += perf_event_read(child);
3074 		*enabled += child->total_time_enabled;
3075 		*running += child->total_time_running;
3076 	}
3077 	mutex_unlock(&event->child_mutex);
3078 
3079 	return total;
3080 }
3081 EXPORT_SYMBOL_GPL(perf_event_read_value);
3082 
3083 static int perf_event_read_group(struct perf_event *event,
3084 				   u64 read_format, char __user *buf)
3085 {
3086 	struct perf_event *leader = event->group_leader, *sub;
3087 	int n = 0, size = 0, ret = -EFAULT;
3088 	struct perf_event_context *ctx = leader->ctx;
3089 	u64 values[5];
3090 	u64 count, enabled, running;
3091 
3092 	mutex_lock(&ctx->mutex);
3093 	count = perf_event_read_value(leader, &enabled, &running);
3094 
3095 	values[n++] = 1 + leader->nr_siblings;
3096 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3097 		values[n++] = enabled;
3098 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3099 		values[n++] = running;
3100 	values[n++] = count;
3101 	if (read_format & PERF_FORMAT_ID)
3102 		values[n++] = primary_event_id(leader);
3103 
3104 	size = n * sizeof(u64);
3105 
3106 	if (copy_to_user(buf, values, size))
3107 		goto unlock;
3108 
3109 	ret = size;
3110 
3111 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3112 		n = 0;
3113 
3114 		values[n++] = perf_event_read_value(sub, &enabled, &running);
3115 		if (read_format & PERF_FORMAT_ID)
3116 			values[n++] = primary_event_id(sub);
3117 
3118 		size = n * sizeof(u64);
3119 
3120 		if (copy_to_user(buf + ret, values, size)) {
3121 			ret = -EFAULT;
3122 			goto unlock;
3123 		}
3124 
3125 		ret += size;
3126 	}
3127 unlock:
3128 	mutex_unlock(&ctx->mutex);
3129 
3130 	return ret;
3131 }
3132 
3133 static int perf_event_read_one(struct perf_event *event,
3134 				 u64 read_format, char __user *buf)
3135 {
3136 	u64 enabled, running;
3137 	u64 values[4];
3138 	int n = 0;
3139 
3140 	values[n++] = perf_event_read_value(event, &enabled, &running);
3141 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3142 		values[n++] = enabled;
3143 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3144 		values[n++] = running;
3145 	if (read_format & PERF_FORMAT_ID)
3146 		values[n++] = primary_event_id(event);
3147 
3148 	if (copy_to_user(buf, values, n * sizeof(u64)))
3149 		return -EFAULT;
3150 
3151 	return n * sizeof(u64);
3152 }
3153 
3154 /*
3155  * Read the performance event - simple non blocking version for now
3156  */
3157 static ssize_t
3158 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
3159 {
3160 	u64 read_format = event->attr.read_format;
3161 	int ret;
3162 
3163 	/*
3164 	 * Return end-of-file for a read on an event that is in
3165 	 * error state (i.e. because it was pinned but it couldn't be
3166 	 * scheduled on to the CPU at some point).
3167 	 */
3168 	if (event->state == PERF_EVENT_STATE_ERROR)
3169 		return 0;
3170 
3171 	if (count < event->read_size)
3172 		return -ENOSPC;
3173 
3174 	WARN_ON_ONCE(event->ctx->parent_ctx);
3175 	if (read_format & PERF_FORMAT_GROUP)
3176 		ret = perf_event_read_group(event, read_format, buf);
3177 	else
3178 		ret = perf_event_read_one(event, read_format, buf);
3179 
3180 	return ret;
3181 }
3182 
3183 static ssize_t
3184 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3185 {
3186 	struct perf_event *event = file->private_data;
3187 
3188 	return perf_read_hw(event, buf, count);
3189 }
3190 
3191 static unsigned int perf_poll(struct file *file, poll_table *wait)
3192 {
3193 	struct perf_event *event = file->private_data;
3194 	struct ring_buffer *rb;
3195 	unsigned int events = POLLHUP;
3196 
3197 	/*
3198 	 * Race between perf_event_set_output() and perf_poll(): perf_poll()
3199 	 * grabs the rb reference but perf_event_set_output() overrides it.
3200 	 * Here is the timeline for two threads T1, T2:
3201 	 * t0: T1, rb = rcu_dereference(event->rb)
3202 	 * t1: T2, old_rb = event->rb
3203 	 * t2: T2, event->rb = new rb
3204 	 * t3: T2, ring_buffer_detach(old_rb)
3205 	 * t4: T1, ring_buffer_attach(rb)
3206 	 * t5: T1, poll_wait(event->waitq)
3207 	 *
3208 	 * To avoid this problem, we grab mmap_mutex in perf_poll(),
3209 	 * thereby ensuring that the assignment of the new ring buffer
3210 	 * and the detachment of the old buffer appear atomic to perf_poll().
3211 	 */
3212 	mutex_lock(&event->mmap_mutex);
3213 
3214 	rcu_read_lock();
3215 	rb = rcu_dereference(event->rb);
3216 	if (rb) {
3217 		ring_buffer_attach(event, rb);
3218 		events = atomic_xchg(&rb->poll, 0);
3219 	}
3220 	rcu_read_unlock();
3221 
3222 	mutex_unlock(&event->mmap_mutex);
3223 
3224 	poll_wait(file, &event->waitq, wait);
3225 
3226 	return events;
3227 }
3228 
3229 static void perf_event_reset(struct perf_event *event)
3230 {
3231 	(void)perf_event_read(event);
3232 	local64_set(&event->count, 0);
3233 	perf_event_update_userpage(event);
3234 }
3235 
3236 /*
3237  * Holding the top-level event's child_mutex means that any
3238  * descendant process that has inherited this event will block
3239  * in sync_child_event if it goes to exit, thus satisfying the
3240  * task existence requirements of perf_event_enable/disable.
3241  */
3242 static void perf_event_for_each_child(struct perf_event *event,
3243 					void (*func)(struct perf_event *))
3244 {
3245 	struct perf_event *child;
3246 
3247 	WARN_ON_ONCE(event->ctx->parent_ctx);
3248 	mutex_lock(&event->child_mutex);
3249 	func(event);
3250 	list_for_each_entry(child, &event->child_list, child_list)
3251 		func(child);
3252 	mutex_unlock(&event->child_mutex);
3253 }
3254 
3255 static void perf_event_for_each(struct perf_event *event,
3256 				  void (*func)(struct perf_event *))
3257 {
3258 	struct perf_event_context *ctx = event->ctx;
3259 	struct perf_event *sibling;
3260 
3261 	WARN_ON_ONCE(ctx->parent_ctx);
3262 	mutex_lock(&ctx->mutex);
3263 	event = event->group_leader;
3264 
3265 	perf_event_for_each_child(event, func);
3266 	list_for_each_entry(sibling, &event->sibling_list, group_entry)
3267 		perf_event_for_each_child(sibling, func);
3269 	mutex_unlock(&ctx->mutex);
3270 }
3271 
3272 static int perf_event_period(struct perf_event *event, u64 __user *arg)
3273 {
3274 	struct perf_event_context *ctx = event->ctx;
3275 	int ret = 0;
3276 	u64 value;
3277 
3278 	if (!is_sampling_event(event))
3279 		return -EINVAL;
3280 
3281 	if (copy_from_user(&value, arg, sizeof(value)))
3282 		return -EFAULT;
3283 
3284 	if (!value)
3285 		return -EINVAL;
3286 
3287 	raw_spin_lock_irq(&ctx->lock);
3288 	if (event->attr.freq) {
3289 		if (value > sysctl_perf_event_sample_rate) {
3290 			ret = -EINVAL;
3291 			goto unlock;
3292 		}
3293 
3294 		event->attr.sample_freq = value;
3295 	} else {
3296 		event->attr.sample_period = value;
3297 		event->hw.sample_period = value;
3298 	}
3299 unlock:
3300 	raw_spin_unlock_irq(&ctx->lock);
3301 
3302 	return ret;
3303 }
3304 
3305 static const struct file_operations perf_fops;
3306 
3307 static struct perf_event *perf_fget_light(int fd, int *fput_needed)
3308 {
3309 	struct file *file;
3310 
3311 	file = fget_light(fd, fput_needed);
3312 	if (!file)
3313 		return ERR_PTR(-EBADF);
3314 
3315 	if (file->f_op != &perf_fops) {
3316 		fput_light(file, *fput_needed);
3317 		*fput_needed = 0;
3318 		return ERR_PTR(-EBADF);
3319 	}
3320 
3321 	return file->private_data;
3322 }
3323 
3324 static int perf_event_set_output(struct perf_event *event,
3325 				 struct perf_event *output_event);
3326 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3327 
3328 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3329 {
3330 	struct perf_event *event = file->private_data;
3331 	void (*func)(struct perf_event *);
3332 	u32 flags = arg;
3333 
3334 	switch (cmd) {
3335 	case PERF_EVENT_IOC_ENABLE:
3336 		func = perf_event_enable;
3337 		break;
3338 	case PERF_EVENT_IOC_DISABLE:
3339 		func = perf_event_disable;
3340 		break;
3341 	case PERF_EVENT_IOC_RESET:
3342 		func = perf_event_reset;
3343 		break;
3344 
3345 	case PERF_EVENT_IOC_REFRESH:
3346 		return perf_event_refresh(event, arg);
3347 
3348 	case PERF_EVENT_IOC_PERIOD:
3349 		return perf_event_period(event, (u64 __user *)arg);
3350 
3351 	case PERF_EVENT_IOC_SET_OUTPUT:
3352 	{
3353 		struct perf_event *output_event = NULL;
3354 		int fput_needed = 0;
3355 		int ret;
3356 
3357 		if (arg != -1) {
3358 			output_event = perf_fget_light(arg, &fput_needed);
3359 			if (IS_ERR(output_event))
3360 				return PTR_ERR(output_event);
3361 		}
3362 
3363 		ret = perf_event_set_output(event, output_event);
3364 		if (output_event)
3365 			fput_light(output_event->filp, fput_needed);
3366 
3367 		return ret;
3368 	}
3369 
3370 	case PERF_EVENT_IOC_SET_FILTER:
3371 		return perf_event_set_filter(event, (void __user *)arg);
3372 
3373 	default:
3374 		return -ENOTTY;
3375 	}
3376 
3377 	if (flags & PERF_IOC_FLAG_GROUP)
3378 		perf_event_for_each(event, func);
3379 	else
3380 		perf_event_for_each_child(event, func);
3381 
3382 	return 0;
3383 }
3384 
3385 int perf_event_task_enable(void)
3386 {
3387 	struct perf_event *event;
3388 
3389 	mutex_lock(&current->perf_event_mutex);
3390 	list_for_each_entry(event, &current->perf_event_list, owner_entry)
3391 		perf_event_for_each_child(event, perf_event_enable);
3392 	mutex_unlock(&current->perf_event_mutex);
3393 
3394 	return 0;
3395 }
3396 
3397 int perf_event_task_disable(void)
3398 {
3399 	struct perf_event *event;
3400 
3401 	mutex_lock(&current->perf_event_mutex);
3402 	list_for_each_entry(event, &current->perf_event_list, owner_entry)
3403 		perf_event_for_each_child(event, perf_event_disable);
3404 	mutex_unlock(&current->perf_event_mutex);
3405 
3406 	return 0;
3407 }
3408 
3409 #ifndef PERF_EVENT_INDEX_OFFSET
3410 # define PERF_EVENT_INDEX_OFFSET 0
3411 #endif
3412 
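/*
 * Index published to user-space via the mmap control page. On
 * architectures that allow it, user-space can read the counter
 * directly (e.g. rdpmc on x86) using index - 1; an index of 0
 * means no counter is currently available for direct reads.
 */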
3413 static int perf_event_index(struct perf_event *event)
3414 {
3415 	if (event->hw.state & PERF_HES_STOPPED)
3416 		return 0;
3417 
3418 	if (event->state != PERF_EVENT_STATE_ACTIVE)
3419 		return 0;
3420 
3421 	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
3422 }
3423 
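/*
 * Derive the enabled/running times from the shadow_ctx_time snapshot
 * taken when the event was last scheduled in. This needs no locks
 * and is therefore usable from NMI context.
 */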
3424 static void calc_timer_values(struct perf_event *event,
3425 				u64 *enabled,
3426 				u64 *running)
3427 {
3428 	u64 now, ctx_time;
3429 
3430 	now = perf_clock();
3431 	ctx_time = event->shadow_ctx_time + now;
3432 	*enabled = ctx_time - event->tstamp_enabled;
3433 	*running = ctx_time - event->tstamp_running;
3434 }
3435 
3436 /*
3437  * Callers need to ensure there can be no nesting of this function, otherwise
3438  * the seqlock logic goes bad. We can not serialize this because the arch
3439  * code calls this from NMI context.
3440  */
3441 void perf_event_update_userpage(struct perf_event *event)
3442 {
3443 	struct perf_event_mmap_page *userpg;
3444 	struct ring_buffer *rb;
3445 	u64 enabled, running;
3446 
3447 	rcu_read_lock();
3448 	/*
3449 	 * compute total_time_enabled, total_time_running
3450 	 * based on snapshot values taken when the event
3451 	 * was last scheduled in.
3452 	 *
3453 	 * We cannot simply call update_context_time()
3454 	 * because of locking issues, as we can be called in
3455 	 * NMI context.
3456 	 */
3457 	calc_timer_values(event, &enabled, &running);
3458 	rb = rcu_dereference(event->rb);
3459 	if (!rb)
3460 		goto unlock;
3461 
3462 	userpg = rb->user_page;
3463 
3464 	/*
3465 	 * Disable preemption so as to not let the corresponding user-space
3466 	 * spin too long if we get preempted.
3467 	 */
3468 	preempt_disable();
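	/*
	 * Seqcount-style protocol with user-space: bump the lock word
	 * before and after the update, so a reader that sees an odd
	 * value, or a different value on its second read, knows to
	 * retry.
	 */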
3469 	++userpg->lock;
3470 	barrier();
3471 	userpg->index = perf_event_index(event);
3472 	userpg->offset = perf_event_count(event);
3473 	if (event->state == PERF_EVENT_STATE_ACTIVE)
3474 		userpg->offset -= local64_read(&event->hw.prev_count);
3475 
3476 	userpg->time_enabled = enabled +
3477 			atomic64_read(&event->child_total_time_enabled);
3478 
3479 	userpg->time_running = running +
3480 			atomic64_read(&event->child_total_time_running);
3481 
3482 	barrier();
3483 	++userpg->lock;
3484 	preempt_enable();
3485 unlock:
3486 	rcu_read_unlock();
3487 }
3488 
3489 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3490 {
3491 	struct perf_event *event = vma->vm_file->private_data;
3492 	struct ring_buffer *rb;
3493 	int ret = VM_FAULT_SIGBUS;
3494 
3495 	if (vmf->flags & FAULT_FLAG_MKWRITE) {
3496 		if (vmf->pgoff == 0)
3497 			ret = 0;
3498 		return ret;
3499 	}
3500 
3501 	rcu_read_lock();
3502 	rb = rcu_dereference(event->rb);
3503 	if (!rb)
3504 		goto unlock;
3505 
3506 	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3507 		goto unlock;
3508 
3509 	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
3510 	if (!vmf->page)
3511 		goto unlock;
3512 
3513 	get_page(vmf->page);
3514 	vmf->page->mapping = vma->vm_file->f_mapping;
3515 	vmf->page->index   = vmf->pgoff;
3516 
3517 	ret = 0;
3518 unlock:
3519 	rcu_read_unlock();
3520 
3521 	return ret;
3522 }
3523 
3524 static void ring_buffer_attach(struct perf_event *event,
3525 			       struct ring_buffer *rb)
3526 {
3527 	unsigned long flags;
3528 
3529 	if (!list_empty(&event->rb_entry))
3530 		return;
3531 
3532 	spin_lock_irqsave(&rb->event_lock, flags);
3533 	if (!list_empty(&event->rb_entry))
3534 		goto unlock;
3535 
3536 	list_add(&event->rb_entry, &rb->event_list);
3537 unlock:
3538 	spin_unlock_irqrestore(&rb->event_lock, flags);
3539 }
3540 
3541 static void ring_buffer_detach(struct perf_event *event,
3542 			       struct ring_buffer *rb)
3543 {
3544 	unsigned long flags;
3545 
3546 	if (list_empty(&event->rb_entry))
3547 		return;
3548 
3549 	spin_lock_irqsave(&rb->event_lock, flags);
3550 	list_del_init(&event->rb_entry);
3551 	wake_up_all(&event->waitq);
3552 	spin_unlock_irqrestore(&rb->event_lock, flags);
3553 }
3554 
3555 static void ring_buffer_wakeup(struct perf_event *event)
3556 {
3557 	struct ring_buffer *rb;
3558 
3559 	rcu_read_lock();
3560 	rb = rcu_dereference(event->rb);
3561 	if (!rb)
3562 		goto unlock;
3563 
3564 	list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3565 		wake_up_all(&event->waitq);
3566 
3567 unlock:
3568 	rcu_read_unlock();
3569 }
3570 
3571 static void rb_free_rcu(struct rcu_head *rcu_head)
3572 {
3573 	struct ring_buffer *rb;
3574 
3575 	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3576 	rb_free(rb);
3577 }
3578 
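/*
 * Grab a reference on the ring buffer, but only if it is still live:
 * under RCU the buffer may be concurrently torn down, in which case
 * its refcount has already hit zero and atomic_inc_not_zero() fails.
 */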
3579 static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3580 {
3581 	struct ring_buffer *rb;
3582 
3583 	rcu_read_lock();
3584 	rb = rcu_dereference(event->rb);
3585 	if (rb) {
3586 		if (!atomic_inc_not_zero(&rb->refcount))
3587 			rb = NULL;
3588 	}
3589 	rcu_read_unlock();
3590 
3591 	return rb;
3592 }
3593 
3594 static void ring_buffer_put(struct ring_buffer *rb)
3595 {
3596 	struct perf_event *event, *n;
3597 	unsigned long flags;
3598 
3599 	if (!atomic_dec_and_test(&rb->refcount))
3600 		return;
3601 
3602 	spin_lock_irqsave(&rb->event_lock, flags);
3603 	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
3604 		list_del_init(&event->rb_entry);
3605 		wake_up_all(&event->waitq);
3606 	}
3607 	spin_unlock_irqrestore(&rb->event_lock, flags);
3608 
3609 	call_rcu(&rb->rcu_head, rb_free_rcu);
3610 }
3611 
3612 static void perf_mmap_open(struct vm_area_struct *vma)
3613 {
3614 	struct perf_event *event = vma->vm_file->private_data;
3615 
3616 	atomic_inc(&event->mmap_count);
3617 }
3618 
3619 static void perf_mmap_close(struct vm_area_struct *vma)
3620 {
3621 	struct perf_event *event = vma->vm_file->private_data;
3622 
3623 	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
3624 		unsigned long size = perf_data_size(event->rb);
3625 		struct user_struct *user = event->mmap_user;
3626 		struct ring_buffer *rb = event->rb;
3627 
3628 		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
3629 		vma->vm_mm->pinned_vm -= event->mmap_locked;
3630 		rcu_assign_pointer(event->rb, NULL);
3631 		ring_buffer_detach(event, rb);
3632 		mutex_unlock(&event->mmap_mutex);
3633 
3634 		ring_buffer_put(rb);
3635 		free_uid(user);
3636 	}
3637 }
3638 
3639 static const struct vm_operations_struct perf_mmap_vmops = {
3640 	.open		= perf_mmap_open,
3641 	.close		= perf_mmap_close,
3642 	.fault		= perf_mmap_fault,
3643 	.page_mkwrite	= perf_mmap_fault,
3644 };
3645 
3646 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3647 {
3648 	struct perf_event *event = file->private_data;
3649 	unsigned long user_locked, user_lock_limit;
3650 	struct user_struct *user = current_user();
3651 	unsigned long locked, lock_limit;
3652 	struct ring_buffer *rb;
3653 	unsigned long vma_size;
3654 	unsigned long nr_pages;
3655 	long user_extra, extra;
3656 	int ret = 0, flags = 0;
3657 
3658 	/*
3659 	 * Don't allow mmap() of inherited per-task counters. This would
3660 	 * create a performance issue due to all children writing to the
3661 	 * same rb.
3662 	 */
3663 	if (event->cpu == -1 && event->attr.inherit)
3664 		return -EINVAL;
3665 
3666 	if (!(vma->vm_flags & VM_SHARED))
3667 		return -EINVAL;
3668 
3669 	vma_size = vma->vm_end - vma->vm_start;
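	/*
	 * The first mmap()ed page is the control page
	 * (struct perf_event_mmap_page); only the remaining pages
	 * carry sample data, hence the -1 below.
	 */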
3670 	nr_pages = (vma_size / PAGE_SIZE) - 1;
3671 
3672 	/*
3673 	 * If we have rb pages ensure they're a power-of-two number, so we
3674 	 * can do bitmasks instead of modulo.
3675 	 */
3676 	if (nr_pages != 0 && !is_power_of_2(nr_pages))
3677 		return -EINVAL;
3678 
3679 	if (vma_size != PAGE_SIZE * (1 + nr_pages))
3680 		return -EINVAL;
3681 
3682 	if (vma->vm_pgoff != 0)
3683 		return -EINVAL;
3684 
3685 	WARN_ON_ONCE(event->ctx->parent_ctx);
3686 	mutex_lock(&event->mmap_mutex);
3687 	if (event->rb) {
3688 		if (event->rb->nr_pages == nr_pages)
3689 			atomic_inc(&event->rb->refcount);
3690 		else
3691 			ret = -EINVAL;
3692 		goto unlock;
3693 	}
3694 
3695 	user_extra = nr_pages + 1;
3696 	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3697 
3698 	/*
3699 	 * Increase the limit linearly with more CPUs:
3700 	 */
3701 	user_lock_limit *= num_online_cpus();
3702 
3703 	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3704 
3705 	extra = 0;
3706 	if (user_locked > user_lock_limit)
3707 		extra = user_locked - user_lock_limit;
3708 
3709 	lock_limit = rlimit(RLIMIT_MEMLOCK);
3710 	lock_limit >>= PAGE_SHIFT;
3711 	locked = vma->vm_mm->pinned_vm + extra;
3712 
3713 	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3714 		!capable(CAP_IPC_LOCK)) {
3715 		ret = -EPERM;
3716 		goto unlock;
3717 	}
3718 
3719 	WARN_ON(event->rb);
3720 
3721 	if (vma->vm_flags & VM_WRITE)
3722 		flags |= RING_BUFFER_WRITABLE;
3723 
3724 	rb = rb_alloc(nr_pages,
3725 		event->attr.watermark ? event->attr.wakeup_watermark : 0,
3726 		event->cpu, flags);
3727 
3728 	if (!rb) {
3729 		ret = -ENOMEM;
3730 		goto unlock;
3731 	}
3732 	rcu_assign_pointer(event->rb, rb);
3733 
3734 	atomic_long_add(user_extra, &user->locked_vm);
3735 	event->mmap_locked = extra;
3736 	event->mmap_user = get_current_user();
3737 	vma->vm_mm->pinned_vm += event->mmap_locked;
3738 
3739 unlock:
3740 	if (!ret)
3741 		atomic_inc(&event->mmap_count);
3742 	mutex_unlock(&event->mmap_mutex);
3743 
3744 	vma->vm_flags |= VM_RESERVED;
3745 	vma->vm_ops = &perf_mmap_vmops;
3746 
3747 	return ret;
3748 }
3749 
3750 static int perf_fasync(int fd, struct file *filp, int on)
3751 {
3752 	struct inode *inode = filp->f_path.dentry->d_inode;
3753 	struct perf_event *event = filp->private_data;
3754 	int retval;
3755 
3756 	mutex_lock(&inode->i_mutex);
3757 	retval = fasync_helper(fd, filp, on, &event->fasync);
3758 	mutex_unlock(&inode->i_mutex);
3759 
3760 	if (retval < 0)
3761 		return retval;
3762 
3763 	return 0;
3764 }
3765 
3766 static const struct file_operations perf_fops = {
3767 	.llseek			= no_llseek,
3768 	.release		= perf_release,
3769 	.read			= perf_read,
3770 	.poll			= perf_poll,
3771 	.unlocked_ioctl		= perf_ioctl,
3772 	.compat_ioctl		= perf_ioctl,
3773 	.mmap			= perf_mmap,
3774 	.fasync			= perf_fasync,
3775 };
3776 
3777 /*
3778  * Perf event wakeup
3779  *
3780  * If there's data, ensure we set the poll() state and publish everything
3781  * to user-space before waking everybody up.
3782  */
3783 
3784 void perf_event_wakeup(struct perf_event *event)
3785 {
3786 	ring_buffer_wakeup(event);
3787 
3788 	if (event->pending_kill) {
3789 		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3790 		event->pending_kill = 0;
3791 	}
3792 }
3793 
3794 static void perf_pending_event(struct irq_work *entry)
3795 {
3796 	struct perf_event *event = container_of(entry,
3797 			struct perf_event, pending);
3798 
3799 	if (event->pending_disable) {
3800 		event->pending_disable = 0;
3801 		__perf_event_disable(event);
3802 	}
3803 
3804 	if (event->pending_wakeup) {
3805 		event->pending_wakeup = 0;
3806 		perf_event_wakeup(event);
3807 	}
3808 }
3809 
3810 /*
3811  * We assume there is only KVM supporting the callbacks.
3812  * Later on, we might change it to a list if there is
3813  * another virtualization implementation supporting the callbacks.
3814  */
3815 struct perf_guest_info_callbacks *perf_guest_cbs;
3816 
3817 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3818 {
3819 	perf_guest_cbs = cbs;
3820 	return 0;
3821 }
3822 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3823 
3824 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3825 {
3826 	perf_guest_cbs = NULL;
3827 	return 0;
3828 }
3829 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
3830 
3831 static void __perf_event_header__init_id(struct perf_event_header *header,
3832 					 struct perf_sample_data *data,
3833 					 struct perf_event *event)
3834 {
3835 	u64 sample_type = event->attr.sample_type;
3836 
3837 	data->type = sample_type;
3838 	header->size += event->id_header_size;
3839 
3840 	if (sample_type & PERF_SAMPLE_TID) {
3841 		/* namespace issues */
3842 		data->tid_entry.pid = perf_event_pid(event, current);
3843 		data->tid_entry.tid = perf_event_tid(event, current);
3844 	}
3845 
3846 	if (sample_type & PERF_SAMPLE_TIME)
3847 		data->time = perf_clock();
3848 
3849 	if (sample_type & PERF_SAMPLE_ID)
3850 		data->id = primary_event_id(event);
3851 
3852 	if (sample_type & PERF_SAMPLE_STREAM_ID)
3853 		data->stream_id = event->id;
3854 
3855 	if (sample_type & PERF_SAMPLE_CPU) {
3856 		data->cpu_entry.cpu	 = raw_smp_processor_id();
3857 		data->cpu_entry.reserved = 0;
3858 	}
3859 }
3860 
3861 void perf_event_header__init_id(struct perf_event_header *header,
3862 				struct perf_sample_data *data,
3863 				struct perf_event *event)
3864 {
3865 	if (event->attr.sample_id_all)
3866 		__perf_event_header__init_id(header, data, event);
3867 }
3868 
3869 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
3870 					   struct perf_sample_data *data)
3871 {
3872 	u64 sample_type = data->type;
3873 
3874 	if (sample_type & PERF_SAMPLE_TID)
3875 		perf_output_put(handle, data->tid_entry);
3876 
3877 	if (sample_type & PERF_SAMPLE_TIME)
3878 		perf_output_put(handle, data->time);
3879 
3880 	if (sample_type & PERF_SAMPLE_ID)
3881 		perf_output_put(handle, data->id);
3882 
3883 	if (sample_type & PERF_SAMPLE_STREAM_ID)
3884 		perf_output_put(handle, data->stream_id);
3885 
3886 	if (sample_type & PERF_SAMPLE_CPU)
3887 		perf_output_put(handle, data->cpu_entry);
3888 }
3889 
3890 void perf_event__output_id_sample(struct perf_event *event,
3891 				  struct perf_output_handle *handle,
3892 				  struct perf_sample_data *sample)
3893 {
3894 	if (event->attr.sample_id_all)
3895 		__perf_event__output_id_sample(handle, sample);
3896 }
3897 
3898 static void perf_output_read_one(struct perf_output_handle *handle,
3899 				 struct perf_event *event,
3900 				 u64 enabled, u64 running)
3901 {
3902 	u64 read_format = event->attr.read_format;
3903 	u64 values[4];
3904 	int n = 0;
3905 
3906 	values[n++] = perf_event_count(event);
3907 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3908 		values[n++] = enabled +
3909 			atomic64_read(&event->child_total_time_enabled);
3910 	}
3911 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3912 		values[n++] = running +
3913 			atomic64_read(&event->child_total_time_running);
3914 	}
3915 	if (read_format & PERF_FORMAT_ID)
3916 		values[n++] = primary_event_id(event);
3917 
3918 	__output_copy(handle, values, n * sizeof(u64));
3919 }
3920 
3921 /*
3922  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3923  */
3924 static void perf_output_read_group(struct perf_output_handle *handle,
3925 			    struct perf_event *event,
3926 			    u64 enabled, u64 running)
3927 {
3928 	struct perf_event *leader = event->group_leader, *sub;
3929 	u64 read_format = event->attr.read_format;
3930 	u64 values[5];
3931 	int n = 0;
3932 
3933 	values[n++] = 1 + leader->nr_siblings;
3934 
3935 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3936 		values[n++] = enabled;
3937 
3938 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3939 		values[n++] = running;
3940 
3941 	if (leader != event)
3942 		leader->pmu->read(leader);
3943 
3944 	values[n++] = perf_event_count(leader);
3945 	if (read_format & PERF_FORMAT_ID)
3946 		values[n++] = primary_event_id(leader);
3947 
3948 	__output_copy(handle, values, n * sizeof(u64));
3949 
3950 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3951 		n = 0;
3952 
3953 		if (sub != event)
3954 			sub->pmu->read(sub);
3955 
3956 		values[n++] = perf_event_count(sub);
3957 		if (read_format & PERF_FORMAT_ID)
3958 			values[n++] = primary_event_id(sub);
3959 
3960 		__output_copy(handle, values, n * sizeof(u64));
3961 	}
3962 }
3963 
3964 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3965 				 PERF_FORMAT_TOTAL_TIME_RUNNING)
3966 
3967 static void perf_output_read(struct perf_output_handle *handle,
3968 			     struct perf_event *event)
3969 {
3970 	u64 enabled = 0, running = 0;
3971 	u64 read_format = event->attr.read_format;
3972 
3973 	/*
3974 	 * compute total_time_enabled, total_time_running
3975 	 * based on snapshot values taken when the event
3976 	 * was last scheduled in.
3977 	 *
	 * We cannot simply call update_context_time()
	 * because of locking issues: we can be called from
	 * NMI context.
3981 	 */
3982 	if (read_format & PERF_FORMAT_TOTAL_TIMES)
3983 		calc_timer_values(event, &enabled, &running);
3984 
3985 	if (event->attr.read_format & PERF_FORMAT_GROUP)
3986 		perf_output_read_group(handle, event, enabled, running);
3987 	else
3988 		perf_output_read_one(handle, event, enabled, running);
3989 }
3990 
3991 void perf_output_sample(struct perf_output_handle *handle,
3992 			struct perf_event_header *header,
3993 			struct perf_sample_data *data,
3994 			struct perf_event *event)
3995 {
3996 	u64 sample_type = data->type;
3997 
3998 	perf_output_put(handle, *header);
3999 
4000 	if (sample_type & PERF_SAMPLE_IP)
4001 		perf_output_put(handle, data->ip);
4002 
4003 	if (sample_type & PERF_SAMPLE_TID)
4004 		perf_output_put(handle, data->tid_entry);
4005 
4006 	if (sample_type & PERF_SAMPLE_TIME)
4007 		perf_output_put(handle, data->time);
4008 
4009 	if (sample_type & PERF_SAMPLE_ADDR)
4010 		perf_output_put(handle, data->addr);
4011 
4012 	if (sample_type & PERF_SAMPLE_ID)
4013 		perf_output_put(handle, data->id);
4014 
4015 	if (sample_type & PERF_SAMPLE_STREAM_ID)
4016 		perf_output_put(handle, data->stream_id);
4017 
4018 	if (sample_type & PERF_SAMPLE_CPU)
4019 		perf_output_put(handle, data->cpu_entry);
4020 
4021 	if (sample_type & PERF_SAMPLE_PERIOD)
4022 		perf_output_put(handle, data->period);
4023 
4024 	if (sample_type & PERF_SAMPLE_READ)
4025 		perf_output_read(handle, event);
4026 
4027 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4028 		if (data->callchain) {
4029 			int size = 1;
4030 
			size += data->callchain->nr;
4033 
4034 			size *= sizeof(u64);
4035 
4036 			__output_copy(handle, data->callchain, size);
4037 		} else {
4038 			u64 nr = 0;
4039 			perf_output_put(handle, nr);
4040 		}
4041 	}
4042 
4043 	if (sample_type & PERF_SAMPLE_RAW) {
4044 		if (data->raw) {
4045 			perf_output_put(handle, data->raw->size);
4046 			__output_copy(handle, data->raw->data,
4047 					   data->raw->size);
4048 		} else {
4049 			struct {
4050 				u32	size;
4051 				u32	data;
4052 			} raw = {
4053 				.size = sizeof(u32),
4054 				.data = 0,
4055 			};
4056 			perf_output_put(handle, raw);
4057 		}
4058 	}
4059 
4060 	if (!event->attr.watermark) {
4061 		int wakeup_events = event->attr.wakeup_events;
4062 
4063 		if (wakeup_events) {
4064 			struct ring_buffer *rb = handle->rb;
4065 			int events = local_inc_return(&rb->events);
4066 
4067 			if (events >= wakeup_events) {
4068 				local_sub(wakeup_events, &rb->events);
4069 				local_inc(&rb->wakeup);
4070 			}
4071 		}
4072 	}
4073 }
4074 
4075 void perf_prepare_sample(struct perf_event_header *header,
4076 			 struct perf_sample_data *data,
4077 			 struct perf_event *event,
4078 			 struct pt_regs *regs)
4079 {
4080 	u64 sample_type = event->attr.sample_type;
4081 
4082 	header->type = PERF_RECORD_SAMPLE;
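	/*
	 * event->header_size accounts for the fixed-size sample fields
	 * outside the ID header (IP, ADDR, PERIOD, READ, ...), precomputed
	 * when the event was set up; the ID fields and the variable-size
	 * parts (callchain, raw) are added below.
	 */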
4083 	header->size = sizeof(*header) + event->header_size;
4084 
4085 	header->misc = 0;
4086 	header->misc |= perf_misc_flags(regs);
4087 
4088 	__perf_event_header__init_id(header, data, event);
4089 
4090 	if (sample_type & PERF_SAMPLE_IP)
4091 		data->ip = perf_instruction_pointer(regs);
4092 
4093 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4094 		int size = 1;
4095 
4096 		data->callchain = perf_callchain(regs);
4097 
4098 		if (data->callchain)
4099 			size += data->callchain->nr;
4100 
4101 		header->size += size * sizeof(u64);
4102 	}
4103 
4104 	if (sample_type & PERF_SAMPLE_RAW) {
4105 		int size = sizeof(u32);
4106 
4107 		if (data->raw)
4108 			size += data->raw->size;
4109 		else
4110 			size += sizeof(u32);
4111 
4112 		WARN_ON_ONCE(size & (sizeof(u64)-1));
4113 		header->size += size;
4114 	}
4115 }
4116 
4117 static void perf_event_output(struct perf_event *event,
4118 				struct perf_sample_data *data,
4119 				struct pt_regs *regs)
4120 {
4121 	struct perf_output_handle handle;
4122 	struct perf_event_header header;
4123 
4124 	/* protect the callchain buffers */
4125 	rcu_read_lock();
4126 
4127 	perf_prepare_sample(&header, data, event, regs);
4128 
4129 	if (perf_output_begin(&handle, event, header.size))
4130 		goto exit;
4131 
4132 	perf_output_sample(&handle, &header, data, event);
4133 
4134 	perf_output_end(&handle);
4135 
4136 exit:
4137 	rcu_read_unlock();
4138 }
4139 
4140 /*
4141  * read event_id
4142  */
4143 
4144 struct perf_read_event {
4145 	struct perf_event_header	header;
4146 
4147 	u32				pid;
4148 	u32				tid;
4149 };
4150 
4151 static void
4152 perf_event_read_event(struct perf_event *event,
4153 			struct task_struct *task)
4154 {
4155 	struct perf_output_handle handle;
4156 	struct perf_sample_data sample;
4157 	struct perf_read_event read_event = {
4158 		.header = {
4159 			.type = PERF_RECORD_READ,
4160 			.misc = 0,
4161 			.size = sizeof(read_event) + event->read_size,
4162 		},
4163 		.pid = perf_event_pid(event, task),
4164 		.tid = perf_event_tid(event, task),
4165 	};
4166 	int ret;
4167 
4168 	perf_event_header__init_id(&read_event.header, &sample, event);
4169 	ret = perf_output_begin(&handle, event, read_event.header.size);
4170 	if (ret)
4171 		return;
4172 
4173 	perf_output_put(&handle, read_event);
4174 	perf_output_read(&handle, event);
4175 	perf_event__output_id_sample(event, &handle, &sample);
4176 
4177 	perf_output_end(&handle);
4178 }
4179 
4180 /*
4181  * task tracking -- fork/exit
4182  *
4183  * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
4184  */
4185 
4186 struct perf_task_event {
4187 	struct task_struct		*task;
4188 	struct perf_event_context	*task_ctx;
4189 
4190 	struct {
4191 		struct perf_event_header	header;
4192 
4193 		u32				pid;
4194 		u32				ppid;
4195 		u32				tid;
4196 		u32				ptid;
4197 		u64				time;
4198 	} event_id;
4199 };
4200 
4201 static void perf_event_task_output(struct perf_event *event,
4202 				     struct perf_task_event *task_event)
4203 {
4204 	struct perf_output_handle handle;
4205 	struct perf_sample_data	sample;
4206 	struct task_struct *task = task_event->task;
4207 	int ret, size = task_event->event_id.header.size;
4208 
4209 	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
4210 
4211 	ret = perf_output_begin(&handle, event,
4212 				task_event->event_id.header.size);
4213 	if (ret)
4214 		goto out;
4215 
4216 	task_event->event_id.pid = perf_event_pid(event, task);
4217 	task_event->event_id.ppid = perf_event_pid(event, current);
4218 
4219 	task_event->event_id.tid = perf_event_tid(event, task);
4220 	task_event->event_id.ptid = perf_event_tid(event, current);
4221 
4222 	perf_output_put(&handle, task_event->event_id);
4223 
4224 	perf_event__output_id_sample(event, &handle, &sample);
4225 
4226 	perf_output_end(&handle);
4227 out:
4228 	task_event->event_id.header.size = size;
4229 }
4230 
4231 static int perf_event_task_match(struct perf_event *event)
4232 {
4233 	if (event->state < PERF_EVENT_STATE_INACTIVE)
4234 		return 0;
4235 
4236 	if (!event_filter_match(event))
4237 		return 0;
4238 
4239 	if (event->attr.comm || event->attr.mmap ||
4240 	    event->attr.mmap_data || event->attr.task)
4241 		return 1;
4242 
4243 	return 0;
4244 }
4245 
4246 static void perf_event_task_ctx(struct perf_event_context *ctx,
4247 				  struct perf_task_event *task_event)
4248 {
4249 	struct perf_event *event;
4250 
4251 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4252 		if (perf_event_task_match(event))
4253 			perf_event_task_output(event, task_event);
4254 	}
4255 }
4256 
4257 static void perf_event_task_event(struct perf_task_event *task_event)
4258 {
4259 	struct perf_cpu_context *cpuctx;
4260 	struct perf_event_context *ctx;
4261 	struct pmu *pmu;
4262 	int ctxn;
4263 
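	/*
	 * Deliver to each pmu's cpu context, and to the task context if
	 * there is one. pmus may share a cpu context (same task_ctx_nr),
	 * so the active_pmu check makes sure every shared context is
	 * visited only once.
	 */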
4264 	rcu_read_lock();
4265 	list_for_each_entry_rcu(pmu, &pmus, entry) {
4266 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4267 		if (cpuctx->active_pmu != pmu)
4268 			goto next;
4269 		perf_event_task_ctx(&cpuctx->ctx, task_event);
4270 
4271 		ctx = task_event->task_ctx;
4272 		if (!ctx) {
4273 			ctxn = pmu->task_ctx_nr;
4274 			if (ctxn < 0)
4275 				goto next;
4276 			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4277 		}
4278 		if (ctx)
4279 			perf_event_task_ctx(ctx, task_event);
4280 next:
4281 		put_cpu_ptr(pmu->pmu_cpu_context);
4282 	}
4283 	rcu_read_unlock();
4284 }
4285 
4286 static void perf_event_task(struct task_struct *task,
4287 			      struct perf_event_context *task_ctx,
4288 			      int new)
4289 {
4290 	struct perf_task_event task_event;
4291 
4292 	if (!atomic_read(&nr_comm_events) &&
4293 	    !atomic_read(&nr_mmap_events) &&
4294 	    !atomic_read(&nr_task_events))
4295 		return;
4296 
4297 	task_event = (struct perf_task_event){
4298 		.task	  = task,
4299 		.task_ctx = task_ctx,
4300 		.event_id    = {
4301 			.header = {
4302 				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
4303 				.misc = 0,
4304 				.size = sizeof(task_event.event_id),
4305 			},
4306 			/* .pid  */
4307 			/* .ppid */
4308 			/* .tid  */
4309 			/* .ptid */
4310 			.time = perf_clock(),
4311 		},
4312 	};
4313 
4314 	perf_event_task_event(&task_event);
4315 }
4316 
4317 void perf_event_fork(struct task_struct *task)
4318 {
4319 	perf_event_task(task, NULL, 1);
4320 }
4321 
4322 /*
4323  * comm tracking
4324  */
4325 
4326 struct perf_comm_event {
4327 	struct task_struct	*task;
4328 	char			*comm;
4329 	int			comm_size;
4330 
4331 	struct {
4332 		struct perf_event_header	header;
4333 
4334 		u32				pid;
4335 		u32				tid;
4336 	} event_id;
4337 };
4338 
4339 static void perf_event_comm_output(struct perf_event *event,
4340 				     struct perf_comm_event *comm_event)
4341 {
4342 	struct perf_output_handle handle;
4343 	struct perf_sample_data sample;
4344 	int size = comm_event->event_id.header.size;
4345 	int ret;
4346 
4347 	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4348 	ret = perf_output_begin(&handle, event,
4349 				comm_event->event_id.header.size);
4350 
4351 	if (ret)
4352 		goto out;
4353 
4354 	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4355 	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4356 
4357 	perf_output_put(&handle, comm_event->event_id);
4358 	__output_copy(&handle, comm_event->comm,
4359 				   comm_event->comm_size);
4360 
4361 	perf_event__output_id_sample(event, &handle, &sample);
4362 
4363 	perf_output_end(&handle);
4364 out:
4365 	comm_event->event_id.header.size = size;
4366 }
4367 
4368 static int perf_event_comm_match(struct perf_event *event)
4369 {
4370 	if (event->state < PERF_EVENT_STATE_INACTIVE)
4371 		return 0;
4372 
4373 	if (!event_filter_match(event))
4374 		return 0;
4375 
4376 	if (event->attr.comm)
4377 		return 1;
4378 
4379 	return 0;
4380 }
4381 
4382 static void perf_event_comm_ctx(struct perf_event_context *ctx,
4383 				  struct perf_comm_event *comm_event)
4384 {
4385 	struct perf_event *event;
4386 
4387 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4388 		if (perf_event_comm_match(event))
4389 			perf_event_comm_output(event, comm_event);
4390 	}
4391 }
4392 
4393 static void perf_event_comm_event(struct perf_comm_event *comm_event)
4394 {
4395 	struct perf_cpu_context *cpuctx;
4396 	struct perf_event_context *ctx;
4397 	char comm[TASK_COMM_LEN];
4398 	unsigned int size;
4399 	struct pmu *pmu;
4400 	int ctxn;
4401 
4402 	memset(comm, 0, sizeof(comm));
4403 	strlcpy(comm, comm_event->task->comm, sizeof(comm));
4404 	size = ALIGN(strlen(comm)+1, sizeof(u64));
4405 
4406 	comm_event->comm = comm;
4407 	comm_event->comm_size = size;
4408 
4409 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
4410 	rcu_read_lock();
4411 	list_for_each_entry_rcu(pmu, &pmus, entry) {
4412 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4413 		if (cpuctx->active_pmu != pmu)
4414 			goto next;
4415 		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
4416 
4417 		ctxn = pmu->task_ctx_nr;
4418 		if (ctxn < 0)
4419 			goto next;
4420 
4421 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4422 		if (ctx)
4423 			perf_event_comm_ctx(ctx, comm_event);
4424 next:
4425 		put_cpu_ptr(pmu->pmu_cpu_context);
4426 	}
4427 	rcu_read_unlock();
4428 }
4429 
4430 void perf_event_comm(struct task_struct *task)
4431 {
4432 	struct perf_comm_event comm_event;
4433 	struct perf_event_context *ctx;
4434 	int ctxn;
4435 
4436 	for_each_task_context_nr(ctxn) {
4437 		ctx = task->perf_event_ctxp[ctxn];
4438 		if (!ctx)
4439 			continue;
4440 
4441 		perf_event_enable_on_exec(ctx);
4442 	}
4443 
4444 	if (!atomic_read(&nr_comm_events))
4445 		return;
4446 
4447 	comm_event = (struct perf_comm_event){
4448 		.task	= task,
4449 		/* .comm      */
4450 		/* .comm_size */
4451 		.event_id  = {
4452 			.header = {
4453 				.type = PERF_RECORD_COMM,
4454 				.misc = 0,
4455 				/* .size */
4456 			},
4457 			/* .pid */
4458 			/* .tid */
4459 		},
4460 	};
4461 
4462 	perf_event_comm_event(&comm_event);
4463 }
4464 
4465 /*
4466  * mmap tracking
4467  */
4468 
4469 struct perf_mmap_event {
4470 	struct vm_area_struct	*vma;
4471 
4472 	const char		*file_name;
4473 	int			file_size;
4474 
4475 	struct {
4476 		struct perf_event_header	header;
4477 
4478 		u32				pid;
4479 		u32				tid;
4480 		u64				start;
4481 		u64				len;
4482 		u64				pgoff;
4483 	} event_id;
4484 };
4485 
4486 static void perf_event_mmap_output(struct perf_event *event,
4487 				     struct perf_mmap_event *mmap_event)
4488 {
4489 	struct perf_output_handle handle;
4490 	struct perf_sample_data sample;
4491 	int size = mmap_event->event_id.header.size;
4492 	int ret;
4493 
4494 	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
4495 	ret = perf_output_begin(&handle, event,
4496 				mmap_event->event_id.header.size);
4497 	if (ret)
4498 		goto out;
4499 
4500 	mmap_event->event_id.pid = perf_event_pid(event, current);
4501 	mmap_event->event_id.tid = perf_event_tid(event, current);
4502 
4503 	perf_output_put(&handle, mmap_event->event_id);
4504 	__output_copy(&handle, mmap_event->file_name,
4505 				   mmap_event->file_size);
4506 
4507 	perf_event__output_id_sample(event, &handle, &sample);
4508 
4509 	perf_output_end(&handle);
4510 out:
4511 	mmap_event->event_id.header.size = size;
4512 }
4513 
4514 static int perf_event_mmap_match(struct perf_event *event,
4515 				   struct perf_mmap_event *mmap_event,
4516 				   int executable)
4517 {
4518 	if (event->state < PERF_EVENT_STATE_INACTIVE)
4519 		return 0;
4520 
4521 	if (!event_filter_match(event))
4522 		return 0;
4523 
4524 	if ((!executable && event->attr.mmap_data) ||
4525 	    (executable && event->attr.mmap))
4526 		return 1;
4527 
4528 	return 0;
4529 }
4530 
4531 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
4532 				  struct perf_mmap_event *mmap_event,
4533 				  int executable)
4534 {
4535 	struct perf_event *event;
4536 
4537 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4538 		if (perf_event_mmap_match(event, mmap_event, executable))
4539 			perf_event_mmap_output(event, mmap_event);
4540 	}
4541 }
4542 
4543 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4544 {
4545 	struct perf_cpu_context *cpuctx;
4546 	struct perf_event_context *ctx;
4547 	struct vm_area_struct *vma = mmap_event->vma;
4548 	struct file *file = vma->vm_file;
4549 	unsigned int size;
4550 	char tmp[16];
4551 	char *buf = NULL;
4552 	const char *name;
4553 	struct pmu *pmu;
4554 	int ctxn;
4555 
4556 	memset(tmp, 0, sizeof(tmp));
4557 
4558 	if (file) {
4559 		/*
		 * d_path() works from the end of the buffer backwards, so we
4561 		 * need to add enough zero bytes after the string to handle
4562 		 * the 64bit alignment we do later.
4563 		 */
4564 		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
4565 		if (!buf) {
4566 			name = strncpy(tmp, "//enomem", sizeof(tmp));
4567 			goto got_name;
4568 		}
4569 		name = d_path(&file->f_path, buf, PATH_MAX);
4570 		if (IS_ERR(name)) {
4571 			name = strncpy(tmp, "//toolong", sizeof(tmp));
4572 			goto got_name;
4573 		}
4574 	} else {
4575 		if (arch_vma_name(mmap_event->vma)) {
4576 			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4577 				       sizeof(tmp));
4578 			goto got_name;
4579 		}
4580 
4581 		if (!vma->vm_mm) {
4582 			name = strncpy(tmp, "[vdso]", sizeof(tmp));
4583 			goto got_name;
4584 		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
4585 				vma->vm_end >= vma->vm_mm->brk) {
4586 			name = strncpy(tmp, "[heap]", sizeof(tmp));
4587 			goto got_name;
4588 		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
4589 				vma->vm_end >= vma->vm_mm->start_stack) {
4590 			name = strncpy(tmp, "[stack]", sizeof(tmp));
4591 			goto got_name;
4592 		}
4593 
4594 		name = strncpy(tmp, "//anon", sizeof(tmp));
4595 		goto got_name;
4596 	}
4597 
4598 got_name:
4599 	size = ALIGN(strlen(name)+1, sizeof(u64));
4600 
4601 	mmap_event->file_name = name;
4602 	mmap_event->file_size = size;
4603 
4604 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4605 
4606 	rcu_read_lock();
4607 	list_for_each_entry_rcu(pmu, &pmus, entry) {
4608 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4609 		if (cpuctx->active_pmu != pmu)
4610 			goto next;
4611 		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4612 					vma->vm_flags & VM_EXEC);
4613 
4614 		ctxn = pmu->task_ctx_nr;
4615 		if (ctxn < 0)
4616 			goto next;
4617 
4618 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4619 		if (ctx) {
4620 			perf_event_mmap_ctx(ctx, mmap_event,
4621 					vma->vm_flags & VM_EXEC);
4622 		}
4623 next:
4624 		put_cpu_ptr(pmu->pmu_cpu_context);
4625 	}
4626 	rcu_read_unlock();
4627 
4628 	kfree(buf);
4629 }
4630 
4631 void perf_event_mmap(struct vm_area_struct *vma)
4632 {
4633 	struct perf_mmap_event mmap_event;
4634 
4635 	if (!atomic_read(&nr_mmap_events))
4636 		return;
4637 
4638 	mmap_event = (struct perf_mmap_event){
4639 		.vma	= vma,
4640 		/* .file_name */
4641 		/* .file_size */
4642 		.event_id  = {
4643 			.header = {
4644 				.type = PERF_RECORD_MMAP,
4645 				.misc = PERF_RECORD_MISC_USER,
4646 				/* .size */
4647 			},
4648 			/* .pid */
4649 			/* .tid */
4650 			.start  = vma->vm_start,
4651 			.len    = vma->vm_end - vma->vm_start,
4652 			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
4653 		},
4654 	};
4655 
4656 	perf_event_mmap_event(&mmap_event);
4657 }
4658 
4659 /*
4660  * IRQ throttle logging
4661  */
4662 
4663 static void perf_log_throttle(struct perf_event *event, int enable)
4664 {
4665 	struct perf_output_handle handle;
4666 	struct perf_sample_data sample;
4667 	int ret;
4668 
4669 	struct {
4670 		struct perf_event_header	header;
4671 		u64				time;
4672 		u64				id;
4673 		u64				stream_id;
4674 	} throttle_event = {
4675 		.header = {
4676 			.type = PERF_RECORD_THROTTLE,
4677 			.misc = 0,
4678 			.size = sizeof(throttle_event),
4679 		},
4680 		.time		= perf_clock(),
4681 		.id		= primary_event_id(event),
4682 		.stream_id	= event->id,
4683 	};
4684 
4685 	if (enable)
4686 		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4687 
4688 	perf_event_header__init_id(&throttle_event.header, &sample, event);
4689 
4690 	ret = perf_output_begin(&handle, event,
4691 				throttle_event.header.size);
4692 	if (ret)
4693 		return;
4694 
4695 	perf_output_put(&handle, throttle_event);
4696 	perf_event__output_id_sample(event, &handle, &sample);
4697 	perf_output_end(&handle);
4698 }
4699 
4700 /*
4701  * Generic event overflow handling, sampling.
4702  */
4703 
4704 static int __perf_event_overflow(struct perf_event *event,
4705 				   int throttle, struct perf_sample_data *data,
4706 				   struct pt_regs *regs)
4707 {
4708 	int events = atomic_read(&event->event_limit);
4709 	struct hw_perf_event *hwc = &event->hw;
4710 	int ret = 0;
4711 
4712 	/*
4713 	 * Non-sampling counters might still use the PMI to fold short
4714 	 * hardware counters, ignore those.
4715 	 */
4716 	if (unlikely(!is_sampling_event(event)))
4717 		return 0;
4718 
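	/*
	 * Throttle once we have taken more than max_samples_per_tick
	 * interrupts this tick; setting hwc->interrupts to MAX_INTERRUPTS
	 * stops further samples until the tick path unthrottles the event.
	 */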
4719 	if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
4720 		if (throttle) {
4721 			hwc->interrupts = MAX_INTERRUPTS;
4722 			perf_log_throttle(event, 0);
4723 			ret = 1;
4724 		}
	} else {
		hwc->interrupts++;
	}
4727 
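	/*
	 * For freq-based sampling, re-estimate the period from the time
	 * between the last two overflows, so that the event converges on
	 * attr.sample_freq samples per second.
	 */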
4728 	if (event->attr.freq) {
4729 		u64 now = perf_clock();
4730 		s64 delta = now - hwc->freq_time_stamp;
4731 
4732 		hwc->freq_time_stamp = now;
4733 
4734 		if (delta > 0 && delta < 2*TICK_NSEC)
4735 			perf_adjust_period(event, delta, hwc->last_period);
4736 	}
4737 
4738 	/*
4739 	 * XXX event_limit might not quite work as expected on inherited
4740 	 * events
4741 	 */
4742 
4743 	event->pending_kill = POLL_IN;
4744 	if (events && atomic_dec_and_test(&event->event_limit)) {
4745 		ret = 1;
4746 		event->pending_kill = POLL_HUP;
4747 		event->pending_disable = 1;
4748 		irq_work_queue(&event->pending);
4749 	}
4750 
4751 	if (event->overflow_handler)
4752 		event->overflow_handler(event, data, regs);
4753 	else
4754 		perf_event_output(event, data, regs);
4755 
4756 	if (event->fasync && event->pending_kill) {
4757 		event->pending_wakeup = 1;
4758 		irq_work_queue(&event->pending);
4759 	}
4760 
4761 	return ret;
4762 }
4763 
4764 int perf_event_overflow(struct perf_event *event,
4765 			  struct perf_sample_data *data,
4766 			  struct pt_regs *regs)
4767 {
4768 	return __perf_event_overflow(event, 1, data, regs);
4769 }
4770 
4771 /*
4772  * Generic software event infrastructure
4773  */
4774 
4775 struct swevent_htable {
4776 	struct swevent_hlist		*swevent_hlist;
4777 	struct mutex			hlist_mutex;
4778 	int				hlist_refcount;
4779 
	/* Recursion avoidance in each context */
4781 	int				recursion[PERF_NR_CONTEXTS];
4782 };
4783 
4784 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
4785 
4786 /*
4787  * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period value
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as a trigger.
4791  */
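/*
 * E.g. with sample_period = 100 and period_left driven up to 220 by a
 * large increment, this reports nr = (100 + 220) / 100 = 3 overflows
 * and leaves period_left at 220 - 300 = -80 for the next round.
 */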
4792 
4793 static u64 perf_swevent_set_period(struct perf_event *event)
4794 {
4795 	struct hw_perf_event *hwc = &event->hw;
4796 	u64 period = hwc->last_period;
4797 	u64 nr, offset;
4798 	s64 old, val;
4799 
4800 	hwc->last_period = hwc->sample_period;
4801 
4802 again:
4803 	old = val = local64_read(&hwc->period_left);
4804 	if (val < 0)
4805 		return 0;
4806 
4807 	nr = div64_u64(period + val, period);
4808 	offset = nr * period;
4809 	val -= offset;
4810 	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
4811 		goto again;
4812 
4813 	return nr;
4814 }
4815 
4816 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
4817 				    struct perf_sample_data *data,
4818 				    struct pt_regs *regs)
4819 {
4820 	struct hw_perf_event *hwc = &event->hw;
4821 	int throttle = 0;
4822 
4823 	data->period = event->hw.last_period;
4824 	if (!overflow)
4825 		overflow = perf_swevent_set_period(event);
4826 
4827 	if (hwc->interrupts == MAX_INTERRUPTS)
4828 		return;
4829 
4830 	for (; overflow; overflow--) {
4831 		if (__perf_event_overflow(event, throttle,
4832 					    data, regs)) {
4833 			/*
4834 			 * We inhibit the overflow from happening when
4835 			 * hwc->interrupts == MAX_INTERRUPTS.
4836 			 */
4837 			break;
4838 		}
4839 		throttle = 1;
4840 	}
4841 }
4842 
4843 static void perf_swevent_event(struct perf_event *event, u64 nr,
4844 			       struct perf_sample_data *data,
4845 			       struct pt_regs *regs)
4846 {
4847 	struct hw_perf_event *hwc = &event->hw;
4848 
4849 	local64_add(nr, &event->count);
4850 
4851 	if (!regs)
4852 		return;
4853 
4854 	if (!is_sampling_event(event))
4855 		return;
4856 
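	/*
	 * Fast path: a period of one with a single-event increment means
	 * every occurrence overflows, so skip the period_left accounting.
	 */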
4857 	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4858 		return perf_swevent_overflow(event, 1, data, regs);
4859 
4860 	if (local64_add_negative(nr, &hwc->period_left))
4861 		return;
4862 
4863 	perf_swevent_overflow(event, 0, data, regs);
4864 }
4865 
4866 static int perf_exclude_event(struct perf_event *event,
4867 			      struct pt_regs *regs)
4868 {
4869 	if (event->hw.state & PERF_HES_STOPPED)
4870 		return 1;
4871 
4872 	if (regs) {
4873 		if (event->attr.exclude_user && user_mode(regs))
4874 			return 1;
4875 
4876 		if (event->attr.exclude_kernel && !user_mode(regs))
4877 			return 1;
4878 	}
4879 
4880 	return 0;
4881 }
4882 
4883 static int perf_swevent_match(struct perf_event *event,
4884 				enum perf_type_id type,
4885 				u32 event_id,
4886 				struct perf_sample_data *data,
4887 				struct pt_regs *regs)
4888 {
4889 	if (event->attr.type != type)
4890 		return 0;
4891 
4892 	if (event->attr.config != event_id)
4893 		return 0;
4894 
4895 	if (perf_exclude_event(event, regs))
4896 		return 0;
4897 
4898 	return 1;
4899 }
4900 
4901 static inline u64 swevent_hash(u64 type, u32 event_id)
4902 {
4903 	u64 val = event_id | (type << 32);
4904 
4905 	return hash_64(val, SWEVENT_HLIST_BITS);
4906 }
4907 
4908 static inline struct hlist_head *
4909 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
4910 {
4911 	u64 hash = swevent_hash(type, event_id);
4912 
4913 	return &hlist->heads[hash];
4914 }
4915 
/* For the read side: look up the head when events trigger */
4917 static inline struct hlist_head *
4918 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
4919 {
4920 	struct swevent_hlist *hlist;
4921 
4922 	hlist = rcu_dereference(swhash->swevent_hlist);
4923 	if (!hlist)
4924 		return NULL;
4925 
4926 	return __find_swevent_head(hlist, type, event_id);
4927 }
4928 
4929 /* For the event head insertion and removal in the hlist */
4930 static inline struct hlist_head *
4931 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
4932 {
4933 	struct swevent_hlist *hlist;
4934 	u32 event_id = event->attr.config;
4935 	u64 type = event->attr.type;
4936 
4937 	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release, which makes the protected version suitable here.
	 * The context lock guarantees that serialization.
4941 	 */
4942 	hlist = rcu_dereference_protected(swhash->swevent_hlist,
4943 					  lockdep_is_held(&event->ctx->lock));
4944 	if (!hlist)
4945 		return NULL;
4946 
4947 	return __find_swevent_head(hlist, type, event_id);
4948 }
4949 
4950 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4951 				    u64 nr,
4952 				    struct perf_sample_data *data,
4953 				    struct pt_regs *regs)
4954 {
4955 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4956 	struct perf_event *event;
4957 	struct hlist_node *node;
4958 	struct hlist_head *head;
4959 
4960 	rcu_read_lock();
4961 	head = find_swevent_head_rcu(swhash, type, event_id);
4962 	if (!head)
4963 		goto end;
4964 
4965 	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4966 		if (perf_swevent_match(event, type, event_id, data, regs))
4967 			perf_swevent_event(event, nr, data, regs);
4968 	}
4969 end:
4970 	rcu_read_unlock();
4971 }
4972 
4973 int perf_swevent_get_recursion_context(void)
4974 {
4975 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4976 
4977 	return get_recursion_context(swhash->recursion);
4978 }
4979 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
4980 
4981 inline void perf_swevent_put_recursion_context(int rctx)
4982 {
4983 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4984 
4985 	put_recursion_context(swhash->recursion, rctx);
4986 }
4987 
4988 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
4989 {
4990 	struct perf_sample_data data;
4991 	int rctx;
4992 
4993 	preempt_disable_notrace();
4994 	rctx = perf_swevent_get_recursion_context();
4995 	if (rctx < 0)
4996 		return;
4997 
4998 	perf_sample_data_init(&data, addr);
4999 
5000 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
5001 
5002 	perf_swevent_put_recursion_context(rctx);
5003 	preempt_enable_notrace();
5004 }
5005 
5006 static void perf_swevent_read(struct perf_event *event)
5007 {
5008 }
5009 
5010 static int perf_swevent_add(struct perf_event *event, int flags)
5011 {
5012 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5013 	struct hw_perf_event *hwc = &event->hw;
5014 	struct hlist_head *head;
5015 
5016 	if (is_sampling_event(event)) {
5017 		hwc->last_period = hwc->sample_period;
5018 		perf_swevent_set_period(event);
5019 	}
5020 
5021 	hwc->state = !(flags & PERF_EF_START);
5022 
5023 	head = find_swevent_head(swhash, event);
5024 	if (WARN_ON_ONCE(!head))
5025 		return -EINVAL;
5026 
5027 	hlist_add_head_rcu(&event->hlist_entry, head);
5028 
5029 	return 0;
5030 }
5031 
5032 static void perf_swevent_del(struct perf_event *event, int flags)
5033 {
5034 	hlist_del_rcu(&event->hlist_entry);
5035 }
5036 
5037 static void perf_swevent_start(struct perf_event *event, int flags)
5038 {
5039 	event->hw.state = 0;
5040 }
5041 
5042 static void perf_swevent_stop(struct perf_event *event, int flags)
5043 {
5044 	event->hw.state = PERF_HES_STOPPED;
5045 }
5046 
5047 /* Deref the hlist from the update side */
5048 static inline struct swevent_hlist *
5049 swevent_hlist_deref(struct swevent_htable *swhash)
5050 {
5051 	return rcu_dereference_protected(swhash->swevent_hlist,
5052 					 lockdep_is_held(&swhash->hlist_mutex));
5053 }
5054 
5055 static void swevent_hlist_release(struct swevent_htable *swhash)
5056 {
5057 	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
5058 
5059 	if (!hlist)
5060 		return;
5061 
5062 	rcu_assign_pointer(swhash->swevent_hlist, NULL);
5063 	kfree_rcu(hlist, rcu_head);
5064 }
5065 
5066 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5067 {
5068 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5069 
5070 	mutex_lock(&swhash->hlist_mutex);
5071 
5072 	if (!--swhash->hlist_refcount)
5073 		swevent_hlist_release(swhash);
5074 
5075 	mutex_unlock(&swhash->hlist_mutex);
5076 }
5077 
5078 static void swevent_hlist_put(struct perf_event *event)
5079 {
5080 	int cpu;
5081 
5082 	if (event->cpu != -1) {
5083 		swevent_hlist_put_cpu(event, event->cpu);
5084 		return;
5085 	}
5086 
5087 	for_each_possible_cpu(cpu)
5088 		swevent_hlist_put_cpu(event, cpu);
5089 }
5090 
5091 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5092 {
5093 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5094 	int err = 0;
5095 
5096 	mutex_lock(&swhash->hlist_mutex);
5097 
5098 	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
5099 		struct swevent_hlist *hlist;
5100 
5101 		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5102 		if (!hlist) {
5103 			err = -ENOMEM;
5104 			goto exit;
5105 		}
5106 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
5107 	}
5108 	swhash->hlist_refcount++;
5109 exit:
5110 	mutex_unlock(&swhash->hlist_mutex);
5111 
5112 	return err;
5113 }
5114 
5115 static int swevent_hlist_get(struct perf_event *event)
5116 {
5117 	int err;
5118 	int cpu, failed_cpu;
5119 
5120 	if (event->cpu != -1)
5121 		return swevent_hlist_get_cpu(event, event->cpu);
5122 
5123 	get_online_cpus();
5124 	for_each_possible_cpu(cpu) {
5125 		err = swevent_hlist_get_cpu(event, cpu);
5126 		if (err) {
5127 			failed_cpu = cpu;
5128 			goto fail;
5129 		}
5130 	}
5131 	put_online_cpus();
5132 
5133 	return 0;
5134 fail:
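	/*
	 * Drop the references taken so far; stop at the cpu that failed
	 * to allocate.
	 */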
5135 	for_each_possible_cpu(cpu) {
5136 		if (cpu == failed_cpu)
5137 			break;
5138 		swevent_hlist_put_cpu(event, cpu);
5139 	}
5140 
5141 	put_online_cpus();
5142 	return err;
5143 }
5144 
5145 struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
5146 
5147 static void sw_perf_event_destroy(struct perf_event *event)
5148 {
5149 	u64 event_id = event->attr.config;
5150 
5151 	WARN_ON(event->parent);
5152 
5153 	jump_label_dec(&perf_swevent_enabled[event_id]);
5154 	swevent_hlist_put(event);
5155 }
5156 
5157 static int perf_swevent_init(struct perf_event *event)
5158 {
5159 	int event_id = event->attr.config;
5160 
5161 	if (event->attr.type != PERF_TYPE_SOFTWARE)
5162 		return -ENOENT;
5163 
5164 	switch (event_id) {
5165 	case PERF_COUNT_SW_CPU_CLOCK:
5166 	case PERF_COUNT_SW_TASK_CLOCK:
5167 		return -ENOENT;
5168 
5169 	default:
5170 		break;
5171 	}
5172 
5173 	if (event_id >= PERF_COUNT_SW_MAX)
5174 		return -ENOENT;
5175 
5176 	if (!event->parent) {
5177 		int err;
5178 
5179 		err = swevent_hlist_get(event);
5180 		if (err)
5181 			return err;
5182 
5183 		jump_label_inc(&perf_swevent_enabled[event_id]);
5184 		event->destroy = sw_perf_event_destroy;
5185 	}
5186 
5187 	return 0;
5188 }
5189 
5190 static struct pmu perf_swevent = {
5191 	.task_ctx_nr	= perf_sw_context,
5192 
5193 	.event_init	= perf_swevent_init,
5194 	.add		= perf_swevent_add,
5195 	.del		= perf_swevent_del,
5196 	.start		= perf_swevent_start,
5197 	.stop		= perf_swevent_stop,
5198 	.read		= perf_swevent_read,
5199 };
5200 
5201 #ifdef CONFIG_EVENT_TRACING
5202 
5203 static int perf_tp_filter_match(struct perf_event *event,
5204 				struct perf_sample_data *data)
5205 {
5206 	void *record = data->raw->data;
5207 
5208 	if (likely(!event->filter) || filter_match_preds(event->filter, record))
5209 		return 1;
5210 	return 0;
5211 }
5212 
5213 static int perf_tp_event_match(struct perf_event *event,
5214 				struct perf_sample_data *data,
5215 				struct pt_regs *regs)
5216 {
5217 	if (event->hw.state & PERF_HES_STOPPED)
5218 		return 0;
5219 	/*
5220 	 * All tracepoints are from kernel-space.
5221 	 */
5222 	if (event->attr.exclude_kernel)
5223 		return 0;
5224 
5225 	if (!perf_tp_filter_match(event, data))
5226 		return 0;
5227 
5228 	return 1;
5229 }
5230 
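/*
 * Called from the tracepoint glue with a recursion context already
 * held: @head is the hlist of events attached to this tracepoint and
 * @rctx is released on the way out.
 */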
5231 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5232 		   struct pt_regs *regs, struct hlist_head *head, int rctx)
5233 {
5234 	struct perf_sample_data data;
5235 	struct perf_event *event;
5236 	struct hlist_node *node;
5237 
5238 	struct perf_raw_record raw = {
5239 		.size = entry_size,
5240 		.data = record,
5241 	};
5242 
5243 	perf_sample_data_init(&data, addr);
5244 	data.raw = &raw;
5245 
5246 	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
5247 		if (perf_tp_event_match(event, &data, regs))
5248 			perf_swevent_event(event, count, &data, regs);
5249 	}
5250 
5251 	perf_swevent_put_recursion_context(rctx);
5252 }
5253 EXPORT_SYMBOL_GPL(perf_tp_event);
5254 
5255 static void tp_perf_event_destroy(struct perf_event *event)
5256 {
5257 	perf_trace_destroy(event);
5258 }
5259 
5260 static int perf_tp_event_init(struct perf_event *event)
5261 {
5262 	int err;
5263 
5264 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
5265 		return -ENOENT;
5266 
5267 	err = perf_trace_init(event);
5268 	if (err)
5269 		return err;
5270 
5271 	event->destroy = tp_perf_event_destroy;
5272 
5273 	return 0;
5274 }
5275 
5276 static struct pmu perf_tracepoint = {
5277 	.task_ctx_nr	= perf_sw_context,
5278 
5279 	.event_init	= perf_tp_event_init,
5280 	.add		= perf_trace_add,
5281 	.del		= perf_trace_del,
5282 	.start		= perf_swevent_start,
5283 	.stop		= perf_swevent_stop,
5284 	.read		= perf_swevent_read,
5285 };
5286 
5287 static inline void perf_tp_register(void)
5288 {
5289 	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
5290 }
5291 
5292 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5293 {
5294 	char *filter_str;
5295 	int ret;
5296 
5297 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
5298 		return -EINVAL;
5299 
5300 	filter_str = strndup_user(arg, PAGE_SIZE);
5301 	if (IS_ERR(filter_str))
5302 		return PTR_ERR(filter_str);
5303 
5304 	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5305 
5306 	kfree(filter_str);
5307 	return ret;
5308 }
5309 
5310 static void perf_event_free_filter(struct perf_event *event)
5311 {
5312 	ftrace_profile_free_filter(event);
5313 }
5314 
5315 #else
5316 
5317 static inline void perf_tp_register(void)
5318 {
5319 }
5320 
5321 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5322 {
5323 	return -ENOENT;
5324 }
5325 
5326 static void perf_event_free_filter(struct perf_event *event)
5327 {
5328 }
5329 
5330 #endif /* CONFIG_EVENT_TRACING */
5331 
5332 #ifdef CONFIG_HAVE_HW_BREAKPOINT
5333 void perf_bp_event(struct perf_event *bp, void *data)
5334 {
5335 	struct perf_sample_data sample;
5336 	struct pt_regs *regs = data;
5337 
5338 	perf_sample_data_init(&sample, bp->attr.bp_addr);
5339 
5340 	if (!bp->hw.state && !perf_exclude_event(bp, regs))
5341 		perf_swevent_event(bp, 1, &sample, regs);
5342 }
5343 #endif
5344 
5345 /*
5346  * hrtimer based swevent callback
5347  */
5348 
5349 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
5350 {
5351 	enum hrtimer_restart ret = HRTIMER_RESTART;
5352 	struct perf_sample_data data;
5353 	struct pt_regs *regs;
5354 	struct perf_event *event;
5355 	u64 period;
5356 
5357 	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
5358 
5359 	if (event->state != PERF_EVENT_STATE_ACTIVE)
5360 		return HRTIMER_NORESTART;
5361 
5362 	event->pmu->read(event);
5363 
5364 	perf_sample_data_init(&data, 0);
5365 	data.period = event->hw.last_period;
5366 	regs = get_irq_regs();
5367 
5368 	if (regs && !perf_exclude_event(event, regs)) {
5369 		if (!(event->attr.exclude_idle && current->pid == 0))
5370 			if (perf_event_overflow(event, &data, regs))
5371 				ret = HRTIMER_NORESTART;
5372 	}
5373 
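	/*
	 * Re-arm the timer, clamped to a 10us minimum so that a tiny
	 * sample period cannot flood the cpu with hrtimer interrupts.
	 */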
5374 	period = max_t(u64, 10000, event->hw.sample_period);
5375 	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
5376 
5377 	return ret;
5378 }
5379 
5380 static void perf_swevent_start_hrtimer(struct perf_event *event)
5381 {
5382 	struct hw_perf_event *hwc = &event->hw;
5383 	s64 period;
5384 
5385 	if (!is_sampling_event(event))
5386 		return;
5387 
5388 	period = local64_read(&hwc->period_left);
5389 	if (period) {
5390 		if (period < 0)
5391 			period = 10000;
5392 
5393 		local64_set(&hwc->period_left, 0);
5394 	} else {
5395 		period = max_t(u64, 10000, hwc->sample_period);
5396 	}
5397 	__hrtimer_start_range_ns(&hwc->hrtimer,
5398 				ns_to_ktime(period), 0,
5399 				HRTIMER_MODE_REL_PINNED, 0);
5400 }
5401 
5402 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
5403 {
5404 	struct hw_perf_event *hwc = &event->hw;
5405 
5406 	if (is_sampling_event(event)) {
5407 		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
5408 		local64_set(&hwc->period_left, ktime_to_ns(remaining));
5409 
5410 		hrtimer_cancel(&hwc->hrtimer);
5411 	}
5412 }
5413 
5414 static void perf_swevent_init_hrtimer(struct perf_event *event)
5415 {
5416 	struct hw_perf_event *hwc = &event->hw;
5417 
5418 	if (!is_sampling_event(event))
5419 		return;
5420 
5421 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5422 	hwc->hrtimer.function = perf_swevent_hrtimer;
5423 
5424 	/*
5425 	 * Since hrtimers have a fixed rate, we can do a static freq->period
5426 	 * mapping and avoid the whole period adjust feedback stuff.
5427 	 */
5428 	if (event->attr.freq) {
5429 		long freq = event->attr.sample_freq;
5430 
5431 		event->attr.sample_period = NSEC_PER_SEC / freq;
5432 		hwc->sample_period = event->attr.sample_period;
5433 		local64_set(&hwc->period_left, hwc->sample_period);
5434 		event->attr.freq = 0;
5435 	}
5436 }
5437 
5438 /*
5439  * Software event: cpu wall time clock
5440  */
5441 
5442 static void cpu_clock_event_update(struct perf_event *event)
5443 {
5444 	s64 prev;
5445 	u64 now;
5446 
5447 	now = local_clock();
5448 	prev = local64_xchg(&event->hw.prev_count, now);
5449 	local64_add(now - prev, &event->count);
5450 }
5451 
5452 static void cpu_clock_event_start(struct perf_event *event, int flags)
5453 {
5454 	local64_set(&event->hw.prev_count, local_clock());
5455 	perf_swevent_start_hrtimer(event);
5456 }
5457 
5458 static void cpu_clock_event_stop(struct perf_event *event, int flags)
5459 {
5460 	perf_swevent_cancel_hrtimer(event);
5461 	cpu_clock_event_update(event);
5462 }
5463 
5464 static int cpu_clock_event_add(struct perf_event *event, int flags)
5465 {
5466 	if (flags & PERF_EF_START)
5467 		cpu_clock_event_start(event, flags);
5468 
5469 	return 0;
5470 }
5471 
5472 static void cpu_clock_event_del(struct perf_event *event, int flags)
5473 {
5474 	cpu_clock_event_stop(event, flags);
5475 }
5476 
5477 static void cpu_clock_event_read(struct perf_event *event)
5478 {
5479 	cpu_clock_event_update(event);
5480 }
5481 
5482 static int cpu_clock_event_init(struct perf_event *event)
5483 {
5484 	if (event->attr.type != PERF_TYPE_SOFTWARE)
5485 		return -ENOENT;
5486 
5487 	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5488 		return -ENOENT;
5489 
5490 	perf_swevent_init_hrtimer(event);
5491 
5492 	return 0;
5493 }
5494 
5495 static struct pmu perf_cpu_clock = {
5496 	.task_ctx_nr	= perf_sw_context,
5497 
5498 	.event_init	= cpu_clock_event_init,
5499 	.add		= cpu_clock_event_add,
5500 	.del		= cpu_clock_event_del,
5501 	.start		= cpu_clock_event_start,
5502 	.stop		= cpu_clock_event_stop,
5503 	.read		= cpu_clock_event_read,
5504 };
5505 
5506 /*
5507  * Software event: task time clock
5508  */
5509 
5510 static void task_clock_event_update(struct perf_event *event, u64 now)
5511 {
5512 	u64 prev;
5513 	s64 delta;
5514 
5515 	prev = local64_xchg(&event->hw.prev_count, now);
5516 	delta = now - prev;
5517 	local64_add(delta, &event->count);
5518 }
5519 
5520 static void task_clock_event_start(struct perf_event *event, int flags)
5521 {
5522 	local64_set(&event->hw.prev_count, event->ctx->time);
5523 	perf_swevent_start_hrtimer(event);
5524 }
5525 
5526 static void task_clock_event_stop(struct perf_event *event, int flags)
5527 {
5528 	perf_swevent_cancel_hrtimer(event);
5529 	task_clock_event_update(event, event->ctx->time);
5530 }
5531 
5532 static int task_clock_event_add(struct perf_event *event, int flags)
5533 {
5534 	if (flags & PERF_EF_START)
5535 		task_clock_event_start(event, flags);
5536 
5537 	return 0;
5538 }
5539 
5540 static void task_clock_event_del(struct perf_event *event, int flags)
5541 {
5542 	task_clock_event_stop(event, PERF_EF_UPDATE);
5543 }
5544 
5545 static void task_clock_event_read(struct perf_event *event)
5546 {
5547 	u64 now = perf_clock();
5548 	u64 delta = now - event->ctx->timestamp;
5549 	u64 time = event->ctx->time + delta;
5550 
5551 	task_clock_event_update(event, time);
5552 }
5553 
5554 static int task_clock_event_init(struct perf_event *event)
5555 {
5556 	if (event->attr.type != PERF_TYPE_SOFTWARE)
5557 		return -ENOENT;
5558 
5559 	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5560 		return -ENOENT;
5561 
5562 	perf_swevent_init_hrtimer(event);
5563 
5564 	return 0;
5565 }
5566 
5567 static struct pmu perf_task_clock = {
5568 	.task_ctx_nr	= perf_sw_context,
5569 
5570 	.event_init	= task_clock_event_init,
5571 	.add		= task_clock_event_add,
5572 	.del		= task_clock_event_del,
5573 	.start		= task_clock_event_start,
5574 	.stop		= task_clock_event_stop,
5575 	.read		= task_clock_event_read,
5576 };
5577 
5578 static void perf_pmu_nop_void(struct pmu *pmu)
5579 {
5580 }
5581 
5582 static int perf_pmu_nop_int(struct pmu *pmu)
5583 {
5584 	return 0;
5585 }
5586 
5587 static void perf_pmu_start_txn(struct pmu *pmu)
5588 {
5589 	perf_pmu_disable(pmu);
5590 }
5591 
5592 static int perf_pmu_commit_txn(struct pmu *pmu)
5593 {
5594 	perf_pmu_enable(pmu);
5595 	return 0;
5596 }
5597 
5598 static void perf_pmu_cancel_txn(struct pmu *pmu)
5599 {
5600 	perf_pmu_enable(pmu);
5601 }
5602 
5603 /*
5604  * Ensures all contexts with the same task_ctx_nr have the same
5605  * pmu_cpu_context too.
5606  */
5607 static void *find_pmu_context(int ctxn)
5608 {
5609 	struct pmu *pmu;
5610 
5611 	if (ctxn < 0)
5612 		return NULL;
5613 
5614 	list_for_each_entry(pmu, &pmus, entry) {
5615 		if (pmu->task_ctx_nr == ctxn)
5616 			return pmu->pmu_cpu_context;
5617 	}
5618 
5619 	return NULL;
5620 }
5621 
5622 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
5623 {
5624 	int cpu;
5625 
5626 	for_each_possible_cpu(cpu) {
5627 		struct perf_cpu_context *cpuctx;
5628 
5629 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5630 
5631 		if (cpuctx->active_pmu == old_pmu)
5632 			cpuctx->active_pmu = pmu;
5633 	}
5634 }
5635 
5636 static void free_pmu_context(struct pmu *pmu)
5637 {
5638 	struct pmu *i;
5639 
5640 	mutex_lock(&pmus_lock);
5641 	/*
5642 	 * Like a real lame refcount.
5643 	 */
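	/*
	 * If another pmu still shares this cpu context, hand active_pmu
	 * ownership over to it instead of freeing the context.
	 */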
5644 	list_for_each_entry(i, &pmus, entry) {
5645 		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
5646 			update_pmu_context(i, pmu);
5647 			goto out;
5648 		}
5649 	}
5650 
5651 	free_percpu(pmu->pmu_cpu_context);
5652 out:
5653 	mutex_unlock(&pmus_lock);
5654 }
5655 static struct idr pmu_idr;
5656 
5657 static ssize_t
5658 type_show(struct device *dev, struct device_attribute *attr, char *page)
5659 {
5660 	struct pmu *pmu = dev_get_drvdata(dev);
5661 
5662 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
5663 }
5664 
5665 static struct device_attribute pmu_dev_attrs[] = {
5666        __ATTR_RO(type),
5667        __ATTR_NULL,
5668 };
5669 
5670 static int pmu_bus_running;
5671 static struct bus_type pmu_bus = {
5672 	.name		= "event_source",
5673 	.dev_attrs	= pmu_dev_attrs,
5674 };
5675 
5676 static void pmu_dev_release(struct device *dev)
5677 {
5678 	kfree(dev);
5679 }
5680 
5681 static int pmu_dev_alloc(struct pmu *pmu)
5682 {
5683 	int ret = -ENOMEM;
5684 
5685 	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
5686 	if (!pmu->dev)
5687 		goto out;
5688 
5689 	device_initialize(pmu->dev);
5690 	ret = dev_set_name(pmu->dev, "%s", pmu->name);
5691 	if (ret)
5692 		goto free_dev;
5693 
5694 	dev_set_drvdata(pmu->dev, pmu);
5695 	pmu->dev->bus = &pmu_bus;
5696 	pmu->dev->release = pmu_dev_release;
5697 	ret = device_add(pmu->dev);
5698 	if (ret)
5699 		goto free_dev;
5700 
5701 out:
5702 	return ret;
5703 
5704 free_dev:
5705 	put_device(pmu->dev);
5706 	goto out;
5707 }
5708 
5709 static struct lock_class_key cpuctx_mutex;
5710 static struct lock_class_key cpuctx_lock;
5711 
5712 int perf_pmu_register(struct pmu *pmu, char *name, int type)
5713 {
5714 	int cpu, ret;
5715 
5716 	mutex_lock(&pmus_lock);
5717 	ret = -ENOMEM;
5718 	pmu->pmu_disable_count = alloc_percpu(int);
5719 	if (!pmu->pmu_disable_count)
5720 		goto unlock;
5721 
5722 	pmu->type = -1;
5723 	if (!name)
5724 		goto skip_type;
5725 	pmu->name = name;
5726 
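	/*
	 * Named pmus without a fixed type get a dynamic id allocated
	 * above PERF_TYPE_MAX, so they never collide with the builtin
	 * perf_type_id values.
	 */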
5727 	if (type < 0) {
5728 		int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
5729 		if (!err)
5730 			goto free_pdc;
5731 
5732 		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
5733 		if (err) {
5734 			ret = err;
5735 			goto free_pdc;
5736 		}
5737 	}
5738 	pmu->type = type;
5739 
5740 	if (pmu_bus_running) {
5741 		ret = pmu_dev_alloc(pmu);
5742 		if (ret)
5743 			goto free_idr;
5744 	}
5745 
5746 skip_type:
5747 	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
5748 	if (pmu->pmu_cpu_context)
5749 		goto got_cpu_context;
5750 
5751 	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5752 	if (!pmu->pmu_cpu_context)
5753 		goto free_dev;
5754 
5755 	for_each_possible_cpu(cpu) {
5756 		struct perf_cpu_context *cpuctx;
5757 
5758 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5759 		__perf_event_init_context(&cpuctx->ctx);
5760 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
5761 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
5762 		cpuctx->ctx.type = cpu_context;
5763 		cpuctx->ctx.pmu = pmu;
5764 		cpuctx->jiffies_interval = 1;
5765 		INIT_LIST_HEAD(&cpuctx->rotation_list);
5766 		cpuctx->active_pmu = pmu;
5767 	}
5768 
5769 got_cpu_context:
5770 	if (!pmu->start_txn) {
5771 		if (pmu->pmu_enable) {
5772 			/*
5773 			 * If we have pmu_enable/pmu_disable calls, install
5774 			 * transaction stubs that use that to try and batch
5775 			 * hardware accesses.
5776 			 */
5777 			pmu->start_txn  = perf_pmu_start_txn;
5778 			pmu->commit_txn = perf_pmu_commit_txn;
5779 			pmu->cancel_txn = perf_pmu_cancel_txn;
5780 		} else {
5781 			pmu->start_txn  = perf_pmu_nop_void;
5782 			pmu->commit_txn = perf_pmu_nop_int;
5783 			pmu->cancel_txn = perf_pmu_nop_void;
5784 		}
5785 	}
5786 
5787 	if (!pmu->pmu_enable) {
5788 		pmu->pmu_enable  = perf_pmu_nop_void;
5789 		pmu->pmu_disable = perf_pmu_nop_void;
5790 	}
5791 
5792 	list_add_rcu(&pmu->entry, &pmus);
5793 	ret = 0;
5794 unlock:
5795 	mutex_unlock(&pmus_lock);
5796 
5797 	return ret;
5798 
5799 free_dev:
5800 	device_del(pmu->dev);
5801 	put_device(pmu->dev);
5802 
5803 free_idr:
5804 	if (pmu->type >= PERF_TYPE_MAX)
5805 		idr_remove(&pmu_idr, pmu->type);
5806 
5807 free_pdc:
5808 	free_percpu(pmu->pmu_disable_count);
5809 	goto unlock;
5810 }
5811 
5812 void perf_pmu_unregister(struct pmu *pmu)
5813 {
5814 	mutex_lock(&pmus_lock);
5815 	list_del_rcu(&pmu->entry);
5816 	mutex_unlock(&pmus_lock);
5817 
5818 	/*
5819 	 * We dereference the pmu list under both SRCU and regular RCU, so
5820 	 * synchronize against both of those.
5821 	 */
5822 	synchronize_srcu(&pmus_srcu);
5823 	synchronize_rcu();
5824 
5825 	free_percpu(pmu->pmu_disable_count);
5826 	if (pmu->type >= PERF_TYPE_MAX)
5827 		idr_remove(&pmu_idr, pmu->type);
5828 	device_del(pmu->dev);
5829 	put_device(pmu->dev);
5830 	free_pmu_context(pmu);
5831 }
5832 
5833 struct pmu *perf_init_event(struct perf_event *event)
5834 {
5835 	struct pmu *pmu = NULL;
5836 	int idx;
5837 	int ret;
5838 
5839 	idx = srcu_read_lock(&pmus_srcu);
5840 
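	/* Fast path: a known attr.type maps straight to its pmu. */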
5841 	rcu_read_lock();
5842 	pmu = idr_find(&pmu_idr, event->attr.type);
5843 	rcu_read_unlock();
5844 	if (pmu) {
5845 		event->pmu = pmu;
5846 		ret = pmu->event_init(event);
5847 		if (ret)
5848 			pmu = ERR_PTR(ret);
5849 		goto unlock;
5850 	}
5851 
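	/*
	 * Otherwise offer the event to each pmu in turn; -ENOENT means
	 * "not mine, keep looking", any other error is final.
	 */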
5852 	list_for_each_entry_rcu(pmu, &pmus, entry) {
5853 		event->pmu = pmu;
5854 		ret = pmu->event_init(event);
5855 		if (!ret)
5856 			goto unlock;
5857 
5858 		if (ret != -ENOENT) {
5859 			pmu = ERR_PTR(ret);
5860 			goto unlock;
5861 		}
5862 	}
5863 	pmu = ERR_PTR(-ENOENT);
5864 unlock:
5865 	srcu_read_unlock(&pmus_srcu, idx);
5866 
5867 	return pmu;
5868 }
5869 
5870 /*
 * Allocate and initialize an event structure
5872  */
5873 static struct perf_event *
5874 perf_event_alloc(struct perf_event_attr *attr, int cpu,
5875 		 struct task_struct *task,
5876 		 struct perf_event *group_leader,
5877 		 struct perf_event *parent_event,
5878 		 perf_overflow_handler_t overflow_handler,
5879 		 void *context)
5880 {
5881 	struct pmu *pmu;
5882 	struct perf_event *event;
5883 	struct hw_perf_event *hwc;
5884 	long err;
5885 
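	/* cpu must be a valid cpu index, or -1 for a task-bound event. */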
5886 	if ((unsigned)cpu >= nr_cpu_ids) {
5887 		if (!task || cpu != -1)
5888 			return ERR_PTR(-EINVAL);
5889 	}
5890 
5891 	event = kzalloc(sizeof(*event), GFP_KERNEL);
5892 	if (!event)
5893 		return ERR_PTR(-ENOMEM);
5894 
5895 	/*
5896 	 * Single events are their own group leaders, with an
5897 	 * empty sibling list:
5898 	 */
5899 	if (!group_leader)
5900 		group_leader = event;
5901 
5902 	mutex_init(&event->child_mutex);
5903 	INIT_LIST_HEAD(&event->child_list);
5904 
5905 	INIT_LIST_HEAD(&event->group_entry);
5906 	INIT_LIST_HEAD(&event->event_entry);
5907 	INIT_LIST_HEAD(&event->sibling_list);
5908 	INIT_LIST_HEAD(&event->rb_entry);
5909 
5910 	init_waitqueue_head(&event->waitq);
5911 	init_irq_work(&event->pending, perf_pending_event);
5912 
5913 	mutex_init(&event->mmap_mutex);
5914 
5915 	event->cpu		= cpu;
5916 	event->attr		= *attr;
5917 	event->group_leader	= group_leader;
5918 	event->pmu		= NULL;
5919 	event->oncpu		= -1;
5920 
5921 	event->parent		= parent_event;
5922 
5923 	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
5924 	event->id		= atomic64_inc_return(&perf_event_id);
5925 
5926 	event->state		= PERF_EVENT_STATE_INACTIVE;
5927 
5928 	if (task) {
5929 		event->attach_state = PERF_ATTACH_TASK;
5930 #ifdef CONFIG_HAVE_HW_BREAKPOINT
5931 		/*
		 * hw_breakpoint is a bit difficult here; record the target task.
5933 		 */
5934 		if (attr->type == PERF_TYPE_BREAKPOINT)
5935 			event->hw.bp_target = task;
5936 #endif
5937 	}
5938 
5939 	if (!overflow_handler && parent_event) {
5940 		overflow_handler = parent_event->overflow_handler;
5941 		context = parent_event->overflow_handler_context;
5942 	}
5943 
5944 	event->overflow_handler	= overflow_handler;
5945 	event->overflow_handler_context = context;
5946 
5947 	if (attr->disabled)
5948 		event->state = PERF_EVENT_STATE_OFF;
5949 
5950 	pmu = NULL;
5951 
5952 	hwc = &event->hw;
5953 	hwc->sample_period = attr->sample_period;
5954 	if (attr->freq && attr->sample_freq)
5955 		hwc->sample_period = 1;
5956 	hwc->last_period = hwc->sample_period;
5957 
5958 	local64_set(&hwc->period_left, hwc->sample_period);
5959 
5960 	/*
5961 	 * we currently do not support PERF_FORMAT_GROUP on inherited events
5962 	 */
5963 	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5964 		goto done;
5965 
5966 	pmu = perf_init_event(event);
5967 
5968 done:
5969 	err = 0;
5970 	if (!pmu)
5971 		err = -EINVAL;
5972 	else if (IS_ERR(pmu))
5973 		err = PTR_ERR(pmu);
5974 
5975 	if (err) {
5976 		if (event->ns)
5977 			put_pid_ns(event->ns);
5978 		kfree(event);
5979 		return ERR_PTR(err);
5980 	}
5981 
5982 	if (!event->parent) {
5983 		if (event->attach_state & PERF_ATTACH_TASK)
5984 			jump_label_inc(&perf_sched_events);
5985 		if (event->attr.mmap || event->attr.mmap_data)
5986 			atomic_inc(&nr_mmap_events);
5987 		if (event->attr.comm)
5988 			atomic_inc(&nr_comm_events);
5989 		if (event->attr.task)
5990 			atomic_inc(&nr_task_events);
5991 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
5992 			err = get_callchain_buffers();
5993 			if (err) {
5994 				free_event(event);
5995 				return ERR_PTR(err);
5996 			}
5997 		}
5998 	}
5999 
6000 	return event;
6001 }
6002 
6003 static int perf_copy_attr(struct perf_event_attr __user *uattr,
6004 			  struct perf_event_attr *attr)
6005 {
6006 	u32 size;
6007 	int ret;
6008 
6009 	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6010 		return -EFAULT;
6011 
6012 	/*
	 * Zero the full structure, so that a short copy leaves the
	 * remaining fields zeroed.
6014 	 */
6015 	memset(attr, 0, sizeof(*attr));
6016 
6017 	ret = get_user(size, &uattr->size);
6018 	if (ret)
6019 		return ret;
6020 
6021 	if (size > PAGE_SIZE)	/* silly large */
6022 		goto err_size;
6023 
6024 	if (!size)		/* abi compat */
6025 		size = PERF_ATTR_SIZE_VER0;
6026 
6027 	if (size < PERF_ATTR_SIZE_VER0)
6028 		goto err_size;
6029 
6030 	/*
6031 	 * If we're handed a bigger struct than we know of,
6032 	 * ensure all the unknown bits are 0 - i.e. new
6033 	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
6035 	 */
6036 	if (size > sizeof(*attr)) {
6037 		unsigned char __user *addr;
6038 		unsigned char __user *end;
6039 		unsigned char val;
6040 
6041 		addr = (void __user *)uattr + sizeof(*attr);
6042 		end  = (void __user *)uattr + size;
6043 
6044 		for (; addr < end; addr++) {
6045 			ret = get_user(val, addr);
6046 			if (ret)
6047 				return ret;
6048 			if (val)
6049 				goto err_size;
6050 		}
6051 		size = sizeof(*attr);
6052 	}
6053 
6054 	ret = copy_from_user(attr, uattr, size);
6055 	if (ret)
6056 		return -EFAULT;
6057 
6058 	if (attr->__reserved_1)
6059 		return -EINVAL;
6060 
6061 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
6062 		return -EINVAL;
6063 
6064 	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
6065 		return -EINVAL;
6066 
6067 out:
6068 	return ret;
6069 
6070 err_size:
6071 	put_user(sizeof(*attr), &uattr->size);
6072 	ret = -E2BIG;
6073 	goto out;
6074 }
6075 
6076 static int
6077 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
6078 {
6079 	struct ring_buffer *rb = NULL, *old_rb = NULL;
6080 	int ret = -EINVAL;
6081 
6082 	if (!output_event)
6083 		goto set;
6084 
6085 	/* don't allow circular references */
6086 	if (event == output_event)
6087 		goto out;
6088 
6089 	/*
6090 	 * Don't allow cross-cpu buffers
6091 	 */
6092 	if (output_event->cpu != event->cpu)
6093 		goto out;
6094 
6095 	/*
6096 	 * If it's not a per-cpu rb, it must be the same task.
6097 	 */
6098 	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
6099 		goto out;
6100 
6101 set:
6102 	mutex_lock(&event->mmap_mutex);
6103 	/* Can't redirect output if we've got an active mmap() */
6104 	if (atomic_read(&event->mmap_count))
6105 		goto unlock;
6106 
6107 	if (output_event) {
6108 		/* get the rb we want to redirect to */
6109 		rb = ring_buffer_get(output_event);
6110 		if (!rb)
6111 			goto unlock;
6112 	}
6113 
6114 	old_rb = event->rb;
6115 	rcu_assign_pointer(event->rb, rb);
6116 	if (old_rb)
6117 		ring_buffer_detach(event, old_rb);
6118 	ret = 0;
6119 unlock:
6120 	mutex_unlock(&event->mmap_mutex);
6121 
6122 	if (old_rb)
6123 		ring_buffer_put(old_rb);
6124 out:
6125 	return ret;
6126 }
6127 
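/*
 * The function above backs the PERF_EVENT_IOC_SET_OUTPUT ioctl. A
 * hedged userspace sketch: two events opened on the same cpu, only the
 * target is mmap()ed (it must already own a ring buffer, and the
 * redirected event must not have an active mmap of its own):
 *
 *	long page_size = sysconf(_SC_PAGESIZE);
 *	int fd_a = ...;	// perf_event_open(), sampling, cpu N
 *	int fd_b = ...;	// perf_event_open(), sampling, same cpu N
 *	void *base = mmap(NULL, (1 + 8) * page_size,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd_a, 0);
 *
 *	// fd_b's samples now land in fd_a's buffer
 *	ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a);
 */
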
6128 /**
6129  * sys_perf_event_open - open a performance event, associate it with a task/cpu
6130  *
6131  * @attr_uptr:	event_id type attributes for monitoring/sampling
6132  * @pid:		target pid
6133  * @cpu:		target cpu
6134  * @group_fd:		group leader event fd
 * @flags:		perf event open flags (PERF_FLAG_*)
6135  */
6136 SYSCALL_DEFINE5(perf_event_open,
6137 		struct perf_event_attr __user *, attr_uptr,
6138 		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
6139 {
6140 	struct perf_event *group_leader = NULL, *output_event = NULL;
6141 	struct perf_event *event, *sibling;
6142 	struct perf_event_attr attr;
6143 	struct perf_event_context *ctx;
6144 	struct file *event_file = NULL;
6145 	struct file *group_file = NULL;
6146 	struct task_struct *task = NULL;
6147 	struct pmu *pmu;
6148 	int event_fd;
6149 	int move_group = 0;
6150 	int fput_needed = 0;
6151 	int err;
6152 
6153 	/* for future expandability... */
6154 	if (flags & ~PERF_FLAG_ALL)
6155 		return -EINVAL;
6156 
6157 	err = perf_copy_attr(attr_uptr, &attr);
6158 	if (err)
6159 		return err;
6160 
6161 	if (!attr.exclude_kernel) {
6162 		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6163 			return -EACCES;
6164 	}
6165 
6166 	if (attr.freq) {
6167 		if (attr.sample_freq > sysctl_perf_event_sample_rate)
6168 			return -EINVAL;
6169 	}
6170 
6171 	/*
6172 	 * In cgroup mode, the pid argument is used to pass the fd
6173 	 * opened to the cgroup directory in cgroupfs. The cpu argument
6174 	 * designates the cpu on which to monitor threads from that
6175 	 * cgroup.
6176 	 */
6177 	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
6178 		return -EINVAL;
6179 
6180 	event_fd = get_unused_fd_flags(O_RDWR);
6181 	if (event_fd < 0)
6182 		return event_fd;
6183 
6184 	if (group_fd != -1) {
6185 		group_leader = perf_fget_light(group_fd, &fput_needed);
6186 		if (IS_ERR(group_leader)) {
6187 			err = PTR_ERR(group_leader);
6188 			goto err_fd;
6189 		}
6190 		group_file = group_leader->filp;
6191 		if (flags & PERF_FLAG_FD_OUTPUT)
6192 			output_event = group_leader;
6193 		if (flags & PERF_FLAG_FD_NO_GROUP)
6194 			group_leader = NULL;
6195 	}
6196 
6197 	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
6198 		task = find_lively_task_by_vpid(pid);
6199 		if (IS_ERR(task)) {
6200 			err = PTR_ERR(task);
6201 			goto err_group_fd;
6202 		}
6203 	}
6204 
6205 	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
6206 				 NULL, NULL);
6207 	if (IS_ERR(event)) {
6208 		err = PTR_ERR(event);
6209 		goto err_task;
6210 	}
6211 
6212 	if (flags & PERF_FLAG_PID_CGROUP) {
6213 		err = perf_cgroup_connect(pid, event, &attr, group_leader);
6214 		if (err)
6215 			goto err_alloc;
6216 		/*
6217 		 * one more event:
6218 		 * - that has cgroup constraint on event->cpu
6219 		 * - that may need work on context switch
6220 		 */
6221 		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
6222 		jump_label_inc(&perf_sched_events);
6223 	}
6224 
6225 	/*
6226 	 * Special case software events and allow them to be part of
6227 	 * any hardware group.
6228 	 */
6229 	pmu = event->pmu;
6230 
6231 	if (group_leader &&
6232 	    (is_software_event(event) != is_software_event(group_leader))) {
6233 		if (is_software_event(event)) {
6234 			/*
6235 			 * If event and group_leader are not both software
6236 			 * events, and event is, then group_leader is not.
6237 			 *
6238 			 * Allow the addition of software events to !software
6239 			 * groups, this is safe because software events never
6240 			 * fail to schedule.
6241 			 */
6242 			pmu = group_leader->pmu;
6243 		} else if (is_software_event(group_leader) &&
6244 			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
6245 			/*
6246 			 * In case the group is a pure software group, and we
6247 			 * try to add a hardware event, move the whole group to
6248 			 * the hardware context.
6249 			 */
6250 			move_group = 1;
6251 		}
6252 	}
6253 
6254 	/*
6255 	 * Get the target context (task or percpu):
6256 	 */
6257 	ctx = find_get_context(pmu, task, cpu);
6258 	if (IS_ERR(ctx)) {
6259 		err = PTR_ERR(ctx);
6260 		goto err_alloc;
6261 	}
6262 
6263 	if (task) {
6264 		put_task_struct(task);
6265 		task = NULL;
6266 	}
6267 
6268 	/*
6269 	 * Look up the group leader (we will attach this event to it):
6270 	 */
6271 	if (group_leader) {
6272 		err = -EINVAL;
6273 
6274 		/*
6275 		 * Do not allow a recursive hierarchy (this new sibling
6276 		 * becoming part of another group-sibling):
6277 		 */
6278 		if (group_leader->group_leader != group_leader)
6279 			goto err_context;
6280 		/*
6281 		 * Do not allow to attach to a group in a different
6282 		 * task or CPU context:
6283 		 */
6284 		if (move_group) {
6285 			if (group_leader->ctx->type != ctx->type)
6286 				goto err_context;
6287 		} else {
6288 			if (group_leader->ctx != ctx)
6289 				goto err_context;
6290 		}
6291 
6292 		/*
6293 		 * Only a group leader can be exclusive or pinned
6294 		 */
6295 		if (attr.exclusive || attr.pinned)
6296 			goto err_context;
6297 	}
6298 
6299 	if (output_event) {
6300 		err = perf_event_set_output(event, output_event);
6301 		if (err)
6302 			goto err_context;
6303 	}
6304 
6305 	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
6306 	if (IS_ERR(event_file)) {
6307 		err = PTR_ERR(event_file);
6308 		goto err_context;
6309 	}
6310 
6311 	if (move_group) {
6312 		struct perf_event_context *gctx = group_leader->ctx;
6313 
6314 		mutex_lock(&gctx->mutex);
6315 		perf_remove_from_context(group_leader);
6316 		list_for_each_entry(sibling, &group_leader->sibling_list,
6317 				    group_entry) {
6318 			perf_remove_from_context(sibling);
6319 			put_ctx(gctx);
6320 		}
6321 		mutex_unlock(&gctx->mutex);
6322 		put_ctx(gctx);
6323 	}
6324 
6325 	event->filp = event_file;
6326 	WARN_ON_ONCE(ctx->parent_ctx);
6327 	mutex_lock(&ctx->mutex);
6328 
6329 	if (move_group) {
6330 		perf_install_in_context(ctx, group_leader, cpu);
6331 		get_ctx(ctx);
6332 		list_for_each_entry(sibling, &group_leader->sibling_list,
6333 				    group_entry) {
6334 			perf_install_in_context(ctx, sibling, cpu);
6335 			get_ctx(ctx);
6336 		}
6337 	}
6338 
6339 	perf_install_in_context(ctx, event, cpu);
6340 	++ctx->generation;
6341 	perf_unpin_context(ctx);
6342 	mutex_unlock(&ctx->mutex);
6343 
6344 	event->owner = current;
6345 
6346 	mutex_lock(&current->perf_event_mutex);
6347 	list_add_tail(&event->owner_entry, &current->perf_event_list);
6348 	mutex_unlock(&current->perf_event_mutex);
6349 
6350 	/*
6351 	 * Precalculate sample_data sizes
6352 	 */
6353 	perf_event__header_size(event);
6354 	perf_event__id_header_size(event);
6355 
6356 	/*
6357 	 * Drop the reference on the group leader's file after placing the
6358 	 * new event on the sibling_list. This ensures destruction
6359 	 * of the group leader will find the pointer to itself in
6360 	 * perf_group_detach().
6361 	 */
6362 	fput_light(group_file, fput_needed);
6363 	fd_install(event_fd, event_file);
6364 	return event_fd;
6365 
6366 err_context:
6367 	perf_unpin_context(ctx);
6368 	put_ctx(ctx);
6369 err_alloc:
6370 	free_event(event);
6371 err_task:
6372 	if (task)
6373 		put_task_struct(task);
6374 err_group_fd:
6375 	fput_light(group_file, fput_needed);
6376 err_fd:
6377 	put_unused_fd(event_fd);
6378 	return err;
6379 }
6380 
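/*
 * A minimal, hedged userspace sketch of the system call above (glibc
 * provides no wrapper, hence raw syscall(2)): count user-space
 * instructions in the calling thread and read the total back:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/syscall.h>
 *	#include <linux/perf_event.h>
 *
 *	int main(void)
 *	{
 *		struct perf_event_attr attr;
 *		long long count;
 *		int fd;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size = sizeof(attr);
 *		attr.type = PERF_TYPE_HARDWARE;
 *		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 *		attr.disabled = 1;
 *		attr.exclude_kernel = 1;	// no CAP_SYS_ADMIN needed
 *
 *		// pid == 0, cpu == -1: this task, on any cpu
 *		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *		if (fd < 0)
 *			return 1;
 *
 *		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *		// ... workload under measurement ...
 *		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *
 *		if (read(fd, &count, sizeof(count)) == sizeof(count))
 *			printf("%lld instructions\n", count);
 *		close(fd);
 *		return 0;
 *	}
 */
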
6381 /**
6382  * perf_event_create_kernel_counter
6383  *
6384  * @attr: attributes of the counter to create
6385  * @cpu: cpu on which the counter is bound
6386  * @task: task to profile (NULL for percpu)
 * @overflow_handler: callback invoked when the event overflows
 * @context: opaque data passed back to @overflow_handler
6387  */
6388 struct perf_event *
6389 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
6390 				 struct task_struct *task,
6391 				 perf_overflow_handler_t overflow_handler,
6392 				 void *context)
6393 {
6394 	struct perf_event_context *ctx;
6395 	struct perf_event *event;
6396 	int err;
6397 
6398 	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
6399 				 overflow_handler, context);
6400 	if (IS_ERR(event)) {
6401 		err = PTR_ERR(event);
6402 		goto err;
6403 	}
6404 
6405 	/*
6406 	 * Get the target context (task or percpu):
6407 	 */
6408 	ctx = find_get_context(event->pmu, task, cpu);
6410 	if (IS_ERR(ctx)) {
6411 		err = PTR_ERR(ctx);
6412 		goto err_free;
6413 	}
6414 
6415 	event->filp = NULL;
6416 	WARN_ON_ONCE(ctx->parent_ctx);
6417 	mutex_lock(&ctx->mutex);
6418 	perf_install_in_context(ctx, event, cpu);
6419 	++ctx->generation;
6420 	perf_unpin_context(ctx);
6421 	mutex_unlock(&ctx->mutex);
6422 
6423 	return event;
6424 
6425 err_free:
6426 	free_event(event);
6427 err:
6428 	return ERR_PTR(err);
6429 }
6430 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
6431 
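/*
 * A hedged in-kernel sketch of the export above, e.g. from a module:
 * a per-cpu cycle counter on cpu 0 with an overflow callback. The
 * names my_event/my_overflow are illustrative, and the handler
 * assumes the three-argument perf_overflow_handler_t signature:
 *
 *	static struct perf_event *my_event;
 *
 *	static void my_overflow(struct perf_event *event,
 *				struct perf_sample_data *data,
 *				struct pt_regs *regs)
 *	{
 *		// runs from NMI/IRQ context on overflow; keep it short
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		struct perf_event_attr attr = {
 *			.type		= PERF_TYPE_HARDWARE,
 *			.config		= PERF_COUNT_HW_CPU_CYCLES,
 *			.size		= sizeof(struct perf_event_attr),
 *			.sample_period	= 1000000,
 *		};
 *
 *		// cpu 0, task == NULL: per-cpu counter
 *		my_event = perf_event_create_kernel_counter(&attr, 0, NULL,
 *							    my_overflow, NULL);
 *		return IS_ERR(my_event) ? PTR_ERR(my_event) : 0;
 *	}
 *
 * Teardown would go through perf_event_release_kernel().
 */
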
6432 static void sync_child_event(struct perf_event *child_event,
6433 			       struct task_struct *child)
6434 {
6435 	struct perf_event *parent_event = child_event->parent;
6436 	u64 child_val;
6437 
6438 	if (child_event->attr.inherit_stat)
6439 		perf_event_read_event(child_event, child);
6440 
6441 	child_val = perf_event_count(child_event);
6442 
6443 	/*
6444 	 * Add back the child's count to the parent's count:
6445 	 */
6446 	atomic64_add(child_val, &parent_event->child_count);
6447 	atomic64_add(child_event->total_time_enabled,
6448 		     &parent_event->child_total_time_enabled);
6449 	atomic64_add(child_event->total_time_running,
6450 		     &parent_event->child_total_time_running);
6451 
6452 	/*
6453 	 * Remove this event from the parent's list
6454 	 */
6455 	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6456 	mutex_lock(&parent_event->child_mutex);
6457 	list_del_init(&child_event->child_list);
6458 	mutex_unlock(&parent_event->child_mutex);
6459 
6460 	/*
6461 	 * Release the parent event, if this was the last
6462 	 * reference to it.
6463 	 */
6464 	fput(parent_event->filp);
6465 }
6466 
6467 static void
6468 __perf_event_exit_task(struct perf_event *child_event,
6469 			 struct perf_event_context *child_ctx,
6470 			 struct task_struct *child)
6471 {
6472 	if (child_event->parent) {
6473 		raw_spin_lock_irq(&child_ctx->lock);
6474 		perf_group_detach(child_event);
6475 		raw_spin_unlock_irq(&child_ctx->lock);
6476 	}
6477 
6478 	perf_remove_from_context(child_event);
6479 
6480 	/*
6481 	 * It can happen that the parent exits first, and has events
6482 	 * that are still around due to the child reference. These
6483 	 * events need to be zapped.
6484 	 */
6485 	if (child_event->parent) {
6486 		sync_child_event(child_event, child);
6487 		free_event(child_event);
6488 	}
6489 }
6490 
6491 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
6492 {
6493 	struct perf_event *child_event, *tmp;
6494 	struct perf_event_context *child_ctx;
6495 	unsigned long flags;
6496 
6497 	if (likely(!child->perf_event_ctxp[ctxn])) {
6498 		perf_event_task(child, NULL, 0);
6499 		return;
6500 	}
6501 
6502 	local_irq_save(flags);
6503 	/*
6504 	 * We can't reschedule here because interrupts are disabled,
6505 	 * and either child is current or it is a task that can't be
6506 	 * scheduled, so we are now safe from rescheduling changing
6507 	 * our context.
6508 	 */
6509 	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
6510 
6511 	/*
6512 	 * Take the context lock here so that if find_get_context is
6513 	 * reading child->perf_event_ctxp, we wait until it has
6514 	 * incremented the context's refcount before we do put_ctx below.
6515 	 */
6516 	raw_spin_lock(&child_ctx->lock);
6517 	task_ctx_sched_out(child_ctx);
6518 	child->perf_event_ctxp[ctxn] = NULL;
6519 	/*
6520 	 * If this context is a clone; unclone it so it can't get
6521 	 * swapped to another process while we're removing all
6522 	 * the events from it.
6523 	 */
6524 	unclone_ctx(child_ctx);
6525 	update_context_time(child_ctx);
6526 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6527 
6528 	/*
6529 	 * Report the task dead after unscheduling the events so that we
6530 	 * won't get any samples after PERF_RECORD_EXIT. We can however still
6531 	 * get a few PERF_RECORD_READ events.
6532 	 */
6533 	perf_event_task(child, child_ctx, 0);
6534 
6535 	/*
6536 	 * We can recurse on the same lock type through:
6537 	 *
6538 	 *   __perf_event_exit_task()
6539 	 *     sync_child_event()
6540 	 *       fput(parent_event->filp)
6541 	 *         perf_release()
6542 	 *           mutex_lock(&ctx->mutex)
6543 	 *
6544 	 * But since it's the parent context it won't be the same instance.
6545 	 */
6546 	mutex_lock(&child_ctx->mutex);
6547 
6548 again:
6549 	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
6550 				 group_entry)
6551 		__perf_event_exit_task(child_event, child_ctx, child);
6552 
6553 	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
6554 				 group_entry)
6555 		__perf_event_exit_task(child_event, child_ctx, child);
6556 
6557 	/*
6558 	 * If the last event was a group event, it will have appended all
6559 	 * its siblings to the list, but we obtained 'tmp' before that, and
6560 	 * it will still point to the list head terminating the iteration.
6561 	 */
6562 	if (!list_empty(&child_ctx->pinned_groups) ||
6563 	    !list_empty(&child_ctx->flexible_groups))
6564 		goto again;
6565 
6566 	mutex_unlock(&child_ctx->mutex);
6567 
6568 	put_ctx(child_ctx);
6569 }
6570 
6571 /*
6572  * When a child task exits, feed back event values to parent events.
6573  */
6574 void perf_event_exit_task(struct task_struct *child)
6575 {
6576 	struct perf_event *event, *tmp;
6577 	int ctxn;
6578 
6579 	mutex_lock(&child->perf_event_mutex);
6580 	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
6581 				 owner_entry) {
6582 		list_del_init(&event->owner_entry);
6583 
6584 		/*
6585 		 * Ensure the list deletion is visible before we clear
6586 		 * the owner; this closes a race against perf_release(),
6587 		 * where we need to serialize on owner->perf_event_mutex.
6588 		 */
6589 		smp_wmb();
6590 		event->owner = NULL;
6591 	}
6592 	mutex_unlock(&child->perf_event_mutex);
6593 
6594 	for_each_task_context_nr(ctxn)
6595 		perf_event_exit_task_context(child, ctxn);
6596 }
6597 
6598 static void perf_free_event(struct perf_event *event,
6599 			    struct perf_event_context *ctx)
6600 {
6601 	struct perf_event *parent = event->parent;
6602 
6603 	if (WARN_ON_ONCE(!parent))
6604 		return;
6605 
6606 	mutex_lock(&parent->child_mutex);
6607 	list_del_init(&event->child_list);
6608 	mutex_unlock(&parent->child_mutex);
6609 
6610 	fput(parent->filp);
6611 
6612 	perf_group_detach(event);
6613 	list_del_event(event, ctx);
6614 	free_event(event);
6615 }
6616 
6617 /*
6618  * free an unexposed, unused context as created by inheritance by
6619  * perf_event_init_task below, used by fork() in case of failure.
6620  */
6621 void perf_event_free_task(struct task_struct *task)
6622 {
6623 	struct perf_event_context *ctx;
6624 	struct perf_event *event, *tmp;
6625 	int ctxn;
6626 
6627 	for_each_task_context_nr(ctxn) {
6628 		ctx = task->perf_event_ctxp[ctxn];
6629 		if (!ctx)
6630 			continue;
6631 
6632 		mutex_lock(&ctx->mutex);
6633 again:
6634 		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
6635 				group_entry)
6636 			perf_free_event(event, ctx);
6637 
6638 		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
6639 				group_entry)
6640 			perf_free_event(event, ctx);
6641 
6642 		if (!list_empty(&ctx->pinned_groups) ||
6643 				!list_empty(&ctx->flexible_groups))
6644 			goto again;
6645 
6646 		mutex_unlock(&ctx->mutex);
6647 
6648 		put_ctx(ctx);
6649 	}
6650 }
6651 
6652 void perf_event_delayed_put(struct task_struct *task)
6653 {
6654 	int ctxn;
6655 
6656 	for_each_task_context_nr(ctxn)
6657 		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
6658 }
6659 
6660 /*
6661  * inherit an event from parent task to child task:
6662  */
6663 static struct perf_event *
6664 inherit_event(struct perf_event *parent_event,
6665 	      struct task_struct *parent,
6666 	      struct perf_event_context *parent_ctx,
6667 	      struct task_struct *child,
6668 	      struct perf_event *group_leader,
6669 	      struct perf_event_context *child_ctx)
6670 {
6671 	struct perf_event *child_event;
6672 	unsigned long flags;
6673 
6674 	/*
6675 	 * Instead of creating recursive hierarchies of events,
6676 	 * we link inherited events back to the original parent,
6677 	 * which is guaranteed to have a filp; we use that filp as the
6678 	 * reference count:
6679 	 */
6680 	if (parent_event->parent)
6681 		parent_event = parent_event->parent;
6682 
6683 	child_event = perf_event_alloc(&parent_event->attr,
6684 					   parent_event->cpu,
6685 					   child,
6686 					   group_leader, parent_event,
6687 				           NULL, NULL);
6688 	if (IS_ERR(child_event))
6689 		return child_event;
6690 	get_ctx(child_ctx);
6691 
6692 	/*
6693 	 * Make the child state follow the state of the parent event,
6694 	 * not its attr.disabled bit.  We hold the parent's mutex,
6695 	 * so we won't race with perf_event_{en, dis}able_family.
6696 	 */
6697 	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
6698 		child_event->state = PERF_EVENT_STATE_INACTIVE;
6699 	else
6700 		child_event->state = PERF_EVENT_STATE_OFF;
6701 
6702 	if (parent_event->attr.freq) {
6703 		u64 sample_period = parent_event->hw.sample_period;
6704 		struct hw_perf_event *hwc = &child_event->hw;
6705 
6706 		hwc->sample_period = sample_period;
6707 		hwc->last_period   = sample_period;
6708 
6709 		local64_set(&hwc->period_left, sample_period);
6710 	}
6711 
6712 	child_event->ctx = child_ctx;
6713 	child_event->overflow_handler = parent_event->overflow_handler;
6714 	child_event->overflow_handler_context
6715 		= parent_event->overflow_handler_context;
6716 
6717 	/*
6718 	 * Precalculate sample_data sizes
6719 	 */
6720 	perf_event__header_size(child_event);
6721 	perf_event__id_header_size(child_event);
6722 
6723 	/*
6724 	 * Link it up in the child's context:
6725 	 */
6726 	raw_spin_lock_irqsave(&child_ctx->lock, flags);
6727 	add_event_to_ctx(child_event, child_ctx);
6728 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6729 
6730 	/*
6731 	 * Get a reference to the parent filp - we will fput it
6732 	 * when the child event exits. This is safe to do because
6733 	 * we are in the parent and we know that the filp still
6734 	 * exists and has a nonzero count:
6735 	 */
6736 	atomic_long_inc(&parent_event->filp->f_count);
6737 
6738 	/*
6739 	 * Link this into the parent event's child list
6740 	 */
6741 	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6742 	mutex_lock(&parent_event->child_mutex);
6743 	list_add_tail(&child_event->child_list, &parent_event->child_list);
6744 	mutex_unlock(&parent_event->child_mutex);
6745 
6746 	return child_event;
6747 }
6748 
6749 static int inherit_group(struct perf_event *parent_event,
6750 	      struct task_struct *parent,
6751 	      struct perf_event_context *parent_ctx,
6752 	      struct task_struct *child,
6753 	      struct perf_event_context *child_ctx)
6754 {
6755 	struct perf_event *leader;
6756 	struct perf_event *sub;
6757 	struct perf_event *child_ctr;
6758 
6759 	leader = inherit_event(parent_event, parent, parent_ctx,
6760 				 child, NULL, child_ctx);
6761 	if (IS_ERR(leader))
6762 		return PTR_ERR(leader);
6763 	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
6764 		child_ctr = inherit_event(sub, parent, parent_ctx,
6765 					    child, leader, child_ctx);
6766 		if (IS_ERR(child_ctr))
6767 			return PTR_ERR(child_ctr);
6768 	}
6769 	return 0;
6770 }
6771 
6772 static int
6773 inherit_task_group(struct perf_event *event, struct task_struct *parent,
6774 		   struct perf_event_context *parent_ctx,
6775 		   struct task_struct *child, int ctxn,
6776 		   int *inherited_all)
6777 {
6778 	int ret;
6779 	struct perf_event_context *child_ctx;
6780 
6781 	if (!event->attr.inherit) {
6782 		*inherited_all = 0;
6783 		return 0;
6784 	}
6785 
6786 	child_ctx = child->perf_event_ctxp[ctxn];
6787 	if (!child_ctx) {
6788 		/*
6789 		 * This is executed from the parent task context, so
6790 		 * inherit events that have been marked for cloning.
6791 		 * First allocate and initialize a context for the
6792 		 * child.
6793 		 */
6794 
6795 		child_ctx = alloc_perf_context(event->pmu, child);
6796 		if (!child_ctx)
6797 			return -ENOMEM;
6798 
6799 		child->perf_event_ctxp[ctxn] = child_ctx;
6800 	}
6801 
6802 	ret = inherit_group(event, parent, parent_ctx,
6803 			    child, child_ctx);
6804 
6805 	if (ret)
6806 		*inherited_all = 0;
6807 
6808 	return ret;
6809 }
6810 
6811 /*
6812  * Initialize one perf_event context (ctxn) in task_struct
6813  */
6814 int perf_event_init_context(struct task_struct *child, int ctxn)
6815 {
6816 	struct perf_event_context *child_ctx, *parent_ctx;
6817 	struct perf_event_context *cloned_ctx;
6818 	struct perf_event *event;
6819 	struct task_struct *parent = current;
6820 	int inherited_all = 1;
6821 	unsigned long flags;
6822 	int ret = 0;
6823 
6824 	if (likely(!parent->perf_event_ctxp[ctxn]))
6825 		return 0;
6826 
6827 	/*
6828 	 * If the parent's context is a clone, pin it so it won't get
6829 	 * swapped under us.
6830 	 */
6831 	parent_ctx = perf_pin_task_context(parent, ctxn);
6832 
6833 	/*
6834 	 * No need to check if parent_ctx != NULL here; since we saw
6835 	 * it non-NULL earlier, the only reason for it to become NULL
6836 	 * is if we exit, and since we're currently in the middle of
6837 	 * a fork we can't be exiting at the same time.
6838 	 */
6839 
6840 	/*
6841 	 * Lock the parent list. No need to lock the child - not PID
6842 	 * hashed yet and not running, so nobody can access it.
6843 	 */
6844 	mutex_lock(&parent_ctx->mutex);
6845 
6846 	/*
6847 	 * We don't have to disable NMIs - we are only looking at
6848 	 * the list, not manipulating it:
6849 	 */
6850 	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
6851 		ret = inherit_task_group(event, parent, parent_ctx,
6852 					 child, ctxn, &inherited_all);
6853 		if (ret)
6854 			break;
6855 	}
6856 
6857 	/*
6858 	 * We can't hold ctx->lock when iterating the ->flexible_groups list due
6859 	 * to allocations, but we need to prevent rotation because
6860 	 * rotate_ctx() will change the list from interrupt context.
6861 	 */
6862 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6863 	parent_ctx->rotate_disable = 1;
6864 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6865 
6866 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
6867 		ret = inherit_task_group(event, parent, parent_ctx,
6868 					 child, ctxn, &inherited_all);
6869 		if (ret)
6870 			break;
6871 	}
6872 
6873 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6874 	parent_ctx->rotate_disable = 0;
6875 
6876 	child_ctx = child->perf_event_ctxp[ctxn];
6877 
6878 	if (child_ctx && inherited_all) {
6879 		/*
6880 		 * Mark the child context as a clone of the parent
6881 		 * context, or of whatever the parent is a clone of.
6882 		 *
6883 		 * Note that if the parent is a clone, holding
6884 		 * parent_ctx->lock prevents it from being uncloned.
6885 		 */
6886 		cloned_ctx = parent_ctx->parent_ctx;
6887 		if (cloned_ctx) {
6888 			child_ctx->parent_ctx = cloned_ctx;
6889 			child_ctx->parent_gen = parent_ctx->parent_gen;
6890 		} else {
6891 			child_ctx->parent_ctx = parent_ctx;
6892 			child_ctx->parent_gen = parent_ctx->generation;
6893 		}
6894 		get_ctx(child_ctx->parent_ctx);
6895 	}
6896 
6897 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6898 	mutex_unlock(&parent_ctx->mutex);
6899 
6900 	perf_unpin_context(parent_ctx);
6901 	put_ctx(parent_ctx);
6902 
6903 	return ret;
6904 }
6905 
6906 /*
6907  * Initialize the perf_event context in task_struct
6908  */
6909 int perf_event_init_task(struct task_struct *child)
6910 {
6911 	int ctxn, ret;
6912 
6913 	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
6914 	mutex_init(&child->perf_event_mutex);
6915 	INIT_LIST_HEAD(&child->perf_event_list);
6916 
6917 	for_each_task_context_nr(ctxn) {
6918 		ret = perf_event_init_context(child, ctxn);
6919 		if (ret)
6920 			return ret;
6921 	}
6922 
6923 	return 0;
6924 }
6925 
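/*
 * The context initialization above is what makes attr.inherit work:
 * each fork() clones the parent's inheritable events into the child,
 * and sync_child_event() folds the counts back in at child exit. A
 * hedged userspace sketch (setup and error handling elided):
 *
 *	attr.inherit = 1;	// follow children across fork()
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	if (fork() == 0) {
 *		// child's events are counted into the same fd
 *		do_child_work();
 *		_exit(0);
 *	}
 *	wait(NULL);	// child exit folds its count into the parent event
 *	read(fd, &count, sizeof(count));	// parent + children total
 *
 * Note the restriction in perf_event_alloc(): attr.inherit cannot be
 * combined with PERF_FORMAT_GROUP in attr.read_format.
 */
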
6926 static void __init perf_event_init_all_cpus(void)
6927 {
6928 	struct swevent_htable *swhash;
6929 	int cpu;
6930 
6931 	for_each_possible_cpu(cpu) {
6932 		swhash = &per_cpu(swevent_htable, cpu);
6933 		mutex_init(&swhash->hlist_mutex);
6934 		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
6935 	}
6936 }
6937 
6938 static void __cpuinit perf_event_init_cpu(int cpu)
6939 {
6940 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6941 
6942 	mutex_lock(&swhash->hlist_mutex);
6943 	if (swhash->hlist_refcount > 0) {
6944 		struct swevent_hlist *hlist;
6945 
6946 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
6947 		WARN_ON(!hlist);
6948 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
6949 	}
6950 	mutex_unlock(&swhash->hlist_mutex);
6951 }
6952 
6953 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
6954 static void perf_pmu_rotate_stop(struct pmu *pmu)
6955 {
6956 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6957 
6958 	WARN_ON(!irqs_disabled());
6959 
6960 	list_del_init(&cpuctx->rotation_list);
6961 }
6962 
6963 static void __perf_event_exit_context(void *__info)
6964 {
6965 	struct perf_event_context *ctx = __info;
6966 	struct perf_event *event, *tmp;
6967 
6968 	perf_pmu_rotate_stop(ctx->pmu);
6969 
6970 	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
6971 		__perf_remove_from_context(event);
6972 	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
6973 		__perf_remove_from_context(event);
6974 }
6975 
6976 static void perf_event_exit_cpu_context(int cpu)
6977 {
6978 	struct perf_event_context *ctx;
6979 	struct pmu *pmu;
6980 	int idx;
6981 
6982 	idx = srcu_read_lock(&pmus_srcu);
6983 	list_for_each_entry_rcu(pmu, &pmus, entry) {
6984 		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
6985 
6986 		mutex_lock(&ctx->mutex);
6987 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
6988 		mutex_unlock(&ctx->mutex);
6989 	}
6990 	srcu_read_unlock(&pmus_srcu, idx);
6991 }
6992 
6993 static void perf_event_exit_cpu(int cpu)
6994 {
6995 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6996 
6997 	mutex_lock(&swhash->hlist_mutex);
6998 	swevent_hlist_release(swhash);
6999 	mutex_unlock(&swhash->hlist_mutex);
7000 
7001 	perf_event_exit_cpu_context(cpu);
7002 }
7003 #else
7004 static inline void perf_event_exit_cpu(int cpu) { }
7005 #endif
7006 
7007 static int
7008 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
7009 {
7010 	int cpu;
7011 
7012 	for_each_online_cpu(cpu)
7013 		perf_event_exit_cpu(cpu);
7014 
7015 	return NOTIFY_OK;
7016 }
7017 
7018 /*
7019  * Run the perf reboot notifier at the very last possible moment so that
7020  * the generic watchdog code runs as long as possible.
7021  */
7022 static struct notifier_block perf_reboot_notifier = {
7023 	.notifier_call = perf_reboot,
7024 	.priority = INT_MIN,
7025 };
7026 
7027 static int __cpuinit
7028 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
7029 {
7030 	unsigned int cpu = (long)hcpu;
7031 
7032 	switch (action & ~CPU_TASKS_FROZEN) {
7033 
7034 	case CPU_UP_PREPARE:
7035 	case CPU_DOWN_FAILED:
7036 		perf_event_init_cpu(cpu);
7037 		break;
7038 
7039 	case CPU_UP_CANCELED:
7040 	case CPU_DOWN_PREPARE:
7041 		perf_event_exit_cpu(cpu);
7042 		break;
7043 
7044 	default:
7045 		break;
7046 	}
7047 
7048 	return NOTIFY_OK;
7049 }
7050 
7051 void __init perf_event_init(void)
7052 {
7053 	int ret;
7054 
7055 	idr_init(&pmu_idr);
7056 
7057 	perf_event_init_all_cpus();
7058 	init_srcu_struct(&pmus_srcu);
7059 	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
7060 	perf_pmu_register(&perf_cpu_clock, NULL, -1);
7061 	perf_pmu_register(&perf_task_clock, NULL, -1);
7062 	perf_tp_register();
7063 	perf_cpu_notifier(perf_cpu_notify);
7064 	register_reboot_notifier(&perf_reboot_notifier);
7065 
7066 	ret = init_hw_breakpoint();
7067 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
7068 }
7069 
7070 static int __init perf_event_sysfs_init(void)
7071 {
7072 	struct pmu *pmu;
7073 	int ret;
7074 
7075 	mutex_lock(&pmus_lock);
7076 
7077 	ret = bus_register(&pmu_bus);
7078 	if (ret)
7079 		goto unlock;
7080 
7081 	list_for_each_entry(pmu, &pmus, entry) {
7082 		if (!pmu->name || pmu->type < 0)
7083 			continue;
7084 
7085 		ret = pmu_dev_alloc(pmu);
7086 		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
7087 	}
7088 	pmu_bus_running = 1;
7089 	ret = 0;
7090 
7091 unlock:
7092 	mutex_unlock(&pmus_lock);
7093 
7094 	return ret;
7095 }
7096 device_initcall(perf_event_sysfs_init);
7097 
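/*
 * The pmu_bus registered above exposes each named pmu under
 * /sys/bus/event_source/devices/<name>, including a "type" attribute.
 * A hedged userspace sketch of resolving a pmu type id for
 * perf_event_attr.type (the path assumes the "software" pmu registered
 * in perf_event_init()):
 *
 *	FILE *f = fopen("/sys/bus/event_source/devices/software/type", "r");
 *	int type;
 *
 *	if (f && fscanf(f, "%d", &type) == 1)
 *		attr.type = type;	// select this pmu by dynamic id
 */
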
7098 #ifdef CONFIG_CGROUP_PERF
7099 static struct cgroup_subsys_state *perf_cgroup_create(
7100 	struct cgroup_subsys *ss, struct cgroup *cont)
7101 {
7102 	struct perf_cgroup *jc;
7103 
7104 	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
7105 	if (!jc)
7106 		return ERR_PTR(-ENOMEM);
7107 
7108 	jc->info = alloc_percpu(struct perf_cgroup_info);
7109 	if (!jc->info) {
7110 		kfree(jc);
7111 		return ERR_PTR(-ENOMEM);
7112 	}
7113 
7114 	return &jc->css;
7115 }
7116 
7117 static void perf_cgroup_destroy(struct cgroup_subsys *ss,
7118 				struct cgroup *cont)
7119 {
7120 	struct perf_cgroup *jc;
7121 	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
7122 			  struct perf_cgroup, css);
7123 	free_percpu(jc->info);
7124 	kfree(jc);
7125 }
7126 
7127 static int __perf_cgroup_move(void *info)
7128 {
7129 	struct task_struct *task = info;
7130 	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
7131 	return 0;
7132 }
7133 
7134 static void
7135 perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
7136 {
7137 	task_function_call(task, __perf_cgroup_move, task);
7138 }
7139 
7140 static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
7141 		struct cgroup *old_cgrp, struct task_struct *task)
7142 {
7143 	/*
7144 	 * cgroup_exit() is called in the copy_process() failure path.
7145 	 * Ignore this case since the task hasn't run yet; this avoids
7146 	 * trying to poke half-freed task state from generic code.
7147 	 */
7148 	if (!(task->flags & PF_EXITING))
7149 		return;
7150 
7151 	perf_cgroup_attach_task(cgrp, task);
7152 }
7153 
7154 struct cgroup_subsys perf_subsys = {
7155 	.name		= "perf_event",
7156 	.subsys_id	= perf_subsys_id,
7157 	.create		= perf_cgroup_create,
7158 	.destroy	= perf_cgroup_destroy,
7159 	.exit		= perf_cgroup_exit,
7160 	.attach_task	= perf_cgroup_attach_task,
7161 };
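
/*
 * A hedged userspace sketch of the cgroup mode wired up above: the pid
 * argument of perf_event_open() carries an fd to the perf_event cgroup
 * directory, PERF_FLAG_PID_CGROUP selects the mode, and cpu must not be
 * -1 (see the check in sys_perf_event_open). The mount path is
 * illustrative:
 *
 *	int cgrp_fd = open("/sys/fs/cgroup/perf_event/mygrp",
 *			   O_RDONLY | O_DIRECTORY);
 *
 *	fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu, -1,
 *		     PERF_FLAG_PID_CGROUP);
 */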
7162 #endif /* CONFIG_CGROUP_PERF */
7163