xref: /linux/kernel/events/core.c (revision 3b64b1881143ce9e461c211cc81acc72d0cdc476)
1 /*
2  * Performance events core code:
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  * For licensing details see kernel-base/COPYING
10  */
11 
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/idr.h>
17 #include <linux/file.h>
18 #include <linux/poll.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/sysfs.h>
22 #include <linux/dcache.h>
23 #include <linux/percpu.h>
24 #include <linux/ptrace.h>
25 #include <linux/reboot.h>
26 #include <linux/vmstat.h>
27 #include <linux/device.h>
28 #include <linux/export.h>
29 #include <linux/vmalloc.h>
30 #include <linux/hardirq.h>
31 #include <linux/rculist.h>
32 #include <linux/uaccess.h>
33 #include <linux/syscalls.h>
34 #include <linux/anon_inodes.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/perf_event.h>
37 #include <linux/ftrace_event.h>
38 #include <linux/hw_breakpoint.h>
39 #include <linux/mm_types.h>
40 
41 #include "internal.h"
42 
43 #include <asm/irq_regs.h>
44 
45 struct remote_function_call {
46 	struct task_struct	*p;
47 	int			(*func)(void *info);
48 	void			*info;
49 	int			ret;
50 };
51 
52 static void remote_function(void *data)
53 {
54 	struct remote_function_call *tfc = data;
55 	struct task_struct *p = tfc->p;
56 
57 	if (p) {
58 		tfc->ret = -EAGAIN;
59 		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
60 			return;
61 	}
62 
63 	tfc->ret = tfc->func(tfc->info);
64 }
65 
66 /**
67  * task_function_call - call a function on the cpu on which a task runs
68  * @p:		the task to evaluate
69  * @func:	the function to be called
70  * @info:	the function call argument
71  *
72  * Calls the function @func when the task is currently running. This might
73  * be on the current CPU, in which case the function is called directly.
74  *
75  * returns: @func return value, or
76  *	    -ESRCH  - when the process isn't running
77  *	    -EAGAIN - when the process moved away
78  */
79 static int
80 task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
81 {
82 	struct remote_function_call data = {
83 		.p	= p,
84 		.func	= func,
85 		.info	= info,
86 		.ret	= -ESRCH, /* No such (running) process */
87 	};
88 
89 	if (task_curr(p))
90 		smp_call_function_single(task_cpu(p), remote_function, &data, 1);
91 
92 	return data.ret;
93 }
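/*
 * A minimal sketch, not part of this file, of how callers further down
 * (perf_remove_from_context(), perf_event_disable()) pair
 * task_function_call() with a retry under ctx->lock, since the task can
 * stop running between the task_curr() check above and the IPI landing.
 * The names __my_remote_op and my_op are hypothetical:
 *
 *	static int __my_remote_op(void *info)
 *	{
 *		struct perf_event *event = info;
 *
 *		return event ? 0 : -EINVAL;
 *	}
 *
 *	static void my_op(struct perf_event *event, struct task_struct *task)
 *	{
 *		struct perf_event_context *ctx = event->ctx;
 *	retry:
 *		if (!task_function_call(task, __my_remote_op, event))
 *			return;
 *
 *		raw_spin_lock_irq(&ctx->lock);
 *		if (ctx->is_active) {
 *			raw_spin_unlock_irq(&ctx->lock);
 *			goto retry;
 *		}
 *		raw_spin_unlock_irq(&ctx->lock);
 *	}
 */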
94 
95 /**
96  * cpu_function_call - call a function on the cpu
97  * @func:	the function to be called
98  * @info:	the function call argument
99  *
100  * Calls the function @func on the remote cpu.
101  *
102  * returns: @func return value or -ENXIO when the cpu is offline
103  */
104 static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
105 {
106 	struct remote_function_call data = {
107 		.p	= NULL,
108 		.func	= func,
109 		.info	= info,
110 		.ret	= -ENXIO, /* No such CPU */
111 	};
112 
113 	smp_call_function_single(cpu, remote_function, &data, 1);
114 
115 	return data.ret;
116 }
117 
118 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
119 		       PERF_FLAG_FD_OUTPUT  |\
120 		       PERF_FLAG_PID_CGROUP)
121 
122 /*
123  * branch priv levels that need permission checks
124  */
125 #define PERF_SAMPLE_BRANCH_PERM_PLM \
126 	(PERF_SAMPLE_BRANCH_KERNEL |\
127 	 PERF_SAMPLE_BRANCH_HV)
128 
129 enum event_type_t {
130 	EVENT_FLEXIBLE = 0x1,
131 	EVENT_PINNED = 0x2,
132 	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
133 };
134 
135 /*
136  * perf_sched_events : >0 events exist
137  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
138  */
139 struct static_key_deferred perf_sched_events __read_mostly;
140 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
141 static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
142 
143 static atomic_t nr_mmap_events __read_mostly;
144 static atomic_t nr_comm_events __read_mostly;
145 static atomic_t nr_task_events __read_mostly;
146 
147 static LIST_HEAD(pmus);
148 static DEFINE_MUTEX(pmus_lock);
149 static struct srcu_struct pmus_srcu;
150 
151 /*
152  * perf event paranoia level:
153  *  -1 - not paranoid at all
154  *   0 - disallow raw tracepoint access for unpriv
155  *   1 - disallow cpu events for unpriv
156  *   2 - disallow kernel profiling for unpriv
157  */
158 int sysctl_perf_event_paranoid __read_mostly = 1;
159 
160 /* Minimum for 512 kiB + 1 user control page */
161 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
162 
163 /*
164  * max perf event sample rate
165  */
166 #define DEFAULT_MAX_SAMPLE_RATE 100000
167 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
168 static int max_samples_per_tick __read_mostly =
169 	DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
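/*
 * Worked example of the value above: with the default rate of 100000
 * samples/sec and HZ=1000, DIV_ROUND_UP(100000, 1000) allows 100 samples
 * per tick; with HZ=250 it allows 400.  The proc handler below recomputes
 * this whenever the sysctl is written.
 */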
170 
171 int perf_proc_update_handler(struct ctl_table *table, int write,
172 		void __user *buffer, size_t *lenp,
173 		loff_t *ppos)
174 {
175 	int ret = proc_dointvec(table, write, buffer, lenp, ppos);
176 
177 	if (ret || !write)
178 		return ret;
179 
180 	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
181 
182 	return 0;
183 }
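/*
 * The handler above is hooked up to a writable sysctl (registered in
 * kernel/sysctl.c, not in this file), so the ceiling can be lowered at
 * run time, e.g. (path assumed, shown for illustration only):
 *
 *	# echo 50000 > /proc/sys/kernel/perf_event_max_sample_rate
 *
 * after which max_samples_per_tick is recomputed from the new value.
 */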
184 
185 static atomic64_t perf_event_id;
186 
187 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
188 			      enum event_type_t event_type);
189 
190 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
191 			     enum event_type_t event_type,
192 			     struct task_struct *task);
193 
194 static void update_context_time(struct perf_event_context *ctx);
195 static u64 perf_event_time(struct perf_event *event);
196 
197 static void ring_buffer_attach(struct perf_event *event,
198 			       struct ring_buffer *rb);
199 
200 void __weak perf_event_print_debug(void)	{ }
201 
202 extern __weak const char *perf_pmu_name(void)
203 {
204 	return "pmu";
205 }
206 
207 static inline u64 perf_clock(void)
208 {
209 	return local_clock();
210 }
211 
212 static inline struct perf_cpu_context *
213 __get_cpu_context(struct perf_event_context *ctx)
214 {
215 	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
216 }
217 
218 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
219 			  struct perf_event_context *ctx)
220 {
221 	raw_spin_lock(&cpuctx->ctx.lock);
222 	if (ctx)
223 		raw_spin_lock(&ctx->lock);
224 }
225 
226 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
227 			    struct perf_event_context *ctx)
228 {
229 	if (ctx)
230 		raw_spin_unlock(&ctx->lock);
231 	raw_spin_unlock(&cpuctx->ctx.lock);
232 }
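/*
 * Typical usage, as in perf_cgroup_switch() and __perf_install_in_context()
 * below: the cpuctx lock is taken first, the (optional) task context lock
 * nests inside it, and PMU state changes happen with both held:
 *
 *	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 *	perf_pmu_disable(cpuctx->ctx.pmu);
 *	...
 *	perf_pmu_enable(cpuctx->ctx.pmu);
 *	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 */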
233 
234 #ifdef CONFIG_CGROUP_PERF
235 
236 /*
237  * Must ensure cgroup is pinned (css_get) before calling
238  * this function. In other words, we cannot call this function
239  * if there is no cgroup event for the current CPU context.
240  */
241 static inline struct perf_cgroup *
242 perf_cgroup_from_task(struct task_struct *task)
243 {
244 	return container_of(task_subsys_state(task, perf_subsys_id),
245 			struct perf_cgroup, css);
246 }
247 
248 static inline bool
249 perf_cgroup_match(struct perf_event *event)
250 {
251 	struct perf_event_context *ctx = event->ctx;
252 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
253 
254 	return !event->cgrp || event->cgrp == cpuctx->cgrp;
255 }
256 
257 static inline bool perf_tryget_cgroup(struct perf_event *event)
258 {
259 	return css_tryget(&event->cgrp->css);
260 }
261 
262 static inline void perf_put_cgroup(struct perf_event *event)
263 {
264 	css_put(&event->cgrp->css);
265 }
266 
267 static inline void perf_detach_cgroup(struct perf_event *event)
268 {
269 	perf_put_cgroup(event);
270 	event->cgrp = NULL;
271 }
272 
273 static inline int is_cgroup_event(struct perf_event *event)
274 {
275 	return event->cgrp != NULL;
276 }
277 
278 static inline u64 perf_cgroup_event_time(struct perf_event *event)
279 {
280 	struct perf_cgroup_info *t;
281 
282 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
283 	return t->time;
284 }
285 
286 static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
287 {
288 	struct perf_cgroup_info *info;
289 	u64 now;
290 
291 	now = perf_clock();
292 
293 	info = this_cpu_ptr(cgrp->info);
294 
295 	info->time += now - info->timestamp;
296 	info->timestamp = now;
297 }
298 
299 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
300 {
301 	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
302 	if (cgrp_out)
303 		__update_cgrp_time(cgrp_out);
304 }
305 
306 static inline void update_cgrp_time_from_event(struct perf_event *event)
307 {
308 	struct perf_cgroup *cgrp;
309 
310 	/*
311 	 * ensure we access cgroup data only when needed and
312 	 * when we know the cgroup is pinned (css_get)
313 	 */
314 	if (!is_cgroup_event(event))
315 		return;
316 
317 	cgrp = perf_cgroup_from_task(current);
318 	/*
319 	 * Do not update time when cgroup is not active
320 	 */
321 	if (cgrp == event->cgrp)
322 		__update_cgrp_time(event->cgrp);
323 }
324 
325 static inline void
326 perf_cgroup_set_timestamp(struct task_struct *task,
327 			  struct perf_event_context *ctx)
328 {
329 	struct perf_cgroup *cgrp;
330 	struct perf_cgroup_info *info;
331 
332 	/*
333 	 * ctx->lock held by caller
334 	 * ensure we do not access cgroup data
335 	 * unless we have the cgroup pinned (css_get)
336 	 */
337 	if (!task || !ctx->nr_cgroups)
338 		return;
339 
340 	cgrp = perf_cgroup_from_task(task);
341 	info = this_cpu_ptr(cgrp->info);
342 	info->timestamp = ctx->timestamp;
343 }
344 
345 #define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
346 #define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */
347 
348 /*
349  * reschedule events based on the cgroup constraint of task.
350  *
351  * mode SWOUT : schedule out everything
352  * mode SWIN : schedule in based on cgroup for next
353  */
354 void perf_cgroup_switch(struct task_struct *task, int mode)
355 {
356 	struct perf_cpu_context *cpuctx;
357 	struct pmu *pmu;
358 	unsigned long flags;
359 
360 	/*
361 	 * disable interrupts to avoid getting nr_cgroup
362 	 * changes via __perf_event_disable(). Also
363 	 * avoids preemption.
364 	 */
365 	local_irq_save(flags);
366 
367 	/*
368 	 * we reschedule only in the presence of cgroup
369 	 * constrained events.
370 	 */
371 	rcu_read_lock();
372 
373 	list_for_each_entry_rcu(pmu, &pmus, entry) {
374 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
375 
376 		/*
377 		 * perf_cgroup_events says at least one
378 		 * context on this CPU has cgroup events.
379 		 *
380 		 * ctx->nr_cgroups reports the number of cgroup
381 		 * events for a context.
382 		 */
383 		if (cpuctx->ctx.nr_cgroups > 0) {
384 			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
385 			perf_pmu_disable(cpuctx->ctx.pmu);
386 
387 			if (mode & PERF_CGROUP_SWOUT) {
388 				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
389 				/*
390 				 * must not be done before ctxswout due
391 				 * to event_filter_match() in event_sched_out()
392 				 */
393 				cpuctx->cgrp = NULL;
394 			}
395 
396 			if (mode & PERF_CGROUP_SWIN) {
397 				WARN_ON_ONCE(cpuctx->cgrp);
398 				/* set cgrp before ctxsw in to
399 				 * allow event_filter_match() to not
400 				 * have to pass task around
401 				 */
402 				cpuctx->cgrp = perf_cgroup_from_task(task);
403 				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
404 			}
405 			perf_pmu_enable(cpuctx->ctx.pmu);
406 			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
407 		}
408 	}
409 
410 	rcu_read_unlock();
411 
412 	local_irq_restore(flags);
413 }
414 
415 static inline void perf_cgroup_sched_out(struct task_struct *task,
416 					 struct task_struct *next)
417 {
418 	struct perf_cgroup *cgrp1;
419 	struct perf_cgroup *cgrp2 = NULL;
420 
421 	/*
422 	 * we come here when we know perf_cgroup_events > 0
423 	 */
424 	cgrp1 = perf_cgroup_from_task(task);
425 
426 	/*
427 	 * next is NULL when called from perf_event_enable_on_exec()
428 	 * that will systematically cause a cgroup_switch()
429 	 */
430 	if (next)
431 		cgrp2 = perf_cgroup_from_task(next);
432 
433 	/*
434 	 * only schedule out current cgroup events if we know
435 	 * that we are switching to a different cgroup. Otherwise,
436 	 * do not touch the cgroup events.
437 	 */
438 	if (cgrp1 != cgrp2)
439 		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
440 }
441 
442 static inline void perf_cgroup_sched_in(struct task_struct *prev,
443 					struct task_struct *task)
444 {
445 	struct perf_cgroup *cgrp1;
446 	struct perf_cgroup *cgrp2 = NULL;
447 
448 	/*
449 	 * we come here when we know perf_cgroup_events > 0
450 	 */
451 	cgrp1 = perf_cgroup_from_task(task);
452 
453 	/* prev can never be NULL */
454 	cgrp2 = perf_cgroup_from_task(prev);
455 
456 	/*
457 	 * only need to schedule in cgroup events if we are changing
458 	 * cgroup during ctxsw. Cgroup events were not scheduled
459 	 * out during ctxsw if that was not the case.
460 	 */
461 	if (cgrp1 != cgrp2)
462 		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
463 }
464 
465 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
466 				      struct perf_event_attr *attr,
467 				      struct perf_event *group_leader)
468 {
469 	struct perf_cgroup *cgrp;
470 	struct cgroup_subsys_state *css;
471 	struct fd f = fdget(fd);
472 	int ret = 0;
473 
474 	if (!f.file)
475 		return -EBADF;
476 
477 	css = cgroup_css_from_dir(f.file, perf_subsys_id);
478 	if (IS_ERR(css)) {
479 		ret = PTR_ERR(css);
480 		goto out;
481 	}
482 
483 	cgrp = container_of(css, struct perf_cgroup, css);
484 	event->cgrp = cgrp;
485 
486 	/* must be done before we fput() the file */
487 	if (!perf_tryget_cgroup(event)) {
488 		event->cgrp = NULL;
489 		ret = -ENOENT;
490 		goto out;
491 	}
492 
493 	/*
494 	 * all events in a group must monitor
495 	 * the same cgroup because a task belongs
496 	 * to only one perf cgroup at a time
497 	 */
498 	if (group_leader && group_leader->cgrp != cgrp) {
499 		perf_detach_cgroup(event);
500 		ret = -EINVAL;
501 	}
502 out:
503 	fdput(f);
504 	return ret;
505 }
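/*
 * A hedged userspace sketch of where the fd passed to perf_cgroup_connect()
 * comes from: with PERF_FLAG_PID_CGROUP the "pid" argument of
 * perf_event_open() is an open fd of a perf_event cgroup directory and a
 * cpu must be given (cgroup events are per-cpu).  Paths and variable
 * names are illustrative only:
 *
 *	int cgrp_fd = open("/sys/fs/cgroup/perf_event/mygrp", O_RDONLY);
 *	int ev_fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu,
 *			    -1, PERF_FLAG_PID_CGROUP);
 */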
506 
507 static inline void
508 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
509 {
510 	struct perf_cgroup_info *t;
511 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
512 	event->shadow_ctx_time = now - t->timestamp;
513 }
514 
515 static inline void
516 perf_cgroup_defer_enabled(struct perf_event *event)
517 {
518 	/*
519 	 * when the current task's perf cgroup does not match
520 	 * the event's, we need to remember to call the
521 	 * perf_mark_enable() function the first time a task with
522 	 * a matching perf cgroup is scheduled in.
523 	 */
524 	if (is_cgroup_event(event) && !perf_cgroup_match(event))
525 		event->cgrp_defer_enabled = 1;
526 }
527 
528 static inline void
529 perf_cgroup_mark_enabled(struct perf_event *event,
530 			 struct perf_event_context *ctx)
531 {
532 	struct perf_event *sub;
533 	u64 tstamp = perf_event_time(event);
534 
535 	if (!event->cgrp_defer_enabled)
536 		return;
537 
538 	event->cgrp_defer_enabled = 0;
539 
540 	event->tstamp_enabled = tstamp - event->total_time_enabled;
541 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
542 		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
543 			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
544 			sub->cgrp_defer_enabled = 0;
545 		}
546 	}
547 }
548 #else /* !CONFIG_CGROUP_PERF */
549 
550 static inline bool
551 perf_cgroup_match(struct perf_event *event)
552 {
553 	return true;
554 }
555 
556 static inline void perf_detach_cgroup(struct perf_event *event)
557 {}
558 
559 static inline int is_cgroup_event(struct perf_event *event)
560 {
561 	return 0;
562 }
563 
564 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
565 {
566 	return 0;
567 }
568 
569 static inline void update_cgrp_time_from_event(struct perf_event *event)
570 {
571 }
572 
573 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
574 {
575 }
576 
577 static inline void perf_cgroup_sched_out(struct task_struct *task,
578 					 struct task_struct *next)
579 {
580 }
581 
582 static inline void perf_cgroup_sched_in(struct task_struct *prev,
583 					struct task_struct *task)
584 {
585 }
586 
587 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
588 				      struct perf_event_attr *attr,
589 				      struct perf_event *group_leader)
590 {
591 	return -EINVAL;
592 }
593 
594 static inline void
595 perf_cgroup_set_timestamp(struct task_struct *task,
596 			  struct perf_event_context *ctx)
597 {
598 }
599 
600 void
601 perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
602 {
603 }
604 
605 static inline void
606 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
607 {
608 }
609 
610 static inline u64 perf_cgroup_event_time(struct perf_event *event)
611 {
612 	return 0;
613 }
614 
615 static inline void
616 perf_cgroup_defer_enabled(struct perf_event *event)
617 {
618 }
619 
620 static inline void
621 perf_cgroup_mark_enabled(struct perf_event *event,
622 			 struct perf_event_context *ctx)
623 {
624 }
625 #endif
626 
627 void perf_pmu_disable(struct pmu *pmu)
628 {
629 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
630 	if (!(*count)++)
631 		pmu->pmu_disable(pmu);
632 }
633 
634 void perf_pmu_enable(struct pmu *pmu)
635 {
636 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
637 	if (!--(*count))
638 		pmu->pmu_enable(pmu);
639 }
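/*
 * pmu_disable_count makes these calls nest per cpu; only the outermost
 * disable and the matching outermost enable reach the hardware callbacks.
 * Sketch:
 *
 *	perf_pmu_disable(pmu);
 *	perf_pmu_disable(pmu);
 *	perf_pmu_enable(pmu);
 *	perf_pmu_enable(pmu);
 *
 * Only the first call invokes pmu->pmu_disable() and only the last one
 * invokes pmu->pmu_enable().
 */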
640 
641 static DEFINE_PER_CPU(struct list_head, rotation_list);
642 
643 /*
644  * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
645  * because they're strictly cpu affine and rotate_start is called with IRQs
646  * disabled, while rotate_context is called from IRQ context.
647  */
648 static void perf_pmu_rotate_start(struct pmu *pmu)
649 {
650 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
651 	struct list_head *head = &__get_cpu_var(rotation_list);
652 
653 	WARN_ON(!irqs_disabled());
654 
655 	if (list_empty(&cpuctx->rotation_list))
656 		list_add(&cpuctx->rotation_list, head);
657 }
658 
659 static void get_ctx(struct perf_event_context *ctx)
660 {
661 	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
662 }
663 
664 static void put_ctx(struct perf_event_context *ctx)
665 {
666 	if (atomic_dec_and_test(&ctx->refcount)) {
667 		if (ctx->parent_ctx)
668 			put_ctx(ctx->parent_ctx);
669 		if (ctx->task)
670 			put_task_struct(ctx->task);
671 		kfree_rcu(ctx, rcu_head);
672 	}
673 }
674 
675 static void unclone_ctx(struct perf_event_context *ctx)
676 {
677 	if (ctx->parent_ctx) {
678 		put_ctx(ctx->parent_ctx);
679 		ctx->parent_ctx = NULL;
680 	}
681 }
682 
683 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
684 {
685 	/*
686 	 * only top level events have the pid namespace they were created in
687 	 */
688 	if (event->parent)
689 		event = event->parent;
690 
691 	return task_tgid_nr_ns(p, event->ns);
692 }
693 
694 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
695 {
696 	/*
697 	 * only top level events have the pid namespace they were created in
698 	 */
699 	if (event->parent)
700 		event = event->parent;
701 
702 	return task_pid_nr_ns(p, event->ns);
703 }
704 
705 /*
706  * If we inherit events we want to return the parent event id
707  * to userspace.
708  */
709 static u64 primary_event_id(struct perf_event *event)
710 {
711 	u64 id = event->id;
712 
713 	if (event->parent)
714 		id = event->parent->id;
715 
716 	return id;
717 }
718 
719 /*
720  * Get the perf_event_context for a task and lock it.
721  * This has to cope with the fact that until it is locked,
722  * the context could get moved to another task.
723  */
724 static struct perf_event_context *
725 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
726 {
727 	struct perf_event_context *ctx;
728 
729 	rcu_read_lock();
730 retry:
731 	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
732 	if (ctx) {
733 		/*
734 		 * If this context is a clone of another, it might
735 		 * get swapped for another underneath us by
736 		 * perf_event_task_sched_out, though the
737 		 * rcu_read_lock() protects us from any context
738 		 * getting freed.  Lock the context and check if it
739 		 * got swapped before we could get the lock, and retry
740 		 * if so.  If we locked the right context, then it
741 		 * can't get swapped on us any more.
742 		 */
743 		raw_spin_lock_irqsave(&ctx->lock, *flags);
744 		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
745 			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
746 			goto retry;
747 		}
748 
749 		if (!atomic_inc_not_zero(&ctx->refcount)) {
750 			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
751 			ctx = NULL;
752 		}
753 	}
754 	rcu_read_unlock();
755 	return ctx;
756 }
757 
758 /*
759  * Get the context for a task and increment its pin_count so it
760  * can't get swapped to another task.  This also increments its
761  * reference count so that the context can't get freed.
762  */
763 static struct perf_event_context *
764 perf_pin_task_context(struct task_struct *task, int ctxn)
765 {
766 	struct perf_event_context *ctx;
767 	unsigned long flags;
768 
769 	ctx = perf_lock_task_context(task, ctxn, &flags);
770 	if (ctx) {
771 		++ctx->pin_count;
772 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
773 	}
774 	return ctx;
775 }
776 
777 static void perf_unpin_context(struct perf_event_context *ctx)
778 {
779 	unsigned long flags;
780 
781 	raw_spin_lock_irqsave(&ctx->lock, flags);
782 	--ctx->pin_count;
783 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
784 }
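/*
 * Sketch of the pin/unpin pairing as used by callers elsewhere in this
 * file (e.g. the inheritance path): the pin keeps the context from being
 * swapped to another task while temporarily unlocked, and the reference
 * taken by perf_pin_task_context() is dropped separately:
 *
 *	ctx = perf_pin_task_context(task, ctxn);
 *	if (ctx) {
 *		...
 *		perf_unpin_context(ctx);
 *		put_ctx(ctx);
 *	}
 */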
785 
786 /*
787  * Update the record of the current time in a context.
788  */
789 static void update_context_time(struct perf_event_context *ctx)
790 {
791 	u64 now = perf_clock();
792 
793 	ctx->time += now - ctx->timestamp;
794 	ctx->timestamp = now;
795 }
796 
797 static u64 perf_event_time(struct perf_event *event)
798 {
799 	struct perf_event_context *ctx = event->ctx;
800 
801 	if (is_cgroup_event(event))
802 		return perf_cgroup_event_time(event);
803 
804 	return ctx ? ctx->time : 0;
805 }
806 
807 /*
808  * Update the total_time_enabled and total_time_running fields for an event.
809  * The caller of this function needs to hold the ctx->lock.
810  */
811 static void update_event_times(struct perf_event *event)
812 {
813 	struct perf_event_context *ctx = event->ctx;
814 	u64 run_end;
815 
816 	if (event->state < PERF_EVENT_STATE_INACTIVE ||
817 	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
818 		return;
819 	/*
820 	 * in cgroup mode, time_enabled represents
821 	 * the time the event was enabled AND active
822 	 * tasks were in the monitored cgroup. This is
823 	 * independent of the activity of the context as
824 	 * there may be a mix of cgroup and non-cgroup events.
825 	 *
826 	 * That is why we treat cgroup events differently
827 	 * here.
828 	 */
829 	if (is_cgroup_event(event))
830 		run_end = perf_cgroup_event_time(event);
831 	else if (ctx->is_active)
832 		run_end = ctx->time;
833 	else
834 		run_end = event->tstamp_stopped;
835 
836 	event->total_time_enabled = run_end - event->tstamp_enabled;
837 
838 	if (event->state == PERF_EVENT_STATE_INACTIVE)
839 		run_end = event->tstamp_stopped;
840 	else
841 		run_end = perf_event_time(event);
842 
843 	event->total_time_running = run_end - event->tstamp_running;
844 
845 }
846 
847 /*
848  * Update total_time_enabled and total_time_running for all events in a group.
849  */
850 static void update_group_times(struct perf_event *leader)
851 {
852 	struct perf_event *event;
853 
854 	update_event_times(leader);
855 	list_for_each_entry(event, &leader->sibling_list, group_entry)
856 		update_event_times(event);
857 }
858 
859 static struct list_head *
860 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
861 {
862 	if (event->attr.pinned)
863 		return &ctx->pinned_groups;
864 	else
865 		return &ctx->flexible_groups;
866 }
867 
868 /*
869  * Add an event to the lists for its context.
870  * Must be called with ctx->mutex and ctx->lock held.
871  */
872 static void
873 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
874 {
875 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
876 	event->attach_state |= PERF_ATTACH_CONTEXT;
877 
878 	/*
879 	 * If we're a stand alone event or group leader, we go to the context
880 	 * list, group events are kept attached to the group so that
881 	 * perf_group_detach can, at all times, locate all siblings.
882 	 */
883 	if (event->group_leader == event) {
884 		struct list_head *list;
885 
886 		if (is_software_event(event))
887 			event->group_flags |= PERF_GROUP_SOFTWARE;
888 
889 		list = ctx_group_list(event, ctx);
890 		list_add_tail(&event->group_entry, list);
891 	}
892 
893 	if (is_cgroup_event(event))
894 		ctx->nr_cgroups++;
895 
896 	if (has_branch_stack(event))
897 		ctx->nr_branch_stack++;
898 
899 	list_add_rcu(&event->event_entry, &ctx->event_list);
900 	if (!ctx->nr_events)
901 		perf_pmu_rotate_start(ctx->pmu);
902 	ctx->nr_events++;
903 	if (event->attr.inherit_stat)
904 		ctx->nr_stat++;
905 }
906 
907 /*
908  * Called at perf_event creation and when events are attached/detached from a
909  * group.
910  */
911 static void perf_event__read_size(struct perf_event *event)
912 {
913 	int entry = sizeof(u64); /* value */
914 	int size = 0;
915 	int nr = 1;
916 
917 	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
918 		size += sizeof(u64);
919 
920 	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
921 		size += sizeof(u64);
922 
923 	if (event->attr.read_format & PERF_FORMAT_ID)
924 		entry += sizeof(u64);
925 
926 	if (event->attr.read_format & PERF_FORMAT_GROUP) {
927 		nr += event->group_leader->nr_siblings;
928 		size += sizeof(u64);
929 	}
930 
931 	size += entry * nr;
932 	event->read_size = size;
933 }
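/*
 * Worked example of the sizing above: for a group leader with two
 * siblings and read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID | PERF_FORMAT_GROUP:
 *
 *	entry = 8 + 8      = 16		(value + id)
 *	nr    = 1 + 2      = 3		(leader + two siblings)
 *	size  = 8 + 8 + 8  = 24		(nr + time_enabled + time_running)
 *	size += 16 * 3     = 72 bytes total
 *
 * matching the { nr, time_enabled, time_running, { value, id } * 3 }
 * layout returned by a read() of the group leader.
 */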
934 
935 static void perf_event__header_size(struct perf_event *event)
936 {
937 	struct perf_sample_data *data;
938 	u64 sample_type = event->attr.sample_type;
939 	u16 size = 0;
940 
941 	perf_event__read_size(event);
942 
943 	if (sample_type & PERF_SAMPLE_IP)
944 		size += sizeof(data->ip);
945 
946 	if (sample_type & PERF_SAMPLE_ADDR)
947 		size += sizeof(data->addr);
948 
949 	if (sample_type & PERF_SAMPLE_PERIOD)
950 		size += sizeof(data->period);
951 
952 	if (sample_type & PERF_SAMPLE_READ)
953 		size += event->read_size;
954 
955 	event->header_size = size;
956 }
957 
958 static void perf_event__id_header_size(struct perf_event *event)
959 {
960 	struct perf_sample_data *data;
961 	u64 sample_type = event->attr.sample_type;
962 	u16 size = 0;
963 
964 	if (sample_type & PERF_SAMPLE_TID)
965 		size += sizeof(data->tid_entry);
966 
967 	if (sample_type & PERF_SAMPLE_TIME)
968 		size += sizeof(data->time);
969 
970 	if (sample_type & PERF_SAMPLE_ID)
971 		size += sizeof(data->id);
972 
973 	if (sample_type & PERF_SAMPLE_STREAM_ID)
974 		size += sizeof(data->stream_id);
975 
976 	if (sample_type & PERF_SAMPLE_CPU)
977 		size += sizeof(data->cpu_entry);
978 
979 	event->id_header_size = size;
980 }
981 
982 static void perf_group_attach(struct perf_event *event)
983 {
984 	struct perf_event *group_leader = event->group_leader, *pos;
985 
986 	/*
987 	 * We can have double attach due to group movement in perf_event_open.
988 	 */
989 	if (event->attach_state & PERF_ATTACH_GROUP)
990 		return;
991 
992 	event->attach_state |= PERF_ATTACH_GROUP;
993 
994 	if (group_leader == event)
995 		return;
996 
997 	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
998 			!is_software_event(event))
999 		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1000 
1001 	list_add_tail(&event->group_entry, &group_leader->sibling_list);
1002 	group_leader->nr_siblings++;
1003 
1004 	perf_event__header_size(group_leader);
1005 
1006 	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1007 		perf_event__header_size(pos);
1008 }
1009 
1010 /*
1011  * Remove an event from the lists for its context.
1012  * Must be called with ctx->mutex and ctx->lock held.
1013  */
1014 static void
1015 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1016 {
1017 	struct perf_cpu_context *cpuctx;
1018 	/*
1019 	 * We can have double detach due to exit/hot-unplug + close.
1020 	 */
1021 	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1022 		return;
1023 
1024 	event->attach_state &= ~PERF_ATTACH_CONTEXT;
1025 
1026 	if (is_cgroup_event(event)) {
1027 		ctx->nr_cgroups--;
1028 		cpuctx = __get_cpu_context(ctx);
1029 		/*
1030 		 * if there are no more cgroup events
1031 		 * then clear cgrp to avoid stale pointer
1032 		 * in update_cgrp_time_from_cpuctx()
1033 		 */
1034 		if (!ctx->nr_cgroups)
1035 			cpuctx->cgrp = NULL;
1036 	}
1037 
1038 	if (has_branch_stack(event))
1039 		ctx->nr_branch_stack--;
1040 
1041 	ctx->nr_events--;
1042 	if (event->attr.inherit_stat)
1043 		ctx->nr_stat--;
1044 
1045 	list_del_rcu(&event->event_entry);
1046 
1047 	if (event->group_leader == event)
1048 		list_del_init(&event->group_entry);
1049 
1050 	update_group_times(event);
1051 
1052 	/*
1053 	 * If event was in error state, then keep it
1054 	 * that way, otherwise bogus counts will be
1055 	 * returned on read(). The only way to get out
1056 	 * of error state is by explicit re-enabling
1057 	 * of the event
1058 	 */
1059 	if (event->state > PERF_EVENT_STATE_OFF)
1060 		event->state = PERF_EVENT_STATE_OFF;
1061 }
1062 
1063 static void perf_group_detach(struct perf_event *event)
1064 {
1065 	struct perf_event *sibling, *tmp;
1066 	struct list_head *list = NULL;
1067 
1068 	/*
1069 	 * We can have double detach due to exit/hot-unplug + close.
1070 	 */
1071 	if (!(event->attach_state & PERF_ATTACH_GROUP))
1072 		return;
1073 
1074 	event->attach_state &= ~PERF_ATTACH_GROUP;
1075 
1076 	/*
1077 	 * If this is a sibling, remove it from its group.
1078 	 */
1079 	if (event->group_leader != event) {
1080 		list_del_init(&event->group_entry);
1081 		event->group_leader->nr_siblings--;
1082 		goto out;
1083 	}
1084 
1085 	if (!list_empty(&event->group_entry))
1086 		list = &event->group_entry;
1087 
1088 	/*
1089 	 * If this was a group event with sibling events then
1090 	 * upgrade the siblings to singleton events by adding them
1091 	 * to whatever list we are on.
1092 	 */
1093 	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1094 		if (list)
1095 			list_move_tail(&sibling->group_entry, list);
1096 		sibling->group_leader = sibling;
1097 
1098 		/* Inherit group flags from the previous leader */
1099 		sibling->group_flags = event->group_flags;
1100 	}
1101 
1102 out:
1103 	perf_event__header_size(event->group_leader);
1104 
1105 	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1106 		perf_event__header_size(tmp);
1107 }
1108 
1109 static inline int
1110 event_filter_match(struct perf_event *event)
1111 {
1112 	return (event->cpu == -1 || event->cpu == smp_processor_id())
1113 	    && perf_cgroup_match(event);
1114 }
1115 
1116 static void
1117 event_sched_out(struct perf_event *event,
1118 		  struct perf_cpu_context *cpuctx,
1119 		  struct perf_event_context *ctx)
1120 {
1121 	u64 tstamp = perf_event_time(event);
1122 	u64 delta;
1123 	/*
1124 	 * An event which could not be activated because of
1125 	 * filter mismatch still needs to have its timings
1126 	 * maintained, otherwise bogus information is returned
1127 	 * via read() for time_enabled, time_running:
1128 	 */
1129 	if (event->state == PERF_EVENT_STATE_INACTIVE
1130 	    && !event_filter_match(event)) {
1131 		delta = tstamp - event->tstamp_stopped;
1132 		event->tstamp_running += delta;
1133 		event->tstamp_stopped = tstamp;
1134 	}
1135 
1136 	if (event->state != PERF_EVENT_STATE_ACTIVE)
1137 		return;
1138 
1139 	event->state = PERF_EVENT_STATE_INACTIVE;
1140 	if (event->pending_disable) {
1141 		event->pending_disable = 0;
1142 		event->state = PERF_EVENT_STATE_OFF;
1143 	}
1144 	event->tstamp_stopped = tstamp;
1145 	event->pmu->del(event, 0);
1146 	event->oncpu = -1;
1147 
1148 	if (!is_software_event(event))
1149 		cpuctx->active_oncpu--;
1150 	ctx->nr_active--;
1151 	if (event->attr.freq && event->attr.sample_freq)
1152 		ctx->nr_freq--;
1153 	if (event->attr.exclusive || !cpuctx->active_oncpu)
1154 		cpuctx->exclusive = 0;
1155 }
1156 
1157 static void
1158 group_sched_out(struct perf_event *group_event,
1159 		struct perf_cpu_context *cpuctx,
1160 		struct perf_event_context *ctx)
1161 {
1162 	struct perf_event *event;
1163 	int state = group_event->state;
1164 
1165 	event_sched_out(group_event, cpuctx, ctx);
1166 
1167 	/*
1168 	 * Schedule out siblings (if any):
1169 	 */
1170 	list_for_each_entry(event, &group_event->sibling_list, group_entry)
1171 		event_sched_out(event, cpuctx, ctx);
1172 
1173 	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1174 		cpuctx->exclusive = 0;
1175 }
1176 
1177 /*
1178  * Cross CPU call to remove a performance event
1179  *
1180  * We disable the event on the hardware level first. After that we
1181  * remove it from the context list.
1182  */
1183 static int __perf_remove_from_context(void *info)
1184 {
1185 	struct perf_event *event = info;
1186 	struct perf_event_context *ctx = event->ctx;
1187 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1188 
1189 	raw_spin_lock(&ctx->lock);
1190 	event_sched_out(event, cpuctx, ctx);
1191 	list_del_event(event, ctx);
1192 	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1193 		ctx->is_active = 0;
1194 		cpuctx->task_ctx = NULL;
1195 	}
1196 	raw_spin_unlock(&ctx->lock);
1197 
1198 	return 0;
1199 }
1200 
1201 
1202 /*
1203  * Remove the event from a task's (or a CPU's) list of events.
1204  *
1205  * CPU events are removed with a smp call. For task events we only
1206  * call when the task is on a CPU.
1207  *
1208  * If event->ctx is a cloned context, callers must make sure that
1209  * every task struct that event->ctx->task could possibly point to
1210  * remains valid.  This is OK when called from perf_release since
1211  * that only calls us on the top-level context, which can't be a clone.
1212  * When called from perf_event_exit_task, it's OK because the
1213  * context has been detached from its task.
1214  */
1215 static void perf_remove_from_context(struct perf_event *event)
1216 {
1217 	struct perf_event_context *ctx = event->ctx;
1218 	struct task_struct *task = ctx->task;
1219 
1220 	lockdep_assert_held(&ctx->mutex);
1221 
1222 	if (!task) {
1223 		/*
1224 		 * Per cpu events are removed via an smp call and
1225 		 * the removal is always successful.
1226 		 */
1227 		cpu_function_call(event->cpu, __perf_remove_from_context, event);
1228 		return;
1229 	}
1230 
1231 retry:
1232 	if (!task_function_call(task, __perf_remove_from_context, event))
1233 		return;
1234 
1235 	raw_spin_lock_irq(&ctx->lock);
1236 	/*
1237 	 * If we failed to find a running task, but find the context active now
1238 	 * that we've acquired the ctx->lock, retry.
1239 	 */
1240 	if (ctx->is_active) {
1241 		raw_spin_unlock_irq(&ctx->lock);
1242 		goto retry;
1243 	}
1244 
1245 	/*
1246 	 * Since the task isn't running, it's safe to remove the event; our
1247 	 * holding the ctx->lock ensures the task won't get scheduled in.
1248 	 */
1249 	list_del_event(event, ctx);
1250 	raw_spin_unlock_irq(&ctx->lock);
1251 }
1252 
1253 /*
1254  * Cross CPU call to disable a performance event
1255  */
1256 int __perf_event_disable(void *info)
1257 {
1258 	struct perf_event *event = info;
1259 	struct perf_event_context *ctx = event->ctx;
1260 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1261 
1262 	/*
1263 	 * If this is a per-task event, need to check whether this
1264 	 * event's task is the current task on this cpu.
1265 	 *
1266 	 * Can trigger due to concurrent perf_event_context_sched_out()
1267 	 * flipping contexts around.
1268 	 */
1269 	if (ctx->task && cpuctx->task_ctx != ctx)
1270 		return -EINVAL;
1271 
1272 	raw_spin_lock(&ctx->lock);
1273 
1274 	/*
1275 	 * If the event is on, turn it off.
1276 	 * If it is in error state, leave it in error state.
1277 	 */
1278 	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1279 		update_context_time(ctx);
1280 		update_cgrp_time_from_event(event);
1281 		update_group_times(event);
1282 		if (event == event->group_leader)
1283 			group_sched_out(event, cpuctx, ctx);
1284 		else
1285 			event_sched_out(event, cpuctx, ctx);
1286 		event->state = PERF_EVENT_STATE_OFF;
1287 	}
1288 
1289 	raw_spin_unlock(&ctx->lock);
1290 
1291 	return 0;
1292 }
1293 
1294 /*
1295  * Disable an event.
1296  *
1297  * If event->ctx is a cloned context, callers must make sure that
1298  * every task struct that event->ctx->task could possibly point to
1299  * remains valid.  This condition is satisfied when called through
1300  * perf_event_for_each_child or perf_event_for_each because they
1301  * hold the top-level event's child_mutex, so any descendant that
1302  * goes to exit will block in sync_child_event.
1303  * When called from perf_pending_event it's OK because event->ctx
1304  * is the current context on this CPU and preemption is disabled,
1305  * hence we can't get into perf_event_task_sched_out for this context.
1306  */
1307 void perf_event_disable(struct perf_event *event)
1308 {
1309 	struct perf_event_context *ctx = event->ctx;
1310 	struct task_struct *task = ctx->task;
1311 
1312 	if (!task) {
1313 		/*
1314 		 * Disable the event on the cpu that it's on
1315 		 */
1316 		cpu_function_call(event->cpu, __perf_event_disable, event);
1317 		return;
1318 	}
1319 
1320 retry:
1321 	if (!task_function_call(task, __perf_event_disable, event))
1322 		return;
1323 
1324 	raw_spin_lock_irq(&ctx->lock);
1325 	/*
1326 	 * If the event is still active, we need to retry the cross-call.
1327 	 */
1328 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
1329 		raw_spin_unlock_irq(&ctx->lock);
1330 		/*
1331 		 * Reload the task pointer, it might have been changed by
1332 		 * a concurrent perf_event_context_sched_out().
1333 		 */
1334 		task = ctx->task;
1335 		goto retry;
1336 	}
1337 
1338 	/*
1339 	 * Since we have the lock this context can't be scheduled
1340 	 * in, so we can change the state safely.
1341 	 */
1342 	if (event->state == PERF_EVENT_STATE_INACTIVE) {
1343 		update_group_times(event);
1344 		event->state = PERF_EVENT_STATE_OFF;
1345 	}
1346 	raw_spin_unlock_irq(&ctx->lock);
1347 }
1348 EXPORT_SYMBOL_GPL(perf_event_disable);
1349 
1350 static void perf_set_shadow_time(struct perf_event *event,
1351 				 struct perf_event_context *ctx,
1352 				 u64 tstamp)
1353 {
1354 	/*
1355 	 * use the correct time source for the time snapshot
1356 	 *
1357 	 * We could get by without this by leveraging the
1358 	 * fact that to get to this function, the caller
1359 	 * has most likely already called update_context_time()
1360 	 * and update_cgrp_time_xx() and thus both timestamps
1361 	 * are identical (or very close). Given that tstamp is
1362 	 * already adjusted for cgroup, we could say that:
1363 	 *    tstamp - ctx->timestamp
1364 	 * is equivalent to
1365 	 *    tstamp - cgrp->timestamp.
1366 	 *
1367 	 * Then, in perf_output_read(), the calculation would
1368 	 * work with no changes because:
1369 	 * - event is guaranteed scheduled in
1370 	 * - no scheduled out in between
1371 	 * - thus the timestamp would be the same
1372 	 *
1373 	 * But this is a bit hairy.
1374 	 *
1375 	 * So instead, we have an explicit cgroup call to remain
1376 	 * within the same time source all along. We believe it
1377 	 * is cleaner and simpler to understand.
1378 	 */
1379 	if (is_cgroup_event(event))
1380 		perf_cgroup_set_shadow_time(event, tstamp);
1381 	else
1382 		event->shadow_ctx_time = tstamp - ctx->timestamp;
1383 }
1384 
1385 #define MAX_INTERRUPTS (~0ULL)
1386 
1387 static void perf_log_throttle(struct perf_event *event, int enable);
1388 
1389 static int
1390 event_sched_in(struct perf_event *event,
1391 		 struct perf_cpu_context *cpuctx,
1392 		 struct perf_event_context *ctx)
1393 {
1394 	u64 tstamp = perf_event_time(event);
1395 
1396 	if (event->state <= PERF_EVENT_STATE_OFF)
1397 		return 0;
1398 
1399 	event->state = PERF_EVENT_STATE_ACTIVE;
1400 	event->oncpu = smp_processor_id();
1401 
1402 	/*
1403 	 * Unthrottle events: since we were just scheduled in we might have missed
1404 	 * several ticks already, and for a heavily scheduling task there is little
1405 	 * guarantee it'll get a tick in a timely manner.
1406 	 */
1407 	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1408 		perf_log_throttle(event, 1);
1409 		event->hw.interrupts = 0;
1410 	}
1411 
1412 	/*
1413 	 * The new state must be visible before we turn it on in the hardware:
1414 	 */
1415 	smp_wmb();
1416 
1417 	if (event->pmu->add(event, PERF_EF_START)) {
1418 		event->state = PERF_EVENT_STATE_INACTIVE;
1419 		event->oncpu = -1;
1420 		return -EAGAIN;
1421 	}
1422 
1423 	event->tstamp_running += tstamp - event->tstamp_stopped;
1424 
1425 	perf_set_shadow_time(event, ctx, tstamp);
1426 
1427 	if (!is_software_event(event))
1428 		cpuctx->active_oncpu++;
1429 	ctx->nr_active++;
1430 	if (event->attr.freq && event->attr.sample_freq)
1431 		ctx->nr_freq++;
1432 
1433 	if (event->attr.exclusive)
1434 		cpuctx->exclusive = 1;
1435 
1436 	return 0;
1437 }
1438 
1439 static int
1440 group_sched_in(struct perf_event *group_event,
1441 	       struct perf_cpu_context *cpuctx,
1442 	       struct perf_event_context *ctx)
1443 {
1444 	struct perf_event *event, *partial_group = NULL;
1445 	struct pmu *pmu = group_event->pmu;
1446 	u64 now = ctx->time;
1447 	bool simulate = false;
1448 
1449 	if (group_event->state == PERF_EVENT_STATE_OFF)
1450 		return 0;
1451 
1452 	pmu->start_txn(pmu);
1453 
1454 	if (event_sched_in(group_event, cpuctx, ctx)) {
1455 		pmu->cancel_txn(pmu);
1456 		return -EAGAIN;
1457 	}
1458 
1459 	/*
1460 	 * Schedule in siblings as one group (if any):
1461 	 */
1462 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1463 		if (event_sched_in(event, cpuctx, ctx)) {
1464 			partial_group = event;
1465 			goto group_error;
1466 		}
1467 	}
1468 
1469 	if (!pmu->commit_txn(pmu))
1470 		return 0;
1471 
1472 group_error:
1473 	/*
1474 	 * Groups can be scheduled in as one unit only, so undo any
1475 	 * partial group before returning:
1476 	 * The events up to the failed event are scheduled out normally,
1477 	 * tstamp_stopped will be updated.
1478 	 *
1479 	 * The failed events and the remaining siblings need to have
1480 	 * their timings updated as if they had gone through event_sched_in()
1481 	 * and event_sched_out(). This is required to get consistent timings
1482 	 * across the group. This also takes care of the case where the group
1483 	 * could never be scheduled by ensuring tstamp_stopped is set to mark
1484 	 * the time the event was actually stopped, such that time delta
1485 	 * calculation in update_event_times() is correct.
1486 	 */
1487 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1488 		if (event == partial_group)
1489 			simulate = true;
1490 
1491 		if (simulate) {
1492 			event->tstamp_running += now - event->tstamp_stopped;
1493 			event->tstamp_stopped = now;
1494 		} else {
1495 			event_sched_out(event, cpuctx, ctx);
1496 		}
1497 	}
1498 	event_sched_out(group_event, cpuctx, ctx);
1499 
1500 	pmu->cancel_txn(pmu);
1501 
1502 	return -EAGAIN;
1503 }
1504 
1505 /*
1506  * Work out whether we can put this event group on the CPU now.
1507  */
1508 static int group_can_go_on(struct perf_event *event,
1509 			   struct perf_cpu_context *cpuctx,
1510 			   int can_add_hw)
1511 {
1512 	/*
1513 	 * Groups consisting entirely of software events can always go on.
1514 	 */
1515 	if (event->group_flags & PERF_GROUP_SOFTWARE)
1516 		return 1;
1517 	/*
1518 	 * If an exclusive group is already on, no other hardware
1519 	 * events can go on.
1520 	 */
1521 	if (cpuctx->exclusive)
1522 		return 0;
1523 	/*
1524 	 * If this group is exclusive and there are already
1525 	 * events on the CPU, it can't go on.
1526 	 */
1527 	if (event->attr.exclusive && cpuctx->active_oncpu)
1528 		return 0;
1529 	/*
1530 	 * Otherwise, try to add it if all previous groups were able
1531 	 * to go on.
1532 	 */
1533 	return can_add_hw;
1534 }
1535 
1536 static void add_event_to_ctx(struct perf_event *event,
1537 			       struct perf_event_context *ctx)
1538 {
1539 	u64 tstamp = perf_event_time(event);
1540 
1541 	list_add_event(event, ctx);
1542 	perf_group_attach(event);
1543 	event->tstamp_enabled = tstamp;
1544 	event->tstamp_running = tstamp;
1545 	event->tstamp_stopped = tstamp;
1546 }
1547 
1548 static void task_ctx_sched_out(struct perf_event_context *ctx);
1549 static void
1550 ctx_sched_in(struct perf_event_context *ctx,
1551 	     struct perf_cpu_context *cpuctx,
1552 	     enum event_type_t event_type,
1553 	     struct task_struct *task);
1554 
1555 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1556 				struct perf_event_context *ctx,
1557 				struct task_struct *task)
1558 {
1559 	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1560 	if (ctx)
1561 		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1562 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1563 	if (ctx)
1564 		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1565 }
1566 
1567 /*
1568  * Cross CPU call to install and enable a performance event
1569  *
1570  * Must be called with ctx->mutex held
1571  */
1572 static int  __perf_install_in_context(void *info)
1573 {
1574 	struct perf_event *event = info;
1575 	struct perf_event_context *ctx = event->ctx;
1576 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1577 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
1578 	struct task_struct *task = current;
1579 
1580 	perf_ctx_lock(cpuctx, task_ctx);
1581 	perf_pmu_disable(cpuctx->ctx.pmu);
1582 
1583 	/*
1584 	 * If there was an active task_ctx schedule it out.
1585 	 */
1586 	if (task_ctx)
1587 		task_ctx_sched_out(task_ctx);
1588 
1589 	/*
1590 	 * If the context we're installing events in is not the
1591 	 * active task_ctx, flip them.
1592 	 */
1593 	if (ctx->task && task_ctx != ctx) {
1594 		if (task_ctx)
1595 			raw_spin_unlock(&task_ctx->lock);
1596 		raw_spin_lock(&ctx->lock);
1597 		task_ctx = ctx;
1598 	}
1599 
1600 	if (task_ctx) {
1601 		cpuctx->task_ctx = task_ctx;
1602 		task = task_ctx->task;
1603 	}
1604 
1605 	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
1606 
1607 	update_context_time(ctx);
1608 	/*
1609 	 * update cgrp time only if current cgrp
1610 	 * matches event->cgrp. Must be done before
1611 	 * calling add_event_to_ctx()
1612 	 */
1613 	update_cgrp_time_from_event(event);
1614 
1615 	add_event_to_ctx(event, ctx);
1616 
1617 	/*
1618 	 * Schedule everything back in
1619 	 */
1620 	perf_event_sched_in(cpuctx, task_ctx, task);
1621 
1622 	perf_pmu_enable(cpuctx->ctx.pmu);
1623 	perf_ctx_unlock(cpuctx, task_ctx);
1624 
1625 	return 0;
1626 }
1627 
1628 /*
1629  * Attach a performance event to a context
1630  *
1631  * First we add the event to the list with the hardware enable bit
1632  * in event->hw_config cleared.
1633  *
1634  * If the event is attached to a task which is on a CPU we use a smp
1635  * call to enable it in the task context. The task might have been
1636  * scheduled away, but we check this in the smp call again.
1637  */
1638 static void
1639 perf_install_in_context(struct perf_event_context *ctx,
1640 			struct perf_event *event,
1641 			int cpu)
1642 {
1643 	struct task_struct *task = ctx->task;
1644 
1645 	lockdep_assert_held(&ctx->mutex);
1646 
1647 	event->ctx = ctx;
1648 	if (event->cpu != -1)
1649 		event->cpu = cpu;
1650 
1651 	if (!task) {
1652 		/*
1653 		 * Per cpu events are installed via an smp call and
1654 		 * the install is always successful.
1655 		 */
1656 		cpu_function_call(cpu, __perf_install_in_context, event);
1657 		return;
1658 	}
1659 
1660 retry:
1661 	if (!task_function_call(task, __perf_install_in_context, event))
1662 		return;
1663 
1664 	raw_spin_lock_irq(&ctx->lock);
1665 	/*
1666 	 * If we failed to find a running task, but find the context active now
1667 	 * that we've acquired the ctx->lock, retry.
1668 	 */
1669 	if (ctx->is_active) {
1670 		raw_spin_unlock_irq(&ctx->lock);
1671 		goto retry;
1672 	}
1673 
1674 	/*
1675 	 * Since the task isn't running, it's safe to add the event; our holding
1676 	 * the ctx->lock ensures the task won't get scheduled in.
1677 	 */
1678 	add_event_to_ctx(event, ctx);
1679 	raw_spin_unlock_irq(&ctx->lock);
1680 }
1681 
1682 /*
1683  * Put an event into inactive state and update time fields.
1684  * Enabling the leader of a group effectively enables all
1685  * the group members that aren't explicitly disabled, so we
1686  * have to update their ->tstamp_enabled also.
1687  * Note: this works for group members as well as group leaders
1688  * since the non-leader members' sibling_lists will be empty.
1689  */
1690 static void __perf_event_mark_enabled(struct perf_event *event)
1691 {
1692 	struct perf_event *sub;
1693 	u64 tstamp = perf_event_time(event);
1694 
1695 	event->state = PERF_EVENT_STATE_INACTIVE;
1696 	event->tstamp_enabled = tstamp - event->total_time_enabled;
1697 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
1698 		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1699 			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
1700 	}
1701 }
1702 
1703 /*
1704  * Cross CPU call to enable a performance event
1705  */
1706 static int __perf_event_enable(void *info)
1707 {
1708 	struct perf_event *event = info;
1709 	struct perf_event_context *ctx = event->ctx;
1710 	struct perf_event *leader = event->group_leader;
1711 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1712 	int err;
1713 
1714 	if (WARN_ON_ONCE(!ctx->is_active))
1715 		return -EINVAL;
1716 
1717 	raw_spin_lock(&ctx->lock);
1718 	update_context_time(ctx);
1719 
1720 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
1721 		goto unlock;
1722 
1723 	/*
1724 	 * set current task's cgroup time reference point
1725 	 */
1726 	perf_cgroup_set_timestamp(current, ctx);
1727 
1728 	__perf_event_mark_enabled(event);
1729 
1730 	if (!event_filter_match(event)) {
1731 		if (is_cgroup_event(event))
1732 			perf_cgroup_defer_enabled(event);
1733 		goto unlock;
1734 	}
1735 
1736 	/*
1737 	 * If the event is in a group and isn't the group leader,
1738 	 * then don't put it on unless the group is on.
1739 	 */
1740 	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
1741 		goto unlock;
1742 
1743 	if (!group_can_go_on(event, cpuctx, 1)) {
1744 		err = -EEXIST;
1745 	} else {
1746 		if (event == leader)
1747 			err = group_sched_in(event, cpuctx, ctx);
1748 		else
1749 			err = event_sched_in(event, cpuctx, ctx);
1750 	}
1751 
1752 	if (err) {
1753 		/*
1754 		 * If this event can't go on and it's part of a
1755 		 * group, then the whole group has to come off.
1756 		 */
1757 		if (leader != event)
1758 			group_sched_out(leader, cpuctx, ctx);
1759 		if (leader->attr.pinned) {
1760 			update_group_times(leader);
1761 			leader->state = PERF_EVENT_STATE_ERROR;
1762 		}
1763 	}
1764 
1765 unlock:
1766 	raw_spin_unlock(&ctx->lock);
1767 
1768 	return 0;
1769 }
1770 
1771 /*
1772  * Enable an event.
1773  *
1774  * If event->ctx is a cloned context, callers must make sure that
1775  * every task struct that event->ctx->task could possibly point to
1776  * remains valid.  This condition is satisfied when called through
1777  * perf_event_for_each_child or perf_event_for_each as described
1778  * for perf_event_disable.
1779  */
1780 void perf_event_enable(struct perf_event *event)
1781 {
1782 	struct perf_event_context *ctx = event->ctx;
1783 	struct task_struct *task = ctx->task;
1784 
1785 	if (!task) {
1786 		/*
1787 		 * Enable the event on the cpu that it's on
1788 		 */
1789 		cpu_function_call(event->cpu, __perf_event_enable, event);
1790 		return;
1791 	}
1792 
1793 	raw_spin_lock_irq(&ctx->lock);
1794 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
1795 		goto out;
1796 
1797 	/*
1798 	 * If the event is in error state, clear that first.
1799 	 * That way, if we see the event in error state below, we
1800 	 * know that it has gone back into error state, as distinct
1801 	 * from the task having been scheduled away before the
1802 	 * cross-call arrived.
1803 	 */
1804 	if (event->state == PERF_EVENT_STATE_ERROR)
1805 		event->state = PERF_EVENT_STATE_OFF;
1806 
1807 retry:
1808 	if (!ctx->is_active) {
1809 		__perf_event_mark_enabled(event);
1810 		goto out;
1811 	}
1812 
1813 	raw_spin_unlock_irq(&ctx->lock);
1814 
1815 	if (!task_function_call(task, __perf_event_enable, event))
1816 		return;
1817 
1818 	raw_spin_lock_irq(&ctx->lock);
1819 
1820 	/*
1821 	 * If the context is active and the event is still off,
1822 	 * we need to retry the cross-call.
1823 	 */
1824 	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
1825 		/*
1826 		 * task could have been flipped by a concurrent
1827 		 * perf_event_context_sched_out()
1828 		 */
1829 		task = ctx->task;
1830 		goto retry;
1831 	}
1832 
1833 out:
1834 	raw_spin_unlock_irq(&ctx->lock);
1835 }
1836 EXPORT_SYMBOL_GPL(perf_event_enable);
1837 
1838 int perf_event_refresh(struct perf_event *event, int refresh)
1839 {
1840 	/*
1841 	 * not supported on inherited events
1842 	 */
1843 	if (event->attr.inherit || !is_sampling_event(event))
1844 		return -EINVAL;
1845 
1846 	atomic_add(refresh, &event->event_limit);
1847 	perf_event_enable(event);
1848 
1849 	return 0;
1850 }
1851 EXPORT_SYMBOL_GPL(perf_event_refresh);
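/*
 * A hedged userspace sketch of how refresh is typically driven: after
 * handling an overflow notification the caller re-arms the event (ev_fd
 * being the perf event fd) for one more overflow via the refresh ioctl:
 *
 *	ioctl(ev_fd, PERF_EVENT_IOC_REFRESH, 1);
 *
 * Each overflow decrements the limit installed above; once it hits zero
 * the event is disabled until it is refreshed or enabled again.
 */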
1852 
1853 static void ctx_sched_out(struct perf_event_context *ctx,
1854 			  struct perf_cpu_context *cpuctx,
1855 			  enum event_type_t event_type)
1856 {
1857 	struct perf_event *event;
1858 	int is_active = ctx->is_active;
1859 
1860 	ctx->is_active &= ~event_type;
1861 	if (likely(!ctx->nr_events))
1862 		return;
1863 
1864 	update_context_time(ctx);
1865 	update_cgrp_time_from_cpuctx(cpuctx);
1866 	if (!ctx->nr_active)
1867 		return;
1868 
1869 	perf_pmu_disable(ctx->pmu);
1870 	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
1871 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1872 			group_sched_out(event, cpuctx, ctx);
1873 	}
1874 
1875 	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
1876 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1877 			group_sched_out(event, cpuctx, ctx);
1878 	}
1879 	perf_pmu_enable(ctx->pmu);
1880 }
1881 
1882 /*
1883  * Test whether two contexts are equivalent, i.e. whether they
1884  * have both been cloned from the same version of the same context
1885  * and they both have the same number of enabled events.
1886  * If the number of enabled events is the same, then the set
1887  * of enabled events should be the same, because these are both
1888  * inherited contexts, therefore we can't access individual events
1889  * in them directly with an fd; we can only enable/disable all
1890  * events via prctl, or enable/disable all events in a family
1891  * via ioctl, which will have the same effect on both contexts.
1892  */
1893 static int context_equiv(struct perf_event_context *ctx1,
1894 			 struct perf_event_context *ctx2)
1895 {
1896 	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1897 		&& ctx1->parent_gen == ctx2->parent_gen
1898 		&& !ctx1->pin_count && !ctx2->pin_count;
1899 }
1900 
1901 static void __perf_event_sync_stat(struct perf_event *event,
1902 				     struct perf_event *next_event)
1903 {
1904 	u64 value;
1905 
1906 	if (!event->attr.inherit_stat)
1907 		return;
1908 
1909 	/*
1910 	 * Update the event value, we cannot use perf_event_read()
1911 	 * because we're in the middle of a context switch and have IRQs
1912 	 * disabled, which upsets smp_call_function_single(), however
1913 	 * we know the event must be on the current CPU, therefore we
1914 	 * don't need to use it.
1915 	 */
1916 	switch (event->state) {
1917 	case PERF_EVENT_STATE_ACTIVE:
1918 		event->pmu->read(event);
1919 		/* fall-through */
1920 
1921 	case PERF_EVENT_STATE_INACTIVE:
1922 		update_event_times(event);
1923 		break;
1924 
1925 	default:
1926 		break;
1927 	}
1928 
1929 	/*
1930 	 * In order to keep per-task stats reliable we need to flip the event
1931 	 * values when we flip the contexts.
1932 	 */
1933 	value = local64_read(&next_event->count);
1934 	value = local64_xchg(&event->count, value);
1935 	local64_set(&next_event->count, value);
1936 
1937 	swap(event->total_time_enabled, next_event->total_time_enabled);
1938 	swap(event->total_time_running, next_event->total_time_running);
1939 
1940 	/*
1941 	 * Since we swizzled the values, update the user visible data too.
1942 	 */
1943 	perf_event_update_userpage(event);
1944 	perf_event_update_userpage(next_event);
1945 }
1946 
1947 #define list_next_entry(pos, member) \
1948 	list_entry(pos->member.next, typeof(*pos), member)
1949 
1950 static void perf_event_sync_stat(struct perf_event_context *ctx,
1951 				   struct perf_event_context *next_ctx)
1952 {
1953 	struct perf_event *event, *next_event;
1954 
1955 	if (!ctx->nr_stat)
1956 		return;
1957 
1958 	update_context_time(ctx);
1959 
1960 	event = list_first_entry(&ctx->event_list,
1961 				   struct perf_event, event_entry);
1962 
1963 	next_event = list_first_entry(&next_ctx->event_list,
1964 					struct perf_event, event_entry);
1965 
1966 	while (&event->event_entry != &ctx->event_list &&
1967 	       &next_event->event_entry != &next_ctx->event_list) {
1968 
1969 		__perf_event_sync_stat(event, next_event);
1970 
1971 		event = list_next_entry(event, event_entry);
1972 		next_event = list_next_entry(next_event, event_entry);
1973 	}
1974 }
1975 
1976 static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1977 					 struct task_struct *next)
1978 {
1979 	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
1980 	struct perf_event_context *next_ctx;
1981 	struct perf_event_context *parent;
1982 	struct perf_cpu_context *cpuctx;
1983 	int do_switch = 1;
1984 
1985 	if (likely(!ctx))
1986 		return;
1987 
1988 	cpuctx = __get_cpu_context(ctx);
1989 	if (!cpuctx->task_ctx)
1990 		return;
1991 
1992 	rcu_read_lock();
1993 	parent = rcu_dereference(ctx->parent_ctx);
1994 	next_ctx = next->perf_event_ctxp[ctxn];
1995 	if (parent && next_ctx &&
1996 	    rcu_dereference(next_ctx->parent_ctx) == parent) {
1997 		/*
1998 		 * Looks like the two contexts are clones, so we might be
1999 		 * able to optimize the context switch.  We lock both
2000 		 * contexts and check that they are clones under the
2001 		 * lock (including re-checking that neither has been
2002 		 * uncloned in the meantime).  It doesn't matter which
2003 		 * order we take the locks because no other cpu could
2004 		 * be trying to lock both of these tasks.
2005 		 */
2006 		raw_spin_lock(&ctx->lock);
2007 		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2008 		if (context_equiv(ctx, next_ctx)) {
2009 			/*
2010 			 * XXX do we need a memory barrier of sorts
2011 			 * wrt rcu_dereference() of perf_event_ctxp

2012 			 */
2013 			task->perf_event_ctxp[ctxn] = next_ctx;
2014 			next->perf_event_ctxp[ctxn] = ctx;
2015 			ctx->task = next;
2016 			next_ctx->task = task;
2017 			do_switch = 0;
2018 
2019 			perf_event_sync_stat(ctx, next_ctx);
2020 		}
2021 		raw_spin_unlock(&next_ctx->lock);
2022 		raw_spin_unlock(&ctx->lock);
2023 	}
2024 	rcu_read_unlock();
2025 
2026 	if (do_switch) {
2027 		raw_spin_lock(&ctx->lock);
2028 		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2029 		cpuctx->task_ctx = NULL;
2030 		raw_spin_unlock(&ctx->lock);
2031 	}
2032 }
2033 
2034 #define for_each_task_context_nr(ctxn)					\
2035 	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2036 
2037 /*
2038  * Called from scheduler to remove the events of the current task,
2039  * with interrupts disabled.
2040  *
2041  * We stop each event and update the event value in event->count.
2042  *
2043  * This does not protect us against NMI, but disable()
2044  * sets the disabled bit in the control field of event _before_
2045  * accessing the event control register. If an NMI hits, then it will
2046  * not restart the event.
2047  */
2048 void __perf_event_task_sched_out(struct task_struct *task,
2049 				 struct task_struct *next)
2050 {
2051 	int ctxn;
2052 
2053 	for_each_task_context_nr(ctxn)
2054 		perf_event_context_sched_out(task, ctxn, next);
2055 
2056 	/*
2057 	 * If cgroup events exist on this CPU, then we need
2058 	 * to check whether we have to switch out PMU state.
2059 	 * cgroup events are system-wide mode only.
2060 	 */
2061 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2062 		perf_cgroup_sched_out(task, next);
2063 }
2064 
2065 static void task_ctx_sched_out(struct perf_event_context *ctx)
2066 {
2067 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2068 
2069 	if (!cpuctx->task_ctx)
2070 		return;
2071 
2072 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2073 		return;
2074 
2075 	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2076 	cpuctx->task_ctx = NULL;
2077 }
2078 
2079 /*
2080  * Called with IRQs disabled
2081  */
2082 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2083 			      enum event_type_t event_type)
2084 {
2085 	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2086 }
2087 
2088 static void
2089 ctx_pinned_sched_in(struct perf_event_context *ctx,
2090 		    struct perf_cpu_context *cpuctx)
2091 {
2092 	struct perf_event *event;
2093 
2094 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2095 		if (event->state <= PERF_EVENT_STATE_OFF)
2096 			continue;
2097 		if (!event_filter_match(event))
2098 			continue;
2099 
2100 		/* may need to reset tstamp_enabled */
2101 		if (is_cgroup_event(event))
2102 			perf_cgroup_mark_enabled(event, ctx);
2103 
2104 		if (group_can_go_on(event, cpuctx, 1))
2105 			group_sched_in(event, cpuctx, ctx);
2106 
2107 		/*
2108 		 * If this pinned group hasn't been scheduled,
2109 		 * put it in error state.
2110 		 */
2111 		if (event->state == PERF_EVENT_STATE_INACTIVE) {
2112 			update_group_times(event);
2113 			event->state = PERF_EVENT_STATE_ERROR;
2114 		}
2115 	}
2116 }
2117 
2118 static void
2119 ctx_flexible_sched_in(struct perf_event_context *ctx,
2120 		      struct perf_cpu_context *cpuctx)
2121 {
2122 	struct perf_event *event;
2123 	int can_add_hw = 1;
2124 
2125 	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2126 		/* Ignore events in OFF or ERROR state */
2127 		if (event->state <= PERF_EVENT_STATE_OFF)
2128 			continue;
2129 		/*
2130 		 * Listen to the 'cpu' scheduling filter constraint
2131 		 * of events:
2132 		 */
2133 		if (!event_filter_match(event))
2134 			continue;
2135 
2136 		/* may need to reset tstamp_enabled */
2137 		if (is_cgroup_event(event))
2138 			perf_cgroup_mark_enabled(event, ctx);
2139 
2140 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
2141 			if (group_sched_in(event, cpuctx, ctx))
2142 				can_add_hw = 0;
2143 		}
2144 	}
2145 }
2146 
2147 static void
2148 ctx_sched_in(struct perf_event_context *ctx,
2149 	     struct perf_cpu_context *cpuctx,
2150 	     enum event_type_t event_type,
2151 	     struct task_struct *task)
2152 {
2153 	u64 now;
2154 	int is_active = ctx->is_active;
2155 
2156 	ctx->is_active |= event_type;
2157 	if (likely(!ctx->nr_events))
2158 		return;
2159 
2160 	now = perf_clock();
2161 	ctx->timestamp = now;
2162 	perf_cgroup_set_timestamp(task, ctx);
2163 	/*
2164 	 * First go through the list and put on any pinned groups
2165 	 * in order to give them the best chance of going on.
2166 	 */
2167 	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
2168 		ctx_pinned_sched_in(ctx, cpuctx);
2169 
2170 	/* Then walk through the lower prio flexible groups */
2171 	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
2172 		ctx_flexible_sched_in(ctx, cpuctx);
2173 }
2174 
2175 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2176 			     enum event_type_t event_type,
2177 			     struct task_struct *task)
2178 {
2179 	struct perf_event_context *ctx = &cpuctx->ctx;
2180 
2181 	ctx_sched_in(ctx, cpuctx, event_type, task);
2182 }
2183 
2184 static void perf_event_context_sched_in(struct perf_event_context *ctx,
2185 					struct task_struct *task)
2186 {
2187 	struct perf_cpu_context *cpuctx;
2188 
2189 	cpuctx = __get_cpu_context(ctx);
2190 	if (cpuctx->task_ctx == ctx)
2191 		return;
2192 
2193 	perf_ctx_lock(cpuctx, ctx);
2194 	perf_pmu_disable(ctx->pmu);
2195 	/*
2196 	 * We want to keep the following priority order:
2197 	 * cpu pinned (that don't need to move), task pinned,
2198 	 * cpu flexible, task flexible.
2199 	 */
2200 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2201 
2202 	if (ctx->nr_events)
2203 		cpuctx->task_ctx = ctx;
2204 
2205 	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2206 
2207 	perf_pmu_enable(ctx->pmu);
2208 	perf_ctx_unlock(cpuctx, ctx);
2209 
2210 	/*
2211 	 * Since these rotations are per-cpu, we need to ensure the
2212 	 * cpu-context we got scheduled on is actually rotating.
2213 	 */
2214 	perf_pmu_rotate_start(ctx->pmu);
2215 }
2216 
2217 /*
2218  * When sampling the branch stack in system-wide mode, it may be necessary
2219  * to flush the stack on context switch. This happens when the branch
2220  * stack does not tag its entries with the pid of the current task.
2221  * Otherwise it becomes impossible to associate a branch entry with a
2222  * task. This ambiguity is more likely to appear when the branch stack
2223  * supports priv level filtering and the user sets it to monitor only
2224  * at the user level (which could be a useful measurement in system-wide
2225  * mode). In that case, the risk is high of having a branch stack with
2226  * branch from multiple tasks. Flushing may mean dropping the existing
2227  * entries or stashing them somewhere in the PMU specific code layer.
2228  *
2229  * This function provides the context switch callback to the lower code
2230  * layer. It is invoked ONLY when there is at least one system-wide context
2231  * with at least one active event using taken branch sampling.
2232  */
2233 static void perf_branch_stack_sched_in(struct task_struct *prev,
2234 				       struct task_struct *task)
2235 {
2236 	struct perf_cpu_context *cpuctx;
2237 	struct pmu *pmu;
2238 	unsigned long flags;
2239 
2240 	/* no need to flush branch stack if not changing task */
2241 	if (prev == task)
2242 		return;
2243 
2244 	local_irq_save(flags);
2245 
2246 	rcu_read_lock();
2247 
2248 	list_for_each_entry_rcu(pmu, &pmus, entry) {
2249 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2250 
2251 		/*
2252 		 * check if the context has at least one
2253 		 * event using PERF_SAMPLE_BRANCH_STACK
2254 		 */
2255 		if (cpuctx->ctx.nr_branch_stack > 0
2256 		    && pmu->flush_branch_stack) {
2257 
2258 			pmu = cpuctx->ctx.pmu;
2259 
2260 			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2261 
2262 			perf_pmu_disable(pmu);
2263 
2264 			pmu->flush_branch_stack();
2265 
2266 			perf_pmu_enable(pmu);
2267 
2268 			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2269 		}
2270 	}
2271 
2272 	rcu_read_unlock();
2273 
2274 	local_irq_restore(flags);
2275 }
2276 
2277 /*
2278  * Called from scheduler to add the events of the current task
2279  * with interrupts disabled.
2280  *
2281  * We restore the event value and then enable it.
2282  *
2283  * This does not protect us against NMI, but enable()
2284  * sets the enabled bit in the control field of event _before_
2285  * accessing the event control register. If an NMI hits, then it will
2286  * keep the event running.
2287  */
2288 void __perf_event_task_sched_in(struct task_struct *prev,
2289 				struct task_struct *task)
2290 {
2291 	struct perf_event_context *ctx;
2292 	int ctxn;
2293 
2294 	for_each_task_context_nr(ctxn) {
2295 		ctx = task->perf_event_ctxp[ctxn];
2296 		if (likely(!ctx))
2297 			continue;
2298 
2299 		perf_event_context_sched_in(ctx, task);
2300 	}
2301 	/*
2302 	 * If cgroup events exist on this CPU, then we need
2303 	 * to check whether we have to switch in PMU state.
2304 	 * cgroup events are system-wide mode only.
2305 	 */
2306 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2307 		perf_cgroup_sched_in(prev, task);
2308 
2309 	/* check for system-wide branch_stack events */
2310 	if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
2311 		perf_branch_stack_sched_in(prev, task);
2312 }
2313 
2314 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2315 {
2316 	u64 frequency = event->attr.sample_freq;
2317 	u64 sec = NSEC_PER_SEC;
2318 	u64 divisor, dividend;
2319 
2320 	int count_fls, nsec_fls, frequency_fls, sec_fls;
2321 
2322 	count_fls = fls64(count);
2323 	nsec_fls = fls64(nsec);
2324 	frequency_fls = fls64(frequency);
2325 	sec_fls = 30;
2326 
2327 	/*
2328 	 * We got @count in @nsec; with a target of sample_freq HZ
2329 	 * the target period becomes:
2330 	 *
2331 	 *             @count * 10^9
2332 	 * period = -------------------
2333 	 *          @nsec * sample_freq
2334 	 *
2335 	 */
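	/*
	 * Worked example with illustrative numbers (an assumption, not
	 * taken from the code): when called from the tick path with
	 * HZ=1000, @nsec is TICK_NSEC = 1,000,000.  If the event advanced
	 * by @count = 2,000,000 in that tick and the user asked for
	 * sample_freq = 4000, then
	 *
	 *	period = 2,000,000 * 10^9 / (1,000,000 * 4000) = 500,000
	 *
	 * i.e. at ~2e9 events/sec, taking a sample every 500,000 events
	 * yields roughly the requested 4000 samples/sec.
	 */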
2336 
2337 	/*
2338 	 * Reduce accuracy by one bit such that @a and @b converge
2339 	 * to a similar magnitude.
2340 	 */
2341 #define REDUCE_FLS(a, b)		\
2342 do {					\
2343 	if (a##_fls > b##_fls) {	\
2344 		a >>= 1;		\
2345 		a##_fls--;		\
2346 	} else {			\
2347 		b >>= 1;		\
2348 		b##_fls--;		\
2349 	}				\
2350 } while (0)
2351 
2352 	/*
2353 	 * Reduce accuracy until either term fits in a u64, then proceed with
2354 	 * the other, so that finally we can do a u64/u64 division.
2355 	 */
2356 	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2357 		REDUCE_FLS(nsec, frequency);
2358 		REDUCE_FLS(sec, count);
2359 	}
2360 
2361 	if (count_fls + sec_fls > 64) {
2362 		divisor = nsec * frequency;
2363 
2364 		while (count_fls + sec_fls > 64) {
2365 			REDUCE_FLS(count, sec);
2366 			divisor >>= 1;
2367 		}
2368 
2369 		dividend = count * sec;
2370 	} else {
2371 		dividend = count * sec;
2372 
2373 		while (nsec_fls + frequency_fls > 64) {
2374 			REDUCE_FLS(nsec, frequency);
2375 			dividend >>= 1;
2376 		}
2377 
2378 		divisor = nsec * frequency;
2379 	}
2380 
2381 	if (!divisor)
2382 		return dividend;
2383 
2384 	return div64_u64(dividend, divisor);
2385 }
2386 
2387 static DEFINE_PER_CPU(int, perf_throttled_count);
2388 static DEFINE_PER_CPU(u64, perf_throttled_seq);
2389 
2390 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
2391 {
2392 	struct hw_perf_event *hwc = &event->hw;
2393 	s64 period, sample_period;
2394 	s64 delta;
2395 
2396 	period = perf_calculate_period(event, nsec, count);
2397 
2398 	delta = (s64)(period - hwc->sample_period);
2399 	delta = (delta + 7) / 8; /* low pass filter */
2400 
2401 	sample_period = hwc->sample_period + delta;
2402 
2403 	if (!sample_period)
2404 		sample_period = 1;
2405 
2406 	hwc->sample_period = sample_period;
2407 
2408 	if (local64_read(&hwc->period_left) > 8*sample_period) {
2409 		if (disable)
2410 			event->pmu->stop(event, PERF_EF_UPDATE);
2411 
2412 		local64_set(&hwc->period_left, 0);
2413 
2414 		if (disable)
2415 			event->pmu->start(event, PERF_EF_RELOAD);
2416 	}
2417 }
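
/*
 * Illustrative arithmetic for the low pass filter above (the numbers are
 * made up): if hwc->sample_period is 400,000 and perf_calculate_period()
 * suggests 480,000, then delta is 80,000 but only delta/8 = 10,000 is
 * applied, giving a new sample_period of 410,000.  A one-off spike can
 * therefore move the period by at most 1/8th of the error per adjustment.
 */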
2418 
2419 /*
2420  * combine freq adjustment with unthrottling to avoid two passes over the
2421  * events. At the same time, make sure, having freq events does not change
2422  * the rate of unthrottling as that would introduce bias.
2423  */
2424 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2425 					   int needs_unthr)
2426 {
2427 	struct perf_event *event;
2428 	struct hw_perf_event *hwc;
2429 	u64 now, period = TICK_NSEC;
2430 	s64 delta;
2431 
2432 	/*
2433 	 * only need to iterate over all events iff:
2434 	 * - the context has events in frequency mode (needs freq adjust)
2435 	 * - there are events to unthrottle on this cpu
2436 	 */
2437 	if (!(ctx->nr_freq || needs_unthr))
2438 		return;
2439 
2440 	raw_spin_lock(&ctx->lock);
2441 	perf_pmu_disable(ctx->pmu);
2442 
2443 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2444 		if (event->state != PERF_EVENT_STATE_ACTIVE)
2445 			continue;
2446 
2447 		if (!event_filter_match(event))
2448 			continue;
2449 
2450 		hwc = &event->hw;
2451 
2452 		if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
2453 			hwc->interrupts = 0;
2454 			perf_log_throttle(event, 1);
2455 			event->pmu->start(event, 0);
2456 		}
2457 
2458 		if (!event->attr.freq || !event->attr.sample_freq)
2459 			continue;
2460 
2461 		/*
2462 		 * stop the event and update event->count
2463 		 */
2464 		event->pmu->stop(event, PERF_EF_UPDATE);
2465 
2466 		now = local64_read(&event->count);
2467 		delta = now - hwc->freq_count_stamp;
2468 		hwc->freq_count_stamp = now;
2469 
2470 		/*
2471 		 * Restart the event; reload only if the value has
2472 		 * changed.  We have already stopped the event, so tell
2473 		 * that to perf_adjust_period() to avoid stopping it
2474 		 * a second time.
2476 		 */
2477 		if (delta > 0)
2478 			perf_adjust_period(event, period, delta, false);
2479 
2480 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2481 	}
2482 
2483 	perf_pmu_enable(ctx->pmu);
2484 	raw_spin_unlock(&ctx->lock);
2485 }
2486 
2487 /*
2488  * Round-robin a context's events:
2489  */
2490 static void rotate_ctx(struct perf_event_context *ctx)
2491 {
2492 	/*
2493 	 * Rotate the first entry of the non-pinned groups to the end.
2494 	 * Rotation might be disabled by the inheritance code.
2495 	 */
2496 	if (!ctx->rotate_disable)
2497 		list_rotate_left(&ctx->flexible_groups);
2498 }
2499 
2500 /*
2501  * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2502  * because they're strictly cpu affine and rotate_start is called with IRQs
2503  * disabled, while rotate_context is called from IRQ context.
2504  */
2505 static void perf_rotate_context(struct perf_cpu_context *cpuctx)
2506 {
2507 	struct perf_event_context *ctx = NULL;
2508 	int rotate = 0, remove = 1;
2509 
2510 	if (cpuctx->ctx.nr_events) {
2511 		remove = 0;
2512 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2513 			rotate = 1;
2514 	}
2515 
2516 	ctx = cpuctx->task_ctx;
2517 	if (ctx && ctx->nr_events) {
2518 		remove = 0;
2519 		if (ctx->nr_events != ctx->nr_active)
2520 			rotate = 1;
2521 	}
2522 
2523 	if (!rotate)
2524 		goto done;
2525 
2526 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2527 	perf_pmu_disable(cpuctx->ctx.pmu);
2528 
2529 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2530 	if (ctx)
2531 		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
2532 
2533 	rotate_ctx(&cpuctx->ctx);
2534 	if (ctx)
2535 		rotate_ctx(ctx);
2536 
2537 	perf_event_sched_in(cpuctx, ctx, current);
2538 
2539 	perf_pmu_enable(cpuctx->ctx.pmu);
2540 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2541 done:
2542 	if (remove)
2543 		list_del_init(&cpuctx->rotation_list);
2544 }
2545 
2546 void perf_event_task_tick(void)
2547 {
2548 	struct list_head *head = &__get_cpu_var(rotation_list);
2549 	struct perf_cpu_context *cpuctx, *tmp;
2550 	struct perf_event_context *ctx;
2551 	int throttled;
2552 
2553 	WARN_ON(!irqs_disabled());
2554 
2555 	__this_cpu_inc(perf_throttled_seq);
2556 	throttled = __this_cpu_xchg(perf_throttled_count, 0);
2557 
2558 	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
2559 		ctx = &cpuctx->ctx;
2560 		perf_adjust_freq_unthr_context(ctx, throttled);
2561 
2562 		ctx = cpuctx->task_ctx;
2563 		if (ctx)
2564 			perf_adjust_freq_unthr_context(ctx, throttled);
2565 
2566 		if (cpuctx->jiffies_interval == 1 ||
2567 				!(jiffies % cpuctx->jiffies_interval))
2568 			perf_rotate_context(cpuctx);
2569 	}
2570 }
2571 
2572 static int event_enable_on_exec(struct perf_event *event,
2573 				struct perf_event_context *ctx)
2574 {
2575 	if (!event->attr.enable_on_exec)
2576 		return 0;
2577 
2578 	event->attr.enable_on_exec = 0;
2579 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
2580 		return 0;
2581 
2582 	__perf_event_mark_enabled(event);
2583 
2584 	return 1;
2585 }
2586 
2587 /*
2588  * Enable all of a task's events that have been marked enable-on-exec.
2589  * This expects task == current.
2590  */
2591 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2592 {
2593 	struct perf_event *event;
2594 	unsigned long flags;
2595 	int enabled = 0;
2596 	int ret;
2597 
2598 	local_irq_save(flags);
2599 	if (!ctx || !ctx->nr_events)
2600 		goto out;
2601 
2602 	/*
2603 	 * We must ctxsw out cgroup events to avoid conflict
2604 	 * when invoking perf_event_context_sched_in() later on
2605 	 * in this function. Otherwise we end up trying to
2606 	 * ctxswin cgroup events which are already scheduled
2607 	 * in.
2608 	 */
2609 	perf_cgroup_sched_out(current, NULL);
2610 
2611 	raw_spin_lock(&ctx->lock);
2612 	task_ctx_sched_out(ctx);
2613 
2614 	list_for_each_entry(event, &ctx->event_list, event_entry) {
2615 		ret = event_enable_on_exec(event, ctx);
2616 		if (ret)
2617 			enabled = 1;
2618 	}
2619 
2620 	/*
2621 	 * Unclone this context if we enabled any event.
2622 	 */
2623 	if (enabled)
2624 		unclone_ctx(ctx);
2625 
2626 	raw_spin_unlock(&ctx->lock);
2627 
2628 	/*
2629 	 * Also calls ctxswin for cgroup events, if any:
2630 	 */
2631 	perf_event_context_sched_in(ctx, ctx->task);
2632 out:
2633 	local_irq_restore(flags);
2634 }
2635 
2636 /*
2637  * Cross CPU call to read the hardware event
2638  */
2639 static void __perf_event_read(void *info)
2640 {
2641 	struct perf_event *event = info;
2642 	struct perf_event_context *ctx = event->ctx;
2643 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2644 
2645 	/*
2646 	 * If this is a task context, we need to check whether it is
2647 	 * the current task context of this cpu.  If not it has been
2648 	 * scheduled out before the smp call arrived.  In that case
2649 	 * event->count would have been updated to a recent sample
2650 	 * when the event was scheduled out.
2651 	 */
2652 	if (ctx->task && cpuctx->task_ctx != ctx)
2653 		return;
2654 
2655 	raw_spin_lock(&ctx->lock);
2656 	if (ctx->is_active) {
2657 		update_context_time(ctx);
2658 		update_cgrp_time_from_event(event);
2659 	}
2660 	update_event_times(event);
2661 	if (event->state == PERF_EVENT_STATE_ACTIVE)
2662 		event->pmu->read(event);
2663 	raw_spin_unlock(&ctx->lock);
2664 }
2665 
2666 static inline u64 perf_event_count(struct perf_event *event)
2667 {
2668 	return local64_read(&event->count) + atomic64_read(&event->child_count);
2669 }
2670 
2671 static u64 perf_event_read(struct perf_event *event)
2672 {
2673 	/*
2674 	 * If event is enabled and currently active on a CPU, update the
2675 	 * value in the event structure:
2676 	 */
2677 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
2678 		smp_call_function_single(event->oncpu,
2679 					 __perf_event_read, event, 1);
2680 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2681 		struct perf_event_context *ctx = event->ctx;
2682 		unsigned long flags;
2683 
2684 		raw_spin_lock_irqsave(&ctx->lock, flags);
2685 		/*
2686 		 * We may read while the context is not active
2687 		 * (e.g., the thread is blocked); in that case
2688 		 * we cannot update the context time.
2689 		 */
2690 		if (ctx->is_active) {
2691 			update_context_time(ctx);
2692 			update_cgrp_time_from_event(event);
2693 		}
2694 		update_event_times(event);
2695 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
2696 	}
2697 
2698 	return perf_event_count(event);
2699 }
2700 
2701 /*
2702  * Initialize the perf_event context in a task_struct:
2703  */
2704 static void __perf_event_init_context(struct perf_event_context *ctx)
2705 {
2706 	raw_spin_lock_init(&ctx->lock);
2707 	mutex_init(&ctx->mutex);
2708 	INIT_LIST_HEAD(&ctx->pinned_groups);
2709 	INIT_LIST_HEAD(&ctx->flexible_groups);
2710 	INIT_LIST_HEAD(&ctx->event_list);
2711 	atomic_set(&ctx->refcount, 1);
2712 }
2713 
2714 static struct perf_event_context *
2715 alloc_perf_context(struct pmu *pmu, struct task_struct *task)
2716 {
2717 	struct perf_event_context *ctx;
2718 
2719 	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2720 	if (!ctx)
2721 		return NULL;
2722 
2723 	__perf_event_init_context(ctx);
2724 	if (task) {
2725 		ctx->task = task;
2726 		get_task_struct(task);
2727 	}
2728 	ctx->pmu = pmu;
2729 
2730 	return ctx;
2731 }
2732 
2733 static struct task_struct *
2734 find_lively_task_by_vpid(pid_t vpid)
2735 {
2736 	struct task_struct *task;
2737 	int err;
2738 
2739 	rcu_read_lock();
2740 	if (!vpid)
2741 		task = current;
2742 	else
2743 		task = find_task_by_vpid(vpid);
2744 	if (task)
2745 		get_task_struct(task);
2746 	rcu_read_unlock();
2747 
2748 	if (!task)
2749 		return ERR_PTR(-ESRCH);
2750 
2751 	/* Reuse ptrace permission checks for now. */
2752 	err = -EACCES;
2753 	if (!ptrace_may_access(task, PTRACE_MODE_READ))
2754 		goto errout;
2755 
2756 	return task;
2757 errout:
2758 	put_task_struct(task);
2759 	return ERR_PTR(err);
2760 
2761 }
2762 
2763 /*
2764  * Returns a matching context with refcount and pincount.
2765  */
2766 static struct perf_event_context *
2767 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
2768 {
2769 	struct perf_event_context *ctx;
2770 	struct perf_cpu_context *cpuctx;
2771 	unsigned long flags;
2772 	int ctxn, err;
2773 
2774 	if (!task) {
2775 		/* Must be root to operate on a CPU event: */
2776 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2777 			return ERR_PTR(-EACCES);
2778 
2779 		/*
2780 		 * We could be clever and allow attaching an event to an
2781 		 * offline CPU and activate it when the CPU comes up, but
2782 		 * that's for later.
2783 		 */
2784 		if (!cpu_online(cpu))
2785 			return ERR_PTR(-ENODEV);
2786 
2787 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
2788 		ctx = &cpuctx->ctx;
2789 		get_ctx(ctx);
2790 		++ctx->pin_count;
2791 
2792 		return ctx;
2793 	}
2794 
2795 	err = -EINVAL;
2796 	ctxn = pmu->task_ctx_nr;
2797 	if (ctxn < 0)
2798 		goto errout;
2799 
2800 retry:
2801 	ctx = perf_lock_task_context(task, ctxn, &flags);
2802 	if (ctx) {
2803 		unclone_ctx(ctx);
2804 		++ctx->pin_count;
2805 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
2806 	} else {
2807 		ctx = alloc_perf_context(pmu, task);
2808 		err = -ENOMEM;
2809 		if (!ctx)
2810 			goto errout;
2811 
2812 		err = 0;
2813 		mutex_lock(&task->perf_event_mutex);
2814 		/*
2815 		 * If it has already passed perf_event_exit_task(),
2816 		 * we must see PF_EXITING; it takes this mutex too.
2817 		 */
2818 		if (task->flags & PF_EXITING)
2819 			err = -ESRCH;
2820 		else if (task->perf_event_ctxp[ctxn])
2821 			err = -EAGAIN;
2822 		else {
2823 			get_ctx(ctx);
2824 			++ctx->pin_count;
2825 			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
2826 		}
2827 		mutex_unlock(&task->perf_event_mutex);
2828 
2829 		if (unlikely(err)) {
2830 			put_ctx(ctx);
2831 
2832 			if (err == -EAGAIN)
2833 				goto retry;
2834 			goto errout;
2835 		}
2836 	}
2837 
2838 	return ctx;
2839 
2840 errout:
2841 	return ERR_PTR(err);
2842 }
2843 
2844 static void perf_event_free_filter(struct perf_event *event);
2845 
2846 static void free_event_rcu(struct rcu_head *head)
2847 {
2848 	struct perf_event *event;
2849 
2850 	event = container_of(head, struct perf_event, rcu_head);
2851 	if (event->ns)
2852 		put_pid_ns(event->ns);
2853 	perf_event_free_filter(event);
2854 	kfree(event);
2855 }
2856 
2857 static void ring_buffer_put(struct ring_buffer *rb);
2858 
2859 static void free_event(struct perf_event *event)
2860 {
2861 	irq_work_sync(&event->pending);
2862 
2863 	if (!event->parent) {
2864 		if (event->attach_state & PERF_ATTACH_TASK)
2865 			static_key_slow_dec_deferred(&perf_sched_events);
2866 		if (event->attr.mmap || event->attr.mmap_data)
2867 			atomic_dec(&nr_mmap_events);
2868 		if (event->attr.comm)
2869 			atomic_dec(&nr_comm_events);
2870 		if (event->attr.task)
2871 			atomic_dec(&nr_task_events);
2872 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2873 			put_callchain_buffers();
2874 		if (is_cgroup_event(event)) {
2875 			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
2876 			static_key_slow_dec_deferred(&perf_sched_events);
2877 		}
2878 
2879 		if (has_branch_stack(event)) {
2880 			static_key_slow_dec_deferred(&perf_sched_events);
2881 			/* is system-wide event */
2882 			if (!(event->attach_state & PERF_ATTACH_TASK))
2883 				atomic_dec(&per_cpu(perf_branch_stack_events,
2884 						    event->cpu));
2885 		}
2886 	}
2887 
2888 	if (event->rb) {
2889 		ring_buffer_put(event->rb);
2890 		event->rb = NULL;
2891 	}
2892 
2893 	if (is_cgroup_event(event))
2894 		perf_detach_cgroup(event);
2895 
2896 	if (event->destroy)
2897 		event->destroy(event);
2898 
2899 	if (event->ctx)
2900 		put_ctx(event->ctx);
2901 
2902 	call_rcu(&event->rcu_head, free_event_rcu);
2903 }
2904 
2905 int perf_event_release_kernel(struct perf_event *event)
2906 {
2907 	struct perf_event_context *ctx = event->ctx;
2908 
2909 	WARN_ON_ONCE(ctx->parent_ctx);
2910 	/*
2911 	 * There are two ways this annotation is useful:
2912 	 *
2913 	 *  1) there is a lock recursion from perf_event_exit_task
2914 	 *     see the comment there.
2915 	 *
2916 	 *  2) there is a lock-inversion with mmap_sem through
2917 	 *     perf_event_read_group(), which takes faults while
2918 	 *     holding ctx->mutex, however this is called after
2919 	 *     the last filedesc died, so there is no possibility
2920 	 *     to trigger the AB-BA case.
2921 	 */
2922 	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
2923 	raw_spin_lock_irq(&ctx->lock);
2924 	perf_group_detach(event);
2925 	raw_spin_unlock_irq(&ctx->lock);
2926 	perf_remove_from_context(event);
2927 	mutex_unlock(&ctx->mutex);
2928 
2929 	free_event(event);
2930 
2931 	return 0;
2932 }
2933 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
2934 
2935 /*
2936  * Called when the last reference to the file is gone.
2937  */
2938 static void put_event(struct perf_event *event)
2939 {
2940 	struct task_struct *owner;
2941 
2942 	if (!atomic_long_dec_and_test(&event->refcount))
2943 		return;
2944 
2945 	rcu_read_lock();
2946 	owner = ACCESS_ONCE(event->owner);
2947 	/*
2948 	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
2949 	 * !owner it means the list deletion is complete and we can indeed
2950 	 * free this event, otherwise we need to serialize on
2951 	 * owner->perf_event_mutex.
2952 	 */
2953 	smp_read_barrier_depends();
2954 	if (owner) {
2955 		/*
2956 		 * Since delayed_put_task_struct() also drops the last
2957 		 * task reference we can safely take a new reference
2958 		 * while holding the rcu_read_lock().
2959 		 */
2960 		get_task_struct(owner);
2961 	}
2962 	rcu_read_unlock();
2963 
2964 	if (owner) {
2965 		mutex_lock(&owner->perf_event_mutex);
2966 		/*
2967 		 * We have to re-check the event->owner field, if it is cleared
2968 		 * we raced with perf_event_exit_task(), acquiring the mutex
2969 		 * ensured they're done, and we can proceed with freeing the
2970 		 * event.
2971 		 */
2972 		if (event->owner)
2973 			list_del_init(&event->owner_entry);
2974 		mutex_unlock(&owner->perf_event_mutex);
2975 		put_task_struct(owner);
2976 	}
2977 
2978 	perf_event_release_kernel(event);
2979 }
2980 
2981 static int perf_release(struct inode *inode, struct file *file)
2982 {
2983 	put_event(file->private_data);
2984 	return 0;
2985 }
2986 
2987 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
2988 {
2989 	struct perf_event *child;
2990 	u64 total = 0;
2991 
2992 	*enabled = 0;
2993 	*running = 0;
2994 
2995 	mutex_lock(&event->child_mutex);
2996 	total += perf_event_read(event);
2997 	*enabled += event->total_time_enabled +
2998 			atomic64_read(&event->child_total_time_enabled);
2999 	*running += event->total_time_running +
3000 			atomic64_read(&event->child_total_time_running);
3001 
3002 	list_for_each_entry(child, &event->child_list, child_list) {
3003 		total += perf_event_read(child);
3004 		*enabled += child->total_time_enabled;
3005 		*running += child->total_time_running;
3006 	}
3007 	mutex_unlock(&event->child_mutex);
3008 
3009 	return total;
3010 }
3011 EXPORT_SYMBOL_GPL(perf_event_read_value);
3012 
3013 static int perf_event_read_group(struct perf_event *event,
3014 				   u64 read_format, char __user *buf)
3015 {
3016 	struct perf_event *leader = event->group_leader, *sub;
3017 	int n = 0, size = 0, ret = -EFAULT;
3018 	struct perf_event_context *ctx = leader->ctx;
3019 	u64 values[5];
3020 	u64 count, enabled, running;
3021 
3022 	mutex_lock(&ctx->mutex);
3023 	count = perf_event_read_value(leader, &enabled, &running);
3024 
3025 	values[n++] = 1 + leader->nr_siblings;
3026 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3027 		values[n++] = enabled;
3028 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3029 		values[n++] = running;
3030 	values[n++] = count;
3031 	if (read_format & PERF_FORMAT_ID)
3032 		values[n++] = primary_event_id(leader);
3033 
3034 	size = n * sizeof(u64);
3035 
3036 	if (copy_to_user(buf, values, size))
3037 		goto unlock;
3038 
3039 	ret = size;
3040 
3041 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3042 		n = 0;
3043 
3044 		values[n++] = perf_event_read_value(sub, &enabled, &running);
3045 		if (read_format & PERF_FORMAT_ID)
3046 			values[n++] = primary_event_id(sub);
3047 
3048 		size = n * sizeof(u64);
3049 
3050 		if (copy_to_user(buf + ret, values, size)) {
3051 			ret = -EFAULT;
3052 			goto unlock;
3053 		}
3054 
3055 		ret += size;
3056 	}
3057 unlock:
3058 	mutex_unlock(&ctx->mutex);
3059 
3060 	return ret;
3061 }
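
/*
 * Illustrative user-space view of the buffer filled by
 * perf_event_read_group() above, for an event opened with
 * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
 * (a sketch of the layout; the struct and variable names are made up):
 *
 *	struct group_read {
 *		__u64 nr;		// 1 + leader->nr_siblings
 *		__u64 time_enabled;
 *		__u64 time_running;
 *		struct {
 *			__u64 value;
 *			__u64 id;	// primary_event_id()
 *		} cntr[];		// leader first, then the siblings
 *	};
 *
 *	// One read() on the group leader returns the whole group:
 *	read(group_fd, buf, sizeof(__u64) * (3 + 2 * nr_events));
 */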
3062 
3063 static int perf_event_read_one(struct perf_event *event,
3064 				 u64 read_format, char __user *buf)
3065 {
3066 	u64 enabled, running;
3067 	u64 values[4];
3068 	int n = 0;
3069 
3070 	values[n++] = perf_event_read_value(event, &enabled, &running);
3071 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3072 		values[n++] = enabled;
3073 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3074 		values[n++] = running;
3075 	if (read_format & PERF_FORMAT_ID)
3076 		values[n++] = primary_event_id(event);
3077 
3078 	if (copy_to_user(buf, values, n * sizeof(u64)))
3079 		return -EFAULT;
3080 
3081 	return n * sizeof(u64);
3082 }
3083 
3084 /*
3085  * Read the performance event - simple non blocking version for now
3086  */
3087 static ssize_t
3088 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
3089 {
3090 	u64 read_format = event->attr.read_format;
3091 	int ret;
3092 
3093 	/*
3094 	 * Return end-of-file for a read on an event that is in
3095 	 * error state (i.e. because it was pinned but it couldn't be
3096 	 * scheduled on to the CPU at some point).
3097 	 */
3098 	if (event->state == PERF_EVENT_STATE_ERROR)
3099 		return 0;
3100 
3101 	if (count < event->read_size)
3102 		return -ENOSPC;
3103 
3104 	WARN_ON_ONCE(event->ctx->parent_ctx);
3105 	if (read_format & PERF_FORMAT_GROUP)
3106 		ret = perf_event_read_group(event, read_format, buf);
3107 	else
3108 		ret = perf_event_read_one(event, read_format, buf);
3109 
3110 	return ret;
3111 }
3112 
3113 static ssize_t
3114 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3115 {
3116 	struct perf_event *event = file->private_data;
3117 
3118 	return perf_read_hw(event, buf, count);
3119 }
3120 
3121 static unsigned int perf_poll(struct file *file, poll_table *wait)
3122 {
3123 	struct perf_event *event = file->private_data;
3124 	struct ring_buffer *rb;
3125 	unsigned int events = POLLHUP;
3126 
3127 	/*
3128 	 * Race between perf_event_set_output() and perf_poll(): perf_poll()
3129 	 * grabs the rb reference but perf_event_set_output() overrides it.
3130 	 * Here is the timeline for two threads T1, T2:
3131 	 * t0: T1, rb = rcu_dereference(event->rb)
3132 	 * t1: T2, old_rb = event->rb
3133 	 * t2: T2, event->rb = new rb
3134 	 * t3: T2, ring_buffer_detach(old_rb)
3135 	 * t4: T1, ring_buffer_attach(rb)
3136 	 * t5: T1, poll_wait(event->waitq)
3137 	 *
3138 	 * To avoid this problem, we grab mmap_mutex in perf_poll()
3139 	 * thereby ensuring that the assignment of the new ring buffer
3140 	 * and the detachment of the old buffer appear atomic to perf_poll()
3141 	 */
3142 	mutex_lock(&event->mmap_mutex);
3143 
3144 	rcu_read_lock();
3145 	rb = rcu_dereference(event->rb);
3146 	if (rb) {
3147 		ring_buffer_attach(event, rb);
3148 		events = atomic_xchg(&rb->poll, 0);
3149 	}
3150 	rcu_read_unlock();
3151 
3152 	mutex_unlock(&event->mmap_mutex);
3153 
3154 	poll_wait(file, &event->waitq, wait);
3155 
3156 	return events;
3157 }
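
/*
 * Illustrative user-space counterpart of perf_poll() (a sketch; perf_fd is
 * an assumption and would come from perf_event_open() with
 * attr.wakeup_events or attr.watermark/wakeup_watermark set, and with the
 * ring buffer already mmap()ed):
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
 *
 *	// Blocks until the ring buffer crosses the wakeup threshold;
 *	// revents then carries the bits staged in rb->poll above.
 *	poll(&pfd, 1, -1);
 */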
3158 
3159 static void perf_event_reset(struct perf_event *event)
3160 {
3161 	(void)perf_event_read(event);
3162 	local64_set(&event->count, 0);
3163 	perf_event_update_userpage(event);
3164 }
3165 
3166 /*
3167  * Holding the top-level event's child_mutex means that any
3168  * descendant process that has inherited this event will block
3169  * in sync_child_event if it goes to exit, thus satisfying the
3170  * task existence requirements of perf_event_enable/disable.
3171  */
3172 static void perf_event_for_each_child(struct perf_event *event,
3173 					void (*func)(struct perf_event *))
3174 {
3175 	struct perf_event *child;
3176 
3177 	WARN_ON_ONCE(event->ctx->parent_ctx);
3178 	mutex_lock(&event->child_mutex);
3179 	func(event);
3180 	list_for_each_entry(child, &event->child_list, child_list)
3181 		func(child);
3182 	mutex_unlock(&event->child_mutex);
3183 }
3184 
3185 static void perf_event_for_each(struct perf_event *event,
3186 				  void (*func)(struct perf_event *))
3187 {
3188 	struct perf_event_context *ctx = event->ctx;
3189 	struct perf_event *sibling;
3190 
3191 	WARN_ON_ONCE(ctx->parent_ctx);
3192 	mutex_lock(&ctx->mutex);
3193 	event = event->group_leader;
3194 
3195 	perf_event_for_each_child(event, func);
3196 	list_for_each_entry(sibling, &event->sibling_list, group_entry)
3197 		perf_event_for_each_child(sibling, func);
3198 	mutex_unlock(&ctx->mutex);
3199 }
3200 
3201 static int perf_event_period(struct perf_event *event, u64 __user *arg)
3202 {
3203 	struct perf_event_context *ctx = event->ctx;
3204 	int ret = 0;
3205 	u64 value;
3206 
3207 	if (!is_sampling_event(event))
3208 		return -EINVAL;
3209 
3210 	if (copy_from_user(&value, arg, sizeof(value)))
3211 		return -EFAULT;
3212 
3213 	if (!value)
3214 		return -EINVAL;
3215 
3216 	raw_spin_lock_irq(&ctx->lock);
3217 	if (event->attr.freq) {
3218 		if (value > sysctl_perf_event_sample_rate) {
3219 			ret = -EINVAL;
3220 			goto unlock;
3221 		}
3222 
3223 		event->attr.sample_freq = value;
3224 	} else {
3225 		event->attr.sample_period = value;
3226 		event->hw.sample_period = value;
3227 	}
3228 unlock:
3229 	raw_spin_unlock_irq(&ctx->lock);
3230 
3231 	return ret;
3232 }
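
/*
 * Illustrative user-space use of the period update above (a sketch;
 * perf_fd is an assumption and would come from perf_event_open() on a
 * sampling event):
 *
 *	__u64 new_period = 100000;
 *
 *	// The value is taken as sample_period, or as sample_freq when
 *	// the event was created with attr.freq = 1; see the branches
 *	// in perf_event_period() above.
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &new_period) < 0)
 *		perror("PERF_EVENT_IOC_PERIOD");
 */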
3233 
3234 static const struct file_operations perf_fops;
3235 
3236 static inline int perf_fget_light(int fd, struct fd *p)
3237 {
3238 	struct fd f = fdget(fd);
3239 	if (!f.file)
3240 		return -EBADF;
3241 
3242 	if (f.file->f_op != &perf_fops) {
3243 		fdput(f);
3244 		return -EBADF;
3245 	}
3246 	*p = f;
3247 	return 0;
3248 }
3249 
3250 static int perf_event_set_output(struct perf_event *event,
3251 				 struct perf_event *output_event);
3252 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3253 
3254 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3255 {
3256 	struct perf_event *event = file->private_data;
3257 	void (*func)(struct perf_event *);
3258 	u32 flags = arg;
3259 
3260 	switch (cmd) {
3261 	case PERF_EVENT_IOC_ENABLE:
3262 		func = perf_event_enable;
3263 		break;
3264 	case PERF_EVENT_IOC_DISABLE:
3265 		func = perf_event_disable;
3266 		break;
3267 	case PERF_EVENT_IOC_RESET:
3268 		func = perf_event_reset;
3269 		break;
3270 
3271 	case PERF_EVENT_IOC_REFRESH:
3272 		return perf_event_refresh(event, arg);
3273 
3274 	case PERF_EVENT_IOC_PERIOD:
3275 		return perf_event_period(event, (u64 __user *)arg);
3276 
3277 	case PERF_EVENT_IOC_SET_OUTPUT:
3278 	{
3279 		int ret;
3280 		if (arg != -1) {
3281 			struct perf_event *output_event;
3282 			struct fd output;
3283 			ret = perf_fget_light(arg, &output);
3284 			if (ret)
3285 				return ret;
3286 			output_event = output.file->private_data;
3287 			ret = perf_event_set_output(event, output_event);
3288 			fdput(output);
3289 		} else {
3290 			ret = perf_event_set_output(event, NULL);
3291 		}
3292 		return ret;
3293 	}
3294 
3295 	case PERF_EVENT_IOC_SET_FILTER:
3296 		return perf_event_set_filter(event, (void __user *)arg);
3297 
3298 	default:
3299 		return -ENOTTY;
3300 	}
3301 
3302 	if (flags & PERF_IOC_FLAG_GROUP)
3303 		perf_event_for_each(event, func);
3304 	else
3305 		perf_event_for_each_child(event, func);
3306 
3307 	return 0;
3308 }
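
/*
 * Illustrative ioctl usage for two of the cases handled above (a sketch;
 * both fds are assumptions and would come from perf_event_open()):
 *
 *	// Redirect the sampling output of 'fd' into the ring buffer
 *	// mmap()ed on 'leader_fd'; an argument of -1 clears the
 *	// redirection again.
 *	ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, leader_fd);
 *
 *	// With PERF_IOC_FLAG_GROUP the operation is applied to the
 *	// whole sibling group via perf_event_for_each(), not just to
 *	// this one event.
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */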
3309 
3310 int perf_event_task_enable(void)
3311 {
3312 	struct perf_event *event;
3313 
3314 	mutex_lock(&current->perf_event_mutex);
3315 	list_for_each_entry(event, &current->perf_event_list, owner_entry)
3316 		perf_event_for_each_child(event, perf_event_enable);
3317 	mutex_unlock(&current->perf_event_mutex);
3318 
3319 	return 0;
3320 }
3321 
3322 int perf_event_task_disable(void)
3323 {
3324 	struct perf_event *event;
3325 
3326 	mutex_lock(&current->perf_event_mutex);
3327 	list_for_each_entry(event, &current->perf_event_list, owner_entry)
3328 		perf_event_for_each_child(event, perf_event_disable);
3329 	mutex_unlock(&current->perf_event_mutex);
3330 
3331 	return 0;
3332 }
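
/*
 * Illustrative sketch: the two helpers above back the prctl() interface,
 * letting a task toggle all counters it owns without knowing the fds
 * (assuming the usual wiring of these prctl commands in kernel/sys.c):
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_TASK_PERF_EVENTS_DISABLE);	// stop our own counters
 *	// ... code that should not be measured ...
 *	prctl(PR_TASK_PERF_EVENTS_ENABLE);	// resume counting
 */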
3333 
3334 static int perf_event_index(struct perf_event *event)
3335 {
3336 	if (event->hw.state & PERF_HES_STOPPED)
3337 		return 0;
3338 
3339 	if (event->state != PERF_EVENT_STATE_ACTIVE)
3340 		return 0;
3341 
3342 	return event->pmu->event_idx(event);
3343 }
3344 
3345 static void calc_timer_values(struct perf_event *event,
3346 				u64 *now,
3347 				u64 *enabled,
3348 				u64 *running)
3349 {
3350 	u64 ctx_time;
3351 
3352 	*now = perf_clock();
3353 	ctx_time = event->shadow_ctx_time + *now;
3354 	*enabled = ctx_time - event->tstamp_enabled;
3355 	*running = ctx_time - event->tstamp_running;
3356 }
3357 
3358 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3359 {
3360 }
3361 
3362 /*
3363  * Callers need to ensure there can be no nesting of this function, otherwise
3364  * the seqlock logic goes bad. We can not serialize this because the arch
3365  * code calls this from NMI context.
3366  */
3367 void perf_event_update_userpage(struct perf_event *event)
3368 {
3369 	struct perf_event_mmap_page *userpg;
3370 	struct ring_buffer *rb;
3371 	u64 enabled, running, now;
3372 
3373 	rcu_read_lock();
3374 	/*
3375 	 * compute total_time_enabled, total_time_running
3376 	 * based on snapshot values taken when the event
3377 	 * was last scheduled in.
3378 	 *
3379 	 * we cannot simply call update_context_time()
3380 	 * because of locking issues, as we can be called in
3381 	 * NMI context
3382 	 */
3383 	calc_timer_values(event, &now, &enabled, &running);
3384 	rb = rcu_dereference(event->rb);
3385 	if (!rb)
3386 		goto unlock;
3387 
3388 	userpg = rb->user_page;
3389 
3390 	/*
3391 	 * Disable preemption so as to not let the corresponding user-space
3392 	 * spin too long if we get preempted.
3393 	 */
3394 	preempt_disable();
3395 	++userpg->lock;
3396 	barrier();
3397 	userpg->index = perf_event_index(event);
3398 	userpg->offset = perf_event_count(event);
3399 	if (userpg->index)
3400 		userpg->offset -= local64_read(&event->hw.prev_count);
3401 
3402 	userpg->time_enabled = enabled +
3403 			atomic64_read(&event->child_total_time_enabled);
3404 
3405 	userpg->time_running = running +
3406 			atomic64_read(&event->child_total_time_running);
3407 
3408 	arch_perf_update_userpage(userpg, now);
3409 
3410 	barrier();
3411 	++userpg->lock;
3412 	preempt_enable();
3413 unlock:
3414 	rcu_read_unlock();
3415 }
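
/*
 * Illustrative user-space reader of the page updated above (a sketch of
 * the usual seqlock-style loop; 'pc' is assumed to be the address
 * returned by mmap()ing the event fd):
 *
 *	struct perf_event_mmap_page *pc = mapping;
 *	__u64 count, enabled, running;
 *	__u32 seq;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		count   = pc->offset;	// add the hw counter if pc->index != 0
 *		enabled = pc->time_enabled;
 *		running = pc->time_running;
 *		barrier();
 *	} while (pc->lock != seq);
 */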
3416 
3417 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3418 {
3419 	struct perf_event *event = vma->vm_file->private_data;
3420 	struct ring_buffer *rb;
3421 	int ret = VM_FAULT_SIGBUS;
3422 
3423 	if (vmf->flags & FAULT_FLAG_MKWRITE) {
3424 		if (vmf->pgoff == 0)
3425 			ret = 0;
3426 		return ret;
3427 	}
3428 
3429 	rcu_read_lock();
3430 	rb = rcu_dereference(event->rb);
3431 	if (!rb)
3432 		goto unlock;
3433 
3434 	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3435 		goto unlock;
3436 
3437 	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
3438 	if (!vmf->page)
3439 		goto unlock;
3440 
3441 	get_page(vmf->page);
3442 	vmf->page->mapping = vma->vm_file->f_mapping;
3443 	vmf->page->index   = vmf->pgoff;
3444 
3445 	ret = 0;
3446 unlock:
3447 	rcu_read_unlock();
3448 
3449 	return ret;
3450 }
3451 
3452 static void ring_buffer_attach(struct perf_event *event,
3453 			       struct ring_buffer *rb)
3454 {
3455 	unsigned long flags;
3456 
3457 	if (!list_empty(&event->rb_entry))
3458 		return;
3459 
3460 	spin_lock_irqsave(&rb->event_lock, flags);
3461 	if (!list_empty(&event->rb_entry))
3462 		goto unlock;
3463 
3464 	list_add(&event->rb_entry, &rb->event_list);
3465 unlock:
3466 	spin_unlock_irqrestore(&rb->event_lock, flags);
3467 }
3468 
3469 static void ring_buffer_detach(struct perf_event *event,
3470 			       struct ring_buffer *rb)
3471 {
3472 	unsigned long flags;
3473 
3474 	if (list_empty(&event->rb_entry))
3475 		return;
3476 
3477 	spin_lock_irqsave(&rb->event_lock, flags);
3478 	list_del_init(&event->rb_entry);
3479 	wake_up_all(&event->waitq);
3480 	spin_unlock_irqrestore(&rb->event_lock, flags);
3481 }
3482 
3483 static void ring_buffer_wakeup(struct perf_event *event)
3484 {
3485 	struct ring_buffer *rb;
3486 
3487 	rcu_read_lock();
3488 	rb = rcu_dereference(event->rb);
3489 	if (!rb)
3490 		goto unlock;
3491 
3492 	list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3493 		wake_up_all(&event->waitq);
3494 
3495 unlock:
3496 	rcu_read_unlock();
3497 }
3498 
3499 static void rb_free_rcu(struct rcu_head *rcu_head)
3500 {
3501 	struct ring_buffer *rb;
3502 
3503 	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3504 	rb_free(rb);
3505 }
3506 
3507 static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3508 {
3509 	struct ring_buffer *rb;
3510 
3511 	rcu_read_lock();
3512 	rb = rcu_dereference(event->rb);
3513 	if (rb) {
3514 		if (!atomic_inc_not_zero(&rb->refcount))
3515 			rb = NULL;
3516 	}
3517 	rcu_read_unlock();
3518 
3519 	return rb;
3520 }
3521 
3522 static void ring_buffer_put(struct ring_buffer *rb)
3523 {
3524 	struct perf_event *event, *n;
3525 	unsigned long flags;
3526 
3527 	if (!atomic_dec_and_test(&rb->refcount))
3528 		return;
3529 
3530 	spin_lock_irqsave(&rb->event_lock, flags);
3531 	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
3532 		list_del_init(&event->rb_entry);
3533 		wake_up_all(&event->waitq);
3534 	}
3535 	spin_unlock_irqrestore(&rb->event_lock, flags);
3536 
3537 	call_rcu(&rb->rcu_head, rb_free_rcu);
3538 }
3539 
3540 static void perf_mmap_open(struct vm_area_struct *vma)
3541 {
3542 	struct perf_event *event = vma->vm_file->private_data;
3543 
3544 	atomic_inc(&event->mmap_count);
3545 }
3546 
3547 static void perf_mmap_close(struct vm_area_struct *vma)
3548 {
3549 	struct perf_event *event = vma->vm_file->private_data;
3550 
3551 	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
3552 		unsigned long size = perf_data_size(event->rb);
3553 		struct user_struct *user = event->mmap_user;
3554 		struct ring_buffer *rb = event->rb;
3555 
3556 		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
3557 		vma->vm_mm->pinned_vm -= event->mmap_locked;
3558 		rcu_assign_pointer(event->rb, NULL);
3559 		ring_buffer_detach(event, rb);
3560 		mutex_unlock(&event->mmap_mutex);
3561 
3562 		ring_buffer_put(rb);
3563 		free_uid(user);
3564 	}
3565 }
3566 
3567 static const struct vm_operations_struct perf_mmap_vmops = {
3568 	.open		= perf_mmap_open,
3569 	.close		= perf_mmap_close,
3570 	.fault		= perf_mmap_fault,
3571 	.page_mkwrite	= perf_mmap_fault,
3572 };
3573 
3574 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3575 {
3576 	struct perf_event *event = file->private_data;
3577 	unsigned long user_locked, user_lock_limit;
3578 	struct user_struct *user = current_user();
3579 	unsigned long locked, lock_limit;
3580 	struct ring_buffer *rb;
3581 	unsigned long vma_size;
3582 	unsigned long nr_pages;
3583 	long user_extra, extra;
3584 	int ret = 0, flags = 0;
3585 
3586 	/*
3587 	 * Don't allow mmap() of inherited per-task counters. This would
3588 	 * create a performance issue due to all children writing to the
3589 	 * same rb.
3590 	 */
3591 	if (event->cpu == -1 && event->attr.inherit)
3592 		return -EINVAL;
3593 
3594 	if (!(vma->vm_flags & VM_SHARED))
3595 		return -EINVAL;
3596 
3597 	vma_size = vma->vm_end - vma->vm_start;
3598 	nr_pages = (vma_size / PAGE_SIZE) - 1;
3599 
3600 	/*
3601 	 * If we have rb pages ensure they're a power-of-two number, so we
3602 	 * can do bitmasks instead of modulo.
3603 	 */
3604 	if (nr_pages != 0 && !is_power_of_2(nr_pages))
3605 		return -EINVAL;
3606 
3607 	if (vma_size != PAGE_SIZE * (1 + nr_pages))
3608 		return -EINVAL;
3609 
3610 	if (vma->vm_pgoff != 0)
3611 		return -EINVAL;
3612 
3613 	WARN_ON_ONCE(event->ctx->parent_ctx);
3614 	mutex_lock(&event->mmap_mutex);
3615 	if (event->rb) {
3616 		if (event->rb->nr_pages == nr_pages)
3617 			atomic_inc(&event->rb->refcount);
3618 		else
3619 			ret = -EINVAL;
3620 		goto unlock;
3621 	}
3622 
3623 	user_extra = nr_pages + 1;
3624 	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3625 
3626 	/*
3627 	 * Increase the limit linearly with more CPUs:
3628 	 */
3629 	user_lock_limit *= num_online_cpus();
3630 
3631 	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3632 
3633 	extra = 0;
3634 	if (user_locked > user_lock_limit)
3635 		extra = user_locked - user_lock_limit;
3636 
3637 	lock_limit = rlimit(RLIMIT_MEMLOCK);
3638 	lock_limit >>= PAGE_SHIFT;
3639 	locked = vma->vm_mm->pinned_vm + extra;
3640 
3641 	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3642 		!capable(CAP_IPC_LOCK)) {
3643 		ret = -EPERM;
3644 		goto unlock;
3645 	}
3646 
3647 	WARN_ON(event->rb);
3648 
3649 	if (vma->vm_flags & VM_WRITE)
3650 		flags |= RING_BUFFER_WRITABLE;
3651 
3652 	rb = rb_alloc(nr_pages,
3653 		event->attr.watermark ? event->attr.wakeup_watermark : 0,
3654 		event->cpu, flags);
3655 
3656 	if (!rb) {
3657 		ret = -ENOMEM;
3658 		goto unlock;
3659 	}
3660 	rcu_assign_pointer(event->rb, rb);
3661 
3662 	atomic_long_add(user_extra, &user->locked_vm);
3663 	event->mmap_locked = extra;
3664 	event->mmap_user = get_current_user();
3665 	vma->vm_mm->pinned_vm += event->mmap_locked;
3666 
3667 	perf_event_update_userpage(event);
3668 
3669 unlock:
3670 	if (!ret)
3671 		atomic_inc(&event->mmap_count);
3672 	mutex_unlock(&event->mmap_mutex);
3673 
3674 	vma->vm_flags |= VM_RESERVED;
3675 	vma->vm_ops = &perf_mmap_vmops;
3676 
3677 	return ret;
3678 }
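
/*
 * Illustrative user-space counterpart of perf_mmap() (a sketch; the
 * mapping must be MAP_SHARED and cover one metadata page plus a
 * power-of-two number of data pages, matching the checks above):
 *
 *	size_t n_data = 8;			// must be a power of two
 *	size_t len    = (n_data + 1) * sysconf(_SC_PAGESIZE);
 *
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, perf_fd, 0);
 *	// base is struct perf_event_mmap_page; the data pages follow it.
 */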
3679 
3680 static int perf_fasync(int fd, struct file *filp, int on)
3681 {
3682 	struct inode *inode = filp->f_path.dentry->d_inode;
3683 	struct perf_event *event = filp->private_data;
3684 	int retval;
3685 
3686 	mutex_lock(&inode->i_mutex);
3687 	retval = fasync_helper(fd, filp, on, &event->fasync);
3688 	mutex_unlock(&inode->i_mutex);
3689 
3690 	if (retval < 0)
3691 		return retval;
3692 
3693 	return 0;
3694 }
3695 
3696 static const struct file_operations perf_fops = {
3697 	.llseek			= no_llseek,
3698 	.release		= perf_release,
3699 	.read			= perf_read,
3700 	.poll			= perf_poll,
3701 	.unlocked_ioctl		= perf_ioctl,
3702 	.compat_ioctl		= perf_ioctl,
3703 	.mmap			= perf_mmap,
3704 	.fasync			= perf_fasync,
3705 };
3706 
3707 /*
3708  * Perf event wakeup
3709  *
3710  * If there's data, ensure we set the poll() state and publish everything
3711  * to user-space before waking everybody up.
3712  */
3713 
3714 void perf_event_wakeup(struct perf_event *event)
3715 {
3716 	ring_buffer_wakeup(event);
3717 
3718 	if (event->pending_kill) {
3719 		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3720 		event->pending_kill = 0;
3721 	}
3722 }
3723 
3724 static void perf_pending_event(struct irq_work *entry)
3725 {
3726 	struct perf_event *event = container_of(entry,
3727 			struct perf_event, pending);
3728 
3729 	if (event->pending_disable) {
3730 		event->pending_disable = 0;
3731 		__perf_event_disable(event);
3732 	}
3733 
3734 	if (event->pending_wakeup) {
3735 		event->pending_wakeup = 0;
3736 		perf_event_wakeup(event);
3737 	}
3738 }
3739 
3740 /*
3741  * We assume there is only KVM supporting the callbacks.
3742  * Later on, we might change it to a list if there is
3743  * another virtualization implementation supporting the callbacks.
3744  */
3745 struct perf_guest_info_callbacks *perf_guest_cbs;
3746 
3747 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3748 {
3749 	perf_guest_cbs = cbs;
3750 	return 0;
3751 }
3752 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3753 
3754 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3755 {
3756 	perf_guest_cbs = NULL;
3757 	return 0;
3758 }
3759 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
3760 
3761 static void
3762 perf_output_sample_regs(struct perf_output_handle *handle,
3763 			struct pt_regs *regs, u64 mask)
3764 {
3765 	int bit;
3766 
3767 	for_each_set_bit(bit, (const unsigned long *) &mask,
3768 			 sizeof(mask) * BITS_PER_BYTE) {
3769 		u64 val;
3770 
3771 		val = perf_reg_value(regs, bit);
3772 		perf_output_put(handle, val);
3773 	}
3774 }
3775 
3776 static void perf_sample_regs_user(struct perf_regs_user *regs_user,
3777 				  struct pt_regs *regs)
3778 {
3779 	if (!user_mode(regs)) {
3780 		if (current->mm)
3781 			regs = task_pt_regs(current);
3782 		else
3783 			regs = NULL;
3784 	}
3785 
3786 	if (regs) {
3787 		regs_user->regs = regs;
3788 		regs_user->abi  = perf_reg_abi(current);
3789 	}
3790 }
3791 
3792 /*
3793  * Get remaining task size from user stack pointer.
3794  *
3795  * It'd be better to take stack vma map and limit this more
3796  * precisly, but there's no way to get it safely under interrupt,
3797  * so using TASK_SIZE as limit.
3798  */
3799 static u64 perf_ustack_task_size(struct pt_regs *regs)
3800 {
3801 	unsigned long addr = perf_user_stack_pointer(regs);
3802 
3803 	if (!addr || addr >= TASK_SIZE)
3804 		return 0;
3805 
3806 	return TASK_SIZE - addr;
3807 }
3808 
3809 static u16
3810 perf_sample_ustack_size(u16 stack_size, u16 header_size,
3811 			struct pt_regs *regs)
3812 {
3813 	u64 task_size;
3814 
3815 	/* No regs, no stack pointer, no dump. */
3816 	if (!regs)
3817 		return 0;
3818 
3819 	/*
3820 	 * Check whether the requested stack size fits into:
3821 	 * - TASK_SIZE
3822 	 *   If it doesn't, limit the size to TASK_SIZE.
3823 	 *
3824 	 * - the remaining sample size
3825 	 *   If it doesn't, shrink the stack size so that it
3826 	 *   fits into the remaining sample size.
3827 	 */
3828 
3829 	task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
3830 	stack_size = min(stack_size, (u16) task_size);
3831 
3832 	/* Current header size plus static size and dynamic size. */
3833 	header_size += 2 * sizeof(u64);
3834 
3835 	/* Do we fit in with the current stack dump size? */
3836 	if ((u16) (header_size + stack_size) < header_size) {
3837 		/*
3838 		 * If we overflow the maximum size for the sample,
3839 		 * we customize the stack dump size to fit in.
3840 		 */
3841 		stack_size = USHRT_MAX - header_size - sizeof(u64);
3842 		stack_size = round_up(stack_size, sizeof(u64));
3843 	}
3844 
3845 	return stack_size;
3846 }
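
/*
 * Worked example for the clamping above (illustrative numbers): if user
 * space asked for sample_stack_user = 65528 and the sample header is
 * already 256 bytes, header_size becomes 272 after the two size words,
 * and (u16)(272 + 65528) wraps, so stack_size is cut back to
 * USHRT_MAX - 272 - 8 = 65255 and rounded up to a multiple of 8, 65256.
 */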
3847 
3848 static void
3849 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
3850 			  struct pt_regs *regs)
3851 {
3852 	/* Case of a kernel thread, nothing to dump */
3853 	if (!regs) {
3854 		u64 size = 0;
3855 		perf_output_put(handle, size);
3856 	} else {
3857 		unsigned long sp;
3858 		unsigned int rem;
3859 		u64 dyn_size;
3860 
3861 		/*
3862 		 * We dump:
3863 		 * static size
3864 		 *   - the size requested by the user, or the best one we can fit
3865 		 *     into the max sample size
3866 		 * data
3867 		 *   - user stack dump data
3868 		 * dynamic size
3869 		 *   - the actual dumped size
3870 		 */
3871 
3872 		/* Static size. */
3873 		perf_output_put(handle, dump_size);
3874 
3875 		/* Data. */
3876 		sp = perf_user_stack_pointer(regs);
3877 		rem = __output_copy_user(handle, (void *) sp, dump_size);
3878 		dyn_size = dump_size - rem;
3879 
3880 		perf_output_skip(handle, rem);
3881 
3882 		/* Dynamic size. */
3883 		perf_output_put(handle, dyn_size);
3884 	}
3885 }
3886 
3887 static void __perf_event_header__init_id(struct perf_event_header *header,
3888 					 struct perf_sample_data *data,
3889 					 struct perf_event *event)
3890 {
3891 	u64 sample_type = event->attr.sample_type;
3892 
3893 	data->type = sample_type;
3894 	header->size += event->id_header_size;
3895 
3896 	if (sample_type & PERF_SAMPLE_TID) {
3897 		/* namespace issues */
3898 		data->tid_entry.pid = perf_event_pid(event, current);
3899 		data->tid_entry.tid = perf_event_tid(event, current);
3900 	}
3901 
3902 	if (sample_type & PERF_SAMPLE_TIME)
3903 		data->time = perf_clock();
3904 
3905 	if (sample_type & PERF_SAMPLE_ID)
3906 		data->id = primary_event_id(event);
3907 
3908 	if (sample_type & PERF_SAMPLE_STREAM_ID)
3909 		data->stream_id = event->id;
3910 
3911 	if (sample_type & PERF_SAMPLE_CPU) {
3912 		data->cpu_entry.cpu	 = raw_smp_processor_id();
3913 		data->cpu_entry.reserved = 0;
3914 	}
3915 }
3916 
3917 void perf_event_header__init_id(struct perf_event_header *header,
3918 				struct perf_sample_data *data,
3919 				struct perf_event *event)
3920 {
3921 	if (event->attr.sample_id_all)
3922 		__perf_event_header__init_id(header, data, event);
3923 }
3924 
3925 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
3926 					   struct perf_sample_data *data)
3927 {
3928 	u64 sample_type = data->type;
3929 
3930 	if (sample_type & PERF_SAMPLE_TID)
3931 		perf_output_put(handle, data->tid_entry);
3932 
3933 	if (sample_type & PERF_SAMPLE_TIME)
3934 		perf_output_put(handle, data->time);
3935 
3936 	if (sample_type & PERF_SAMPLE_ID)
3937 		perf_output_put(handle, data->id);
3938 
3939 	if (sample_type & PERF_SAMPLE_STREAM_ID)
3940 		perf_output_put(handle, data->stream_id);
3941 
3942 	if (sample_type & PERF_SAMPLE_CPU)
3943 		perf_output_put(handle, data->cpu_entry);
3944 }
3945 
3946 void perf_event__output_id_sample(struct perf_event *event,
3947 				  struct perf_output_handle *handle,
3948 				  struct perf_sample_data *sample)
3949 {
3950 	if (event->attr.sample_id_all)
3951 		__perf_event__output_id_sample(handle, sample);
3952 }
3953 
3954 static void perf_output_read_one(struct perf_output_handle *handle,
3955 				 struct perf_event *event,
3956 				 u64 enabled, u64 running)
3957 {
3958 	u64 read_format = event->attr.read_format;
3959 	u64 values[4];
3960 	int n = 0;
3961 
3962 	values[n++] = perf_event_count(event);
3963 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3964 		values[n++] = enabled +
3965 			atomic64_read(&event->child_total_time_enabled);
3966 	}
3967 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3968 		values[n++] = running +
3969 			atomic64_read(&event->child_total_time_running);
3970 	}
3971 	if (read_format & PERF_FORMAT_ID)
3972 		values[n++] = primary_event_id(event);
3973 
3974 	__output_copy(handle, values, n * sizeof(u64));
3975 }
3976 
3977 /*
3978  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3979  */
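/*
 * The record emitted below is, roughly:
 *
 *	{ u64 nr;		}	1 + nr_siblings
 *	{ u64 time_enabled;	}	if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	{ u64 time_running;	}	if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	{ u64 value; u64 id;	}	leader	 (id only with PERF_FORMAT_ID)
 *	{ u64 value; u64 id;	}	one entry per sibling
 */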
3980 static void perf_output_read_group(struct perf_output_handle *handle,
3981 			    struct perf_event *event,
3982 			    u64 enabled, u64 running)
3983 {
3984 	struct perf_event *leader = event->group_leader, *sub;
3985 	u64 read_format = event->attr.read_format;
3986 	u64 values[5];
3987 	int n = 0;
3988 
3989 	values[n++] = 1 + leader->nr_siblings;
3990 
3991 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3992 		values[n++] = enabled;
3993 
3994 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3995 		values[n++] = running;
3996 
3997 	if (leader != event)
3998 		leader->pmu->read(leader);
3999 
4000 	values[n++] = perf_event_count(leader);
4001 	if (read_format & PERF_FORMAT_ID)
4002 		values[n++] = primary_event_id(leader);
4003 
4004 	__output_copy(handle, values, n * sizeof(u64));
4005 
4006 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4007 		n = 0;
4008 
4009 		if (sub != event)
4010 			sub->pmu->read(sub);
4011 
4012 		values[n++] = perf_event_count(sub);
4013 		if (read_format & PERF_FORMAT_ID)
4014 			values[n++] = primary_event_id(sub);
4015 
4016 		__output_copy(handle, values, n * sizeof(u64));
4017 	}
4018 }
4019 
4020 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
4021 				 PERF_FORMAT_TOTAL_TIME_RUNNING)
4022 
4023 static void perf_output_read(struct perf_output_handle *handle,
4024 			     struct perf_event *event)
4025 {
4026 	u64 enabled = 0, running = 0, now;
4027 	u64 read_format = event->attr.read_format;
4028 
4029 	/*
4030 	 * compute total_time_enabled, total_time_running
4031 	 * based on snapshot values taken when the event
4032 	 * was last scheduled in.
4033 	 *
4034 	 * we cannot simply call update_context_time()
4035 	 * because of locking issues, as we may be called from
4036 	 * NMI context
4037 	 */
4038 	if (read_format & PERF_FORMAT_TOTAL_TIMES)
4039 		calc_timer_values(event, &now, &enabled, &running);
4040 
4041 	if (event->attr.read_format & PERF_FORMAT_GROUP)
4042 		perf_output_read_group(handle, event, enabled, running);
4043 	else
4044 		perf_output_read_one(handle, event, enabled, running);
4045 }
4046 
4047 void perf_output_sample(struct perf_output_handle *handle,
4048 			struct perf_event_header *header,
4049 			struct perf_sample_data *data,
4050 			struct perf_event *event)
4051 {
4052 	u64 sample_type = data->type;
4053 
4054 	perf_output_put(handle, *header);
4055 
4056 	if (sample_type & PERF_SAMPLE_IP)
4057 		perf_output_put(handle, data->ip);
4058 
4059 	if (sample_type & PERF_SAMPLE_TID)
4060 		perf_output_put(handle, data->tid_entry);
4061 
4062 	if (sample_type & PERF_SAMPLE_TIME)
4063 		perf_output_put(handle, data->time);
4064 
4065 	if (sample_type & PERF_SAMPLE_ADDR)
4066 		perf_output_put(handle, data->addr);
4067 
4068 	if (sample_type & PERF_SAMPLE_ID)
4069 		perf_output_put(handle, data->id);
4070 
4071 	if (sample_type & PERF_SAMPLE_STREAM_ID)
4072 		perf_output_put(handle, data->stream_id);
4073 
4074 	if (sample_type & PERF_SAMPLE_CPU)
4075 		perf_output_put(handle, data->cpu_entry);
4076 
4077 	if (sample_type & PERF_SAMPLE_PERIOD)
4078 		perf_output_put(handle, data->period);
4079 
4080 	if (sample_type & PERF_SAMPLE_READ)
4081 		perf_output_read(handle, event);
4082 
4083 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4084 		if (data->callchain) {
4085 			int size = 1 + data->callchain->nr;
4089 
4090 			size *= sizeof(u64);
4091 
4092 			__output_copy(handle, data->callchain, size);
4093 		} else {
4094 			u64 nr = 0;
4095 			perf_output_put(handle, nr);
4096 		}
4097 	}
4098 
4099 	if (sample_type & PERF_SAMPLE_RAW) {
4100 		if (data->raw) {
4101 			perf_output_put(handle, data->raw->size);
4102 			__output_copy(handle, data->raw->data,
4103 					   data->raw->size);
4104 		} else {
4105 			struct {
4106 				u32	size;
4107 				u32	data;
4108 			} raw = {
4109 				.size = sizeof(u32),
4110 				.data = 0,
4111 			};
4112 			perf_output_put(handle, raw);
4113 		}
4114 	}
4115 
4116 	if (!event->attr.watermark) {
4117 		int wakeup_events = event->attr.wakeup_events;
4118 
4119 		if (wakeup_events) {
4120 			struct ring_buffer *rb = handle->rb;
4121 			int events = local_inc_return(&rb->events);
4122 
4123 			if (events >= wakeup_events) {
4124 				local_sub(wakeup_events, &rb->events);
4125 				local_inc(&rb->wakeup);
4126 			}
4127 		}
4128 	}
4129 
4130 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4131 		if (data->br_stack) {
4132 			size_t size;
4133 
4134 			size = data->br_stack->nr
4135 			     * sizeof(struct perf_branch_entry);
4136 
4137 			perf_output_put(handle, data->br_stack->nr);
4138 			perf_output_copy(handle, data->br_stack->entries, size);
4139 		} else {
4140 			/*
4141 			 * we always store at least the value of nr
4142 			 */
4143 			u64 nr = 0;
4144 			perf_output_put(handle, nr);
4145 		}
4146 	}
4147 
4148 	if (sample_type & PERF_SAMPLE_REGS_USER) {
4149 		u64 abi = data->regs_user.abi;
4150 
4151 		/*
4152 		 * If there are no regs to dump, signal it by making the
4153 		 * first u64 zero (PERF_SAMPLE_REGS_ABI_NONE).
4154 		 */
4155 		perf_output_put(handle, abi);
4156 
4157 		if (abi) {
4158 			u64 mask = event->attr.sample_regs_user;
4159 			perf_output_sample_regs(handle,
4160 						data->regs_user.regs,
4161 						mask);
4162 		}
4163 	}
4164 
4165 	if (sample_type & PERF_SAMPLE_STACK_USER)
4166 		perf_output_sample_ustack(handle,
4167 					  data->stack_user_size,
4168 					  data->regs_user.regs);
4169 }
4170 
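/*
 * perf_prepare_sample() sizes up the record: it computes header->size and
 * caches the gathered state in 'data' so that perf_output_sample() above
 * can emit the record without re-deriving anything on the output path.
 */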
4171 void perf_prepare_sample(struct perf_event_header *header,
4172 			 struct perf_sample_data *data,
4173 			 struct perf_event *event,
4174 			 struct pt_regs *regs)
4175 {
4176 	u64 sample_type = event->attr.sample_type;
4177 
4178 	header->type = PERF_RECORD_SAMPLE;
4179 	header->size = sizeof(*header) + event->header_size;
4180 
4181 	header->misc = 0;
4182 	header->misc |= perf_misc_flags(regs);
4183 
4184 	__perf_event_header__init_id(header, data, event);
4185 
4186 	if (sample_type & PERF_SAMPLE_IP)
4187 		data->ip = perf_instruction_pointer(regs);
4188 
4189 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4190 		int size = 1;
4191 
4192 		data->callchain = perf_callchain(event, regs);
4193 
4194 		if (data->callchain)
4195 			size += data->callchain->nr;
4196 
4197 		header->size += size * sizeof(u64);
4198 	}
4199 
4200 	if (sample_type & PERF_SAMPLE_RAW) {
4201 		int size = sizeof(u32);
4202 
4203 		if (data->raw)
4204 			size += data->raw->size;
4205 		else
4206 			size += sizeof(u32);
4207 
4208 		WARN_ON_ONCE(size & (sizeof(u64)-1));
4209 		header->size += size;
4210 	}
4211 
4212 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4213 		int size = sizeof(u64); /* nr */
4214 		if (data->br_stack) {
4215 			size += data->br_stack->nr
4216 			      * sizeof(struct perf_branch_entry);
4217 		}
4218 		header->size += size;
4219 	}
4220 
4221 	if (sample_type & PERF_SAMPLE_REGS_USER) {
4222 		/* regs dump ABI info */
4223 		int size = sizeof(u64);
4224 
4225 		perf_sample_regs_user(&data->regs_user, regs);
4226 
4227 		if (data->regs_user.regs) {
4228 			u64 mask = event->attr.sample_regs_user;
4229 			size += hweight64(mask) * sizeof(u64);
4230 		}
4231 
4232 		header->size += size;
4233 	}
4234 
4235 	if (sample_type & PERF_SAMPLE_STACK_USER) {
4236 		/*
4237 		 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
4238 		 * processed last, or an additional check needs to be added
4239 		 * whenever a new sample type is introduced, because we could
4240 		 * eat up the rest of the sample size.
4241 		 */
4242 		struct perf_regs_user *uregs = &data->regs_user;
4243 		u16 stack_size = event->attr.sample_stack_user;
4244 		u16 size = sizeof(u64);
4245 
4246 		if (!uregs->abi)
4247 			perf_sample_regs_user(uregs, regs);
4248 
4249 		stack_size = perf_sample_ustack_size(stack_size, header->size,
4250 						     uregs->regs);
4251 
4252 		/*
4253 		 * If there is something to dump, add space for the dump
4254 		 * itself and for the field that tells the dynamic size,
4255 		 * which is how many bytes have actually been dumped.
4256 		 */
4257 		if (stack_size)
4258 			size += sizeof(u64) + stack_size;
4259 
4260 		data->stack_user_size = stack_size;
4261 		header->size += size;
4262 	}
4263 }
4264 
4265 static void perf_event_output(struct perf_event *event,
4266 				struct perf_sample_data *data,
4267 				struct pt_regs *regs)
4268 {
4269 	struct perf_output_handle handle;
4270 	struct perf_event_header header;
4271 
4272 	/* protect the callchain buffers */
4273 	rcu_read_lock();
4274 
4275 	perf_prepare_sample(&header, data, event, regs);
4276 
4277 	if (perf_output_begin(&handle, event, header.size))
4278 		goto exit;
4279 
4280 	perf_output_sample(&handle, &header, data, event);
4281 
4282 	perf_output_end(&handle);
4283 
4284 exit:
4285 	rcu_read_unlock();
4286 }
4287 
4288 /*
4289  * read event_id
4290  */
4291 
4292 struct perf_read_event {
4293 	struct perf_event_header	header;
4294 
4295 	u32				pid;
4296 	u32				tid;
4297 };
4298 
4299 static void
4300 perf_event_read_event(struct perf_event *event,
4301 			struct task_struct *task)
4302 {
4303 	struct perf_output_handle handle;
4304 	struct perf_sample_data sample;
4305 	struct perf_read_event read_event = {
4306 		.header = {
4307 			.type = PERF_RECORD_READ,
4308 			.misc = 0,
4309 			.size = sizeof(read_event) + event->read_size,
4310 		},
4311 		.pid = perf_event_pid(event, task),
4312 		.tid = perf_event_tid(event, task),
4313 	};
4314 	int ret;
4315 
4316 	perf_event_header__init_id(&read_event.header, &sample, event);
4317 	ret = perf_output_begin(&handle, event, read_event.header.size);
4318 	if (ret)
4319 		return;
4320 
4321 	perf_output_put(&handle, read_event);
4322 	perf_output_read(&handle, event);
4323 	perf_event__output_id_sample(event, &handle, &sample);
4324 
4325 	perf_output_end(&handle);
4326 }
4327 
4328 /*
4329  * task tracking -- fork/exit
4330  *
4331  * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
4332  */
4333 
4334 struct perf_task_event {
4335 	struct task_struct		*task;
4336 	struct perf_event_context	*task_ctx;
4337 
4338 	struct {
4339 		struct perf_event_header	header;
4340 
4341 		u32				pid;
4342 		u32				ppid;
4343 		u32				tid;
4344 		u32				ptid;
4345 		u64				time;
4346 	} event_id;
4347 };
4348 
4349 static void perf_event_task_output(struct perf_event *event,
4350 				     struct perf_task_event *task_event)
4351 {
4352 	struct perf_output_handle handle;
4353 	struct perf_sample_data	sample;
4354 	struct task_struct *task = task_event->task;
4355 	int ret, size = task_event->event_id.header.size;
4356 
4357 	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
4358 
4359 	ret = perf_output_begin(&handle, event,
4360 				task_event->event_id.header.size);
4361 	if (ret)
4362 		goto out;
4363 
4364 	task_event->event_id.pid = perf_event_pid(event, task);
4365 	task_event->event_id.ppid = perf_event_pid(event, current);
4366 
4367 	task_event->event_id.tid = perf_event_tid(event, task);
4368 	task_event->event_id.ptid = perf_event_tid(event, current);
4369 
4370 	perf_output_put(&handle, task_event->event_id);
4371 
4372 	perf_event__output_id_sample(event, &handle, &sample);
4373 
4374 	perf_output_end(&handle);
4375 out:
4376 	task_event->event_id.header.size = size;
4377 }
4378 
4379 static int perf_event_task_match(struct perf_event *event)
4380 {
4381 	if (event->state < PERF_EVENT_STATE_INACTIVE)
4382 		return 0;
4383 
4384 	if (!event_filter_match(event))
4385 		return 0;
4386 
4387 	if (event->attr.comm || event->attr.mmap ||
4388 	    event->attr.mmap_data || event->attr.task)
4389 		return 1;
4390 
4391 	return 0;
4392 }
4393 
4394 static void perf_event_task_ctx(struct perf_event_context *ctx,
4395 				  struct perf_task_event *task_event)
4396 {
4397 	struct perf_event *event;
4398 
4399 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4400 		if (perf_event_task_match(event))
4401 			perf_event_task_output(event, task_event);
4402 	}
4403 }
4404 
4405 static void perf_event_task_event(struct perf_task_event *task_event)
4406 {
4407 	struct perf_cpu_context *cpuctx;
4408 	struct perf_event_context *ctx;
4409 	struct pmu *pmu;
4410 	int ctxn;
4411 
4412 	rcu_read_lock();
4413 	list_for_each_entry_rcu(pmu, &pmus, entry) {
4414 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4415 		if (cpuctx->active_pmu != pmu)
4416 			goto next;
4417 		perf_event_task_ctx(&cpuctx->ctx, task_event);
4418 
4419 		ctx = task_event->task_ctx;
4420 		if (!ctx) {
4421 			ctxn = pmu->task_ctx_nr;
4422 			if (ctxn < 0)
4423 				goto next;
4424 			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4425 		}
4426 		if (ctx)
4427 			perf_event_task_ctx(ctx, task_event);
4428 next:
4429 		put_cpu_ptr(pmu->pmu_cpu_context);
4430 	}
4431 	rcu_read_unlock();
4432 }
4433 
4434 static void perf_event_task(struct task_struct *task,
4435 			      struct perf_event_context *task_ctx,
4436 			      int new)
4437 {
4438 	struct perf_task_event task_event;
4439 
4440 	if (!atomic_read(&nr_comm_events) &&
4441 	    !atomic_read(&nr_mmap_events) &&
4442 	    !atomic_read(&nr_task_events))
4443 		return;
4444 
4445 	task_event = (struct perf_task_event){
4446 		.task	  = task,
4447 		.task_ctx = task_ctx,
4448 		.event_id    = {
4449 			.header = {
4450 				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
4451 				.misc = 0,
4452 				.size = sizeof(task_event.event_id),
4453 			},
4454 			/* .pid  */
4455 			/* .ppid */
4456 			/* .tid  */
4457 			/* .ptid */
4458 			.time = perf_clock(),
4459 		},
4460 	};
4461 
4462 	perf_event_task_event(&task_event);
4463 }
4464 
4465 void perf_event_fork(struct task_struct *task)
4466 {
4467 	perf_event_task(task, NULL, 1);
4468 }
4469 
4470 /*
4471  * comm tracking
4472  */
4473 
4474 struct perf_comm_event {
4475 	struct task_struct	*task;
4476 	char			*comm;
4477 	int			comm_size;
4478 
4479 	struct {
4480 		struct perf_event_header	header;
4481 
4482 		u32				pid;
4483 		u32				tid;
4484 	} event_id;
4485 };
4486 
4487 static void perf_event_comm_output(struct perf_event *event,
4488 				     struct perf_comm_event *comm_event)
4489 {
4490 	struct perf_output_handle handle;
4491 	struct perf_sample_data sample;
4492 	int size = comm_event->event_id.header.size;
4493 	int ret;
4494 
4495 	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4496 	ret = perf_output_begin(&handle, event,
4497 				comm_event->event_id.header.size);
4498 
4499 	if (ret)
4500 		goto out;
4501 
4502 	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4503 	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4504 
4505 	perf_output_put(&handle, comm_event->event_id);
4506 	__output_copy(&handle, comm_event->comm,
4507 				   comm_event->comm_size);
4508 
4509 	perf_event__output_id_sample(event, &handle, &sample);
4510 
4511 	perf_output_end(&handle);
4512 out:
4513 	comm_event->event_id.header.size = size;
4514 }
4515 
4516 static int perf_event_comm_match(struct perf_event *event)
4517 {
4518 	if (event->state < PERF_EVENT_STATE_INACTIVE)
4519 		return 0;
4520 
4521 	if (!event_filter_match(event))
4522 		return 0;
4523 
4524 	if (event->attr.comm)
4525 		return 1;
4526 
4527 	return 0;
4528 }
4529 
4530 static void perf_event_comm_ctx(struct perf_event_context *ctx,
4531 				  struct perf_comm_event *comm_event)
4532 {
4533 	struct perf_event *event;
4534 
4535 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4536 		if (perf_event_comm_match(event))
4537 			perf_event_comm_output(event, comm_event);
4538 	}
4539 }
4540 
4541 static void perf_event_comm_event(struct perf_comm_event *comm_event)
4542 {
4543 	struct perf_cpu_context *cpuctx;
4544 	struct perf_event_context *ctx;
4545 	char comm[TASK_COMM_LEN];
4546 	unsigned int size;
4547 	struct pmu *pmu;
4548 	int ctxn;
4549 
4550 	memset(comm, 0, sizeof(comm));
4551 	strlcpy(comm, comm_event->task->comm, sizeof(comm));
4552 	size = ALIGN(strlen(comm)+1, sizeof(u64));
4553 
4554 	comm_event->comm = comm;
4555 	comm_event->comm_size = size;
4556 
4557 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
4558 	rcu_read_lock();
4559 	list_for_each_entry_rcu(pmu, &pmus, entry) {
4560 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4561 		if (cpuctx->active_pmu != pmu)
4562 			goto next;
4563 		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
4564 
4565 		ctxn = pmu->task_ctx_nr;
4566 		if (ctxn < 0)
4567 			goto next;
4568 
4569 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4570 		if (ctx)
4571 			perf_event_comm_ctx(ctx, comm_event);
4572 next:
4573 		put_cpu_ptr(pmu->pmu_cpu_context);
4574 	}
4575 	rcu_read_unlock();
4576 }
4577 
4578 void perf_event_comm(struct task_struct *task)
4579 {
4580 	struct perf_comm_event comm_event;
4581 	struct perf_event_context *ctx;
4582 	int ctxn;
4583 
4584 	for_each_task_context_nr(ctxn) {
4585 		ctx = task->perf_event_ctxp[ctxn];
4586 		if (!ctx)
4587 			continue;
4588 
4589 		perf_event_enable_on_exec(ctx);
4590 	}
4591 
4592 	if (!atomic_read(&nr_comm_events))
4593 		return;
4594 
4595 	comm_event = (struct perf_comm_event){
4596 		.task	= task,
4597 		/* .comm      */
4598 		/* .comm_size */
4599 		.event_id  = {
4600 			.header = {
4601 				.type = PERF_RECORD_COMM,
4602 				.misc = 0,
4603 				/* .size */
4604 			},
4605 			/* .pid */
4606 			/* .tid */
4607 		},
4608 	};
4609 
4610 	perf_event_comm_event(&comm_event);
4611 }
4612 
4613 /*
4614  * mmap tracking
4615  */
4616 
4617 struct perf_mmap_event {
4618 	struct vm_area_struct	*vma;
4619 
4620 	const char		*file_name;
4621 	int			file_size;
4622 
4623 	struct {
4624 		struct perf_event_header	header;
4625 
4626 		u32				pid;
4627 		u32				tid;
4628 		u64				start;
4629 		u64				len;
4630 		u64				pgoff;
4631 	} event_id;
4632 };
4633 
4634 static void perf_event_mmap_output(struct perf_event *event,
4635 				     struct perf_mmap_event *mmap_event)
4636 {
4637 	struct perf_output_handle handle;
4638 	struct perf_sample_data sample;
4639 	int size = mmap_event->event_id.header.size;
4640 	int ret;
4641 
4642 	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
4643 	ret = perf_output_begin(&handle, event,
4644 				mmap_event->event_id.header.size);
4645 	if (ret)
4646 		goto out;
4647 
4648 	mmap_event->event_id.pid = perf_event_pid(event, current);
4649 	mmap_event->event_id.tid = perf_event_tid(event, current);
4650 
4651 	perf_output_put(&handle, mmap_event->event_id);
4652 	__output_copy(&handle, mmap_event->file_name,
4653 				   mmap_event->file_size);
4654 
4655 	perf_event__output_id_sample(event, &handle, &sample);
4656 
4657 	perf_output_end(&handle);
4658 out:
4659 	mmap_event->event_id.header.size = size;
4660 }
4661 
4662 static int perf_event_mmap_match(struct perf_event *event,
4663 				   struct perf_mmap_event *mmap_event,
4664 				   int executable)
4665 {
4666 	if (event->state < PERF_EVENT_STATE_INACTIVE)
4667 		return 0;
4668 
4669 	if (!event_filter_match(event))
4670 		return 0;
4671 
4672 	if ((!executable && event->attr.mmap_data) ||
4673 	    (executable && event->attr.mmap))
4674 		return 1;
4675 
4676 	return 0;
4677 }
4678 
4679 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
4680 				  struct perf_mmap_event *mmap_event,
4681 				  int executable)
4682 {
4683 	struct perf_event *event;
4684 
4685 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4686 		if (perf_event_mmap_match(event, mmap_event, executable))
4687 			perf_event_mmap_output(event, mmap_event);
4688 	}
4689 }
4690 
4691 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4692 {
4693 	struct perf_cpu_context *cpuctx;
4694 	struct perf_event_context *ctx;
4695 	struct vm_area_struct *vma = mmap_event->vma;
4696 	struct file *file = vma->vm_file;
4697 	unsigned int size;
4698 	char tmp[16];
4699 	char *buf = NULL;
4700 	const char *name;
4701 	struct pmu *pmu;
4702 	int ctxn;
4703 
4704 	memset(tmp, 0, sizeof(tmp));
4705 
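	/*
	 * Resolve a printable name for the mapping: the file path when the
	 * vma is file backed, otherwise an arch provided name, "[vdso]",
	 * "[heap]", "[stack]", or "//anon" as the fallback.
	 */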
4706 	if (file) {
4707 		/*
4708 		 * d_path() works from the end of the buffer backwards, so we
4709 		 * need to add enough zero bytes after the string to handle
4710 		 * the 64-bit alignment we do later.
4711 		 */
4712 		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
4713 		if (!buf) {
4714 			name = strncpy(tmp, "//enomem", sizeof(tmp));
4715 			goto got_name;
4716 		}
4717 		name = d_path(&file->f_path, buf, PATH_MAX);
4718 		if (IS_ERR(name)) {
4719 			name = strncpy(tmp, "//toolong", sizeof(tmp));
4720 			goto got_name;
4721 		}
4722 	} else {
4723 		if (arch_vma_name(mmap_event->vma)) {
4724 			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4725 				       sizeof(tmp));
4726 			goto got_name;
4727 		}
4728 
4729 		if (!vma->vm_mm) {
4730 			name = strncpy(tmp, "[vdso]", sizeof(tmp));
4731 			goto got_name;
4732 		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
4733 				vma->vm_end >= vma->vm_mm->brk) {
4734 			name = strncpy(tmp, "[heap]", sizeof(tmp));
4735 			goto got_name;
4736 		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
4737 				vma->vm_end >= vma->vm_mm->start_stack) {
4738 			name = strncpy(tmp, "[stack]", sizeof(tmp));
4739 			goto got_name;
4740 		}
4741 
4742 		name = strncpy(tmp, "//anon", sizeof(tmp));
4743 		goto got_name;
4744 	}
4745 
4746 got_name:
4747 	size = ALIGN(strlen(name)+1, sizeof(u64));
4748 
4749 	mmap_event->file_name = name;
4750 	mmap_event->file_size = size;
4751 
4752 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4753 
4754 	rcu_read_lock();
4755 	list_for_each_entry_rcu(pmu, &pmus, entry) {
4756 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4757 		if (cpuctx->active_pmu != pmu)
4758 			goto next;
4759 		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4760 					vma->vm_flags & VM_EXEC);
4761 
4762 		ctxn = pmu->task_ctx_nr;
4763 		if (ctxn < 0)
4764 			goto next;
4765 
4766 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4767 		if (ctx) {
4768 			perf_event_mmap_ctx(ctx, mmap_event,
4769 					vma->vm_flags & VM_EXEC);
4770 		}
4771 next:
4772 		put_cpu_ptr(pmu->pmu_cpu_context);
4773 	}
4774 	rcu_read_unlock();
4775 
4776 	kfree(buf);
4777 }
4778 
4779 void perf_event_mmap(struct vm_area_struct *vma)
4780 {
4781 	struct perf_mmap_event mmap_event;
4782 
4783 	if (!atomic_read(&nr_mmap_events))
4784 		return;
4785 
4786 	mmap_event = (struct perf_mmap_event){
4787 		.vma	= vma,
4788 		/* .file_name */
4789 		/* .file_size */
4790 		.event_id  = {
4791 			.header = {
4792 				.type = PERF_RECORD_MMAP,
4793 				.misc = PERF_RECORD_MISC_USER,
4794 				/* .size */
4795 			},
4796 			/* .pid */
4797 			/* .tid */
4798 			.start  = vma->vm_start,
4799 			.len    = vma->vm_end - vma->vm_start,
4800 			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
4801 		},
4802 	};
4803 
4804 	perf_event_mmap_event(&mmap_event);
4805 }
4806 
4807 /*
4808  * IRQ throttle logging
4809  */
4810 
4811 static void perf_log_throttle(struct perf_event *event, int enable)
4812 {
4813 	struct perf_output_handle handle;
4814 	struct perf_sample_data sample;
4815 	int ret;
4816 
4817 	struct {
4818 		struct perf_event_header	header;
4819 		u64				time;
4820 		u64				id;
4821 		u64				stream_id;
4822 	} throttle_event = {
4823 		.header = {
4824 			.type = PERF_RECORD_THROTTLE,
4825 			.misc = 0,
4826 			.size = sizeof(throttle_event),
4827 		},
4828 		.time		= perf_clock(),
4829 		.id		= primary_event_id(event),
4830 		.stream_id	= event->id,
4831 	};
4832 
4833 	if (enable)
4834 		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4835 
4836 	perf_event_header__init_id(&throttle_event.header, &sample, event);
4837 
4838 	ret = perf_output_begin(&handle, event,
4839 				throttle_event.header.size);
4840 	if (ret)
4841 		return;
4842 
4843 	perf_output_put(&handle, throttle_event);
4844 	perf_event__output_id_sample(event, &handle, &sample);
4845 	perf_output_end(&handle);
4846 }
4847 
4848 /*
4849  * Generic event overflow handling, sampling.
4850  */
4851 
4852 static int __perf_event_overflow(struct perf_event *event,
4853 				   int throttle, struct perf_sample_data *data,
4854 				   struct pt_regs *regs)
4855 {
4856 	int events = atomic_read(&event->event_limit);
4857 	struct hw_perf_event *hwc = &event->hw;
4858 	u64 seq;
4859 	int ret = 0;
4860 
4861 	/*
4862 	 * Non-sampling counters might still use the PMI to fold short
4863 	 * hardware counters; ignore those.
4864 	 */
4865 	if (unlikely(!is_sampling_event(event)))
4866 		return 0;
4867 
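	/*
	 * perf_throttled_seq is advanced by the timer tick, so restarting
	 * the interrupt count on a new sequence number means throttling
	 * only kicks in when more than max_samples_per_tick overflows
	 * arrive within a single tick.
	 */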
4868 	seq = __this_cpu_read(perf_throttled_seq);
4869 	if (seq != hwc->interrupts_seq) {
4870 		hwc->interrupts_seq = seq;
4871 		hwc->interrupts = 1;
4872 	} else {
4873 		hwc->interrupts++;
4874 		if (unlikely(throttle
4875 			     && hwc->interrupts >= max_samples_per_tick)) {
4876 			__this_cpu_inc(perf_throttled_count);
4877 			hwc->interrupts = MAX_INTERRUPTS;
4878 			perf_log_throttle(event, 0);
4879 			ret = 1;
4880 		}
4881 	}
4882 
4883 	if (event->attr.freq) {
4884 		u64 now = perf_clock();
4885 		s64 delta = now - hwc->freq_time_stamp;
4886 
4887 		hwc->freq_time_stamp = now;
4888 
4889 		if (delta > 0 && delta < 2*TICK_NSEC)
4890 			perf_adjust_period(event, delta, hwc->last_period, true);
4891 	}
4892 
4893 	/*
4894 	 * XXX event_limit might not quite work as expected on inherited
4895 	 * events
4896 	 */
4897 
4898 	event->pending_kill = POLL_IN;
4899 	if (events && atomic_dec_and_test(&event->event_limit)) {
4900 		ret = 1;
4901 		event->pending_kill = POLL_HUP;
4902 		event->pending_disable = 1;
4903 		irq_work_queue(&event->pending);
4904 	}
4905 
4906 	if (event->overflow_handler)
4907 		event->overflow_handler(event, data, regs);
4908 	else
4909 		perf_event_output(event, data, regs);
4910 
4911 	if (event->fasync && event->pending_kill) {
4912 		event->pending_wakeup = 1;
4913 		irq_work_queue(&event->pending);
4914 	}
4915 
4916 	return ret;
4917 }
4918 
4919 int perf_event_overflow(struct perf_event *event,
4920 			  struct perf_sample_data *data,
4921 			  struct pt_regs *regs)
4922 {
4923 	return __perf_event_overflow(event, 1, data, regs);
4924 }
4925 
4926 /*
4927  * Generic software event infrastructure
4928  */
4929 
4930 struct swevent_htable {
4931 	struct swevent_hlist		*swevent_hlist;
4932 	struct mutex			hlist_mutex;
4933 	int				hlist_refcount;
4934 
4935 	/* Recursion avoidance in each contexts */
4936 	int				recursion[PERF_NR_CONTEXTS];
4937 };
4938 
4939 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
4940 
4941 /*
4942  * We directly increment event->count and keep a second value in
4943  * event->hw.period_left to count intervals. This value
4944  * is kept in the range [-sample_period, 0] so that we can use the
4945  * sign as trigger.
4946  */
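/*
 * For example, with sample_period == 100 and period_left read as 250,
 * perf_swevent_set_period() returns 3 elapsed periods and moves
 * period_left back to -50, inside [-sample_period, 0) again.
 */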
4947 
4948 static u64 perf_swevent_set_period(struct perf_event *event)
4949 {
4950 	struct hw_perf_event *hwc = &event->hw;
4951 	u64 period = hwc->last_period;
4952 	u64 nr, offset;
4953 	s64 old, val;
4954 
4955 	hwc->last_period = hwc->sample_period;
4956 
4957 again:
4958 	old = val = local64_read(&hwc->period_left);
4959 	if (val < 0)
4960 		return 0;
4961 
4962 	nr = div64_u64(period + val, period);
4963 	offset = nr * period;
4964 	val -= offset;
4965 	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
4966 		goto again;
4967 
4968 	return nr;
4969 }
4970 
4971 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
4972 				    struct perf_sample_data *data,
4973 				    struct pt_regs *regs)
4974 {
4975 	struct hw_perf_event *hwc = &event->hw;
4976 	int throttle = 0;
4977 
4978 	if (!overflow)
4979 		overflow = perf_swevent_set_period(event);
4980 
4981 	if (hwc->interrupts == MAX_INTERRUPTS)
4982 		return;
4983 
4984 	for (; overflow; overflow--) {
4985 		if (__perf_event_overflow(event, throttle,
4986 					    data, regs)) {
4987 			/*
4988 			 * We inhibit the overflow from happening when
4989 			 * hwc->interrupts == MAX_INTERRUPTS.
4990 			 */
4991 			break;
4992 		}
4993 		throttle = 1;
4994 	}
4995 }
4996 
4997 static void perf_swevent_event(struct perf_event *event, u64 nr,
4998 			       struct perf_sample_data *data,
4999 			       struct pt_regs *regs)
5000 {
5001 	struct hw_perf_event *hwc = &event->hw;
5002 
5003 	local64_add(nr, &event->count);
5004 
5005 	if (!regs)
5006 		return;
5007 
5008 	if (!is_sampling_event(event))
5009 		return;
5010 
5011 	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
5012 		data->period = nr;
5013 		return perf_swevent_overflow(event, 1, data, regs);
5014 	} else
5015 		data->period = event->hw.last_period;
5016 
5017 	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
5018 		return perf_swevent_overflow(event, 1, data, regs);
5019 
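	/*
	 * Accumulate nr into period_left; while the sum stays negative the
	 * period has not elapsed yet. Once it reaches zero or above, let
	 * perf_swevent_overflow() work out how many periods were crossed.
	 */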
5020 	if (local64_add_negative(nr, &hwc->period_left))
5021 		return;
5022 
5023 	perf_swevent_overflow(event, 0, data, regs);
5024 }
5025 
5026 static int perf_exclude_event(struct perf_event *event,
5027 			      struct pt_regs *regs)
5028 {
5029 	if (event->hw.state & PERF_HES_STOPPED)
5030 		return 1;
5031 
5032 	if (regs) {
5033 		if (event->attr.exclude_user && user_mode(regs))
5034 			return 1;
5035 
5036 		if (event->attr.exclude_kernel && !user_mode(regs))
5037 			return 1;
5038 	}
5039 
5040 	return 0;
5041 }
5042 
5043 static int perf_swevent_match(struct perf_event *event,
5044 				enum perf_type_id type,
5045 				u32 event_id,
5046 				struct perf_sample_data *data,
5047 				struct pt_regs *regs)
5048 {
5049 	if (event->attr.type != type)
5050 		return 0;
5051 
5052 	if (event->attr.config != event_id)
5053 		return 0;
5054 
5055 	if (perf_exclude_event(event, regs))
5056 		return 0;
5057 
5058 	return 1;
5059 }
5060 
5061 static inline u64 swevent_hash(u64 type, u32 event_id)
5062 {
5063 	u64 val = event_id | (type << 32);
5064 
5065 	return hash_64(val, SWEVENT_HLIST_BITS);
5066 }
5067 
5068 static inline struct hlist_head *
5069 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
5070 {
5071 	u64 hash = swevent_hash(type, event_id);
5072 
5073 	return &hlist->heads[hash];
5074 }
5075 
5076 /* For the read side: events when they trigger */
5077 static inline struct hlist_head *
5078 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
5079 {
5080 	struct swevent_hlist *hlist;
5081 
5082 	hlist = rcu_dereference(swhash->swevent_hlist);
5083 	if (!hlist)
5084 		return NULL;
5085 
5086 	return __find_swevent_head(hlist, type, event_id);
5087 }
5088 
5089 /* For the event head insertion and removal in the hlist */
5090 static inline struct hlist_head *
5091 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
5092 {
5093 	struct swevent_hlist *hlist;
5094 	u32 event_id = event->attr.config;
5095 	u64 type = event->attr.type;
5096 
5097 	/*
5098 	 * Event scheduling is always serialized against hlist allocation
5099 	 * and release; the context lock guarantees that, which makes the
5100 	 * protected version suitable here.
5101 	 */
5102 	hlist = rcu_dereference_protected(swhash->swevent_hlist,
5103 					  lockdep_is_held(&event->ctx->lock));
5104 	if (!hlist)
5105 		return NULL;
5106 
5107 	return __find_swevent_head(hlist, type, event_id);
5108 }
5109 
5110 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
5111 				    u64 nr,
5112 				    struct perf_sample_data *data,
5113 				    struct pt_regs *regs)
5114 {
5115 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5116 	struct perf_event *event;
5117 	struct hlist_node *node;
5118 	struct hlist_head *head;
5119 
5120 	rcu_read_lock();
5121 	head = find_swevent_head_rcu(swhash, type, event_id);
5122 	if (!head)
5123 		goto end;
5124 
5125 	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
5126 		if (perf_swevent_match(event, type, event_id, data, regs))
5127 			perf_swevent_event(event, nr, data, regs);
5128 	}
5129 end:
5130 	rcu_read_unlock();
5131 }
5132 
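/*
 * Recursion protection: one counter per context type (task, softirq, hardirq,
 * NMI), so a software event raised while another one is already being handled
 * in the same context is dropped instead of recursing.
 */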
5133 int perf_swevent_get_recursion_context(void)
5134 {
5135 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5136 
5137 	return get_recursion_context(swhash->recursion);
5138 }
5139 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
5140 
5141 inline void perf_swevent_put_recursion_context(int rctx)
5142 {
5143 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5144 
5145 	put_recursion_context(swhash->recursion, rctx);
5146 }
5147 
5148 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
5149 {
5150 	struct perf_sample_data data;
5151 	int rctx;
5152 
5153 	preempt_disable_notrace();
5154 	rctx = perf_swevent_get_recursion_context();
5155 	if (rctx < 0)
5156 		return;
5157 
5158 	perf_sample_data_init(&data, addr, 0);
5159 
5160 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
5161 
5162 	perf_swevent_put_recursion_context(rctx);
5163 	preempt_enable_notrace();
5164 }
5165 
5166 static void perf_swevent_read(struct perf_event *event)
5167 {
5168 }
5169 
5170 static int perf_swevent_add(struct perf_event *event, int flags)
5171 {
5172 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5173 	struct hw_perf_event *hwc = &event->hw;
5174 	struct hlist_head *head;
5175 
5176 	if (is_sampling_event(event)) {
5177 		hwc->last_period = hwc->sample_period;
5178 		perf_swevent_set_period(event);
5179 	}
5180 
5181 	hwc->state = !(flags & PERF_EF_START);
5182 
5183 	head = find_swevent_head(swhash, event);
5184 	if (WARN_ON_ONCE(!head))
5185 		return -EINVAL;
5186 
5187 	hlist_add_head_rcu(&event->hlist_entry, head);
5188 
5189 	return 0;
5190 }
5191 
5192 static void perf_swevent_del(struct perf_event *event, int flags)
5193 {
5194 	hlist_del_rcu(&event->hlist_entry);
5195 }
5196 
5197 static void perf_swevent_start(struct perf_event *event, int flags)
5198 {
5199 	event->hw.state = 0;
5200 }
5201 
5202 static void perf_swevent_stop(struct perf_event *event, int flags)
5203 {
5204 	event->hw.state = PERF_HES_STOPPED;
5205 }
5206 
5207 /* Deref the hlist from the update side */
5208 static inline struct swevent_hlist *
5209 swevent_hlist_deref(struct swevent_htable *swhash)
5210 {
5211 	return rcu_dereference_protected(swhash->swevent_hlist,
5212 					 lockdep_is_held(&swhash->hlist_mutex));
5213 }
5214 
5215 static void swevent_hlist_release(struct swevent_htable *swhash)
5216 {
5217 	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
5218 
5219 	if (!hlist)
5220 		return;
5221 
5222 	rcu_assign_pointer(swhash->swevent_hlist, NULL);
5223 	kfree_rcu(hlist, rcu_head);
5224 }
5225 
5226 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5227 {
5228 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5229 
5230 	mutex_lock(&swhash->hlist_mutex);
5231 
5232 	if (!--swhash->hlist_refcount)
5233 		swevent_hlist_release(swhash);
5234 
5235 	mutex_unlock(&swhash->hlist_mutex);
5236 }
5237 
5238 static void swevent_hlist_put(struct perf_event *event)
5239 {
5240 	int cpu;
5241 
5242 	if (event->cpu != -1) {
5243 		swevent_hlist_put_cpu(event, event->cpu);
5244 		return;
5245 	}
5246 
5247 	for_each_possible_cpu(cpu)
5248 		swevent_hlist_put_cpu(event, cpu);
5249 }
5250 
5251 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5252 {
5253 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5254 	int err = 0;
5255 
5256 	mutex_lock(&swhash->hlist_mutex);
5257 
5258 	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
5259 		struct swevent_hlist *hlist;
5260 
5261 		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5262 		if (!hlist) {
5263 			err = -ENOMEM;
5264 			goto exit;
5265 		}
5266 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
5267 	}
5268 	swhash->hlist_refcount++;
5269 exit:
5270 	mutex_unlock(&swhash->hlist_mutex);
5271 
5272 	return err;
5273 }
5274 
5275 static int swevent_hlist_get(struct perf_event *event)
5276 {
5277 	int err;
5278 	int cpu, failed_cpu;
5279 
5280 	if (event->cpu != -1)
5281 		return swevent_hlist_get_cpu(event, event->cpu);
5282 
5283 	get_online_cpus();
5284 	for_each_possible_cpu(cpu) {
5285 		err = swevent_hlist_get_cpu(event, cpu);
5286 		if (err) {
5287 			failed_cpu = cpu;
5288 			goto fail;
5289 		}
5290 	}
5291 	put_online_cpus();
5292 
5293 	return 0;
5294 fail:
5295 	for_each_possible_cpu(cpu) {
5296 		if (cpu == failed_cpu)
5297 			break;
5298 		swevent_hlist_put_cpu(event, cpu);
5299 	}
5300 
5301 	put_online_cpus();
5302 	return err;
5303 }
5304 
5305 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
5306 
5307 static void sw_perf_event_destroy(struct perf_event *event)
5308 {
5309 	u64 event_id = event->attr.config;
5310 
5311 	WARN_ON(event->parent);
5312 
5313 	static_key_slow_dec(&perf_swevent_enabled[event_id]);
5314 	swevent_hlist_put(event);
5315 }
5316 
5317 static int perf_swevent_init(struct perf_event *event)
5318 {
5319 	int event_id = event->attr.config;
5320 
5321 	if (event->attr.type != PERF_TYPE_SOFTWARE)
5322 		return -ENOENT;
5323 
5324 	/*
5325 	 * no branch sampling for software events
5326 	 */
5327 	if (has_branch_stack(event))
5328 		return -EOPNOTSUPP;
5329 
5330 	switch (event_id) {
5331 	case PERF_COUNT_SW_CPU_CLOCK:
5332 	case PERF_COUNT_SW_TASK_CLOCK:
5333 		return -ENOENT;
5334 
5335 	default:
5336 		break;
5337 	}
5338 
5339 	if (event_id >= PERF_COUNT_SW_MAX)
5340 		return -ENOENT;
5341 
5342 	if (!event->parent) {
5343 		int err;
5344 
5345 		err = swevent_hlist_get(event);
5346 		if (err)
5347 			return err;
5348 
5349 		static_key_slow_inc(&perf_swevent_enabled[event_id]);
5350 		event->destroy = sw_perf_event_destroy;
5351 	}
5352 
5353 	return 0;
5354 }
5355 
5356 static int perf_swevent_event_idx(struct perf_event *event)
5357 {
5358 	return 0;
5359 }
5360 
5361 static struct pmu perf_swevent = {
5362 	.task_ctx_nr	= perf_sw_context,
5363 
5364 	.event_init	= perf_swevent_init,
5365 	.add		= perf_swevent_add,
5366 	.del		= perf_swevent_del,
5367 	.start		= perf_swevent_start,
5368 	.stop		= perf_swevent_stop,
5369 	.read		= perf_swevent_read,
5370 
5371 	.event_idx	= perf_swevent_event_idx,
5372 };
5373 
5374 #ifdef CONFIG_EVENT_TRACING
5375 
5376 static int perf_tp_filter_match(struct perf_event *event,
5377 				struct perf_sample_data *data)
5378 {
5379 	void *record = data->raw->data;
5380 
5381 	if (likely(!event->filter) || filter_match_preds(event->filter, record))
5382 		return 1;
5383 	return 0;
5384 }
5385 
5386 static int perf_tp_event_match(struct perf_event *event,
5387 				struct perf_sample_data *data,
5388 				struct pt_regs *regs)
5389 {
5390 	if (event->hw.state & PERF_HES_STOPPED)
5391 		return 0;
5392 	/*
5393 	 * All tracepoints are from kernel-space.
5394 	 */
5395 	if (event->attr.exclude_kernel)
5396 		return 0;
5397 
5398 	if (!perf_tp_filter_match(event, data))
5399 		return 0;
5400 
5401 	return 1;
5402 }
5403 
5404 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5405 		   struct pt_regs *regs, struct hlist_head *head, int rctx,
5406 		   struct task_struct *task)
5407 {
5408 	struct perf_sample_data data;
5409 	struct perf_event *event;
5410 	struct hlist_node *node;
5411 
5412 	struct perf_raw_record raw = {
5413 		.size = entry_size,
5414 		.data = record,
5415 	};
5416 
5417 	perf_sample_data_init(&data, addr, 0);
5418 	data.raw = &raw;
5419 
5420 	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
5421 		if (perf_tp_event_match(event, &data, regs))
5422 			perf_swevent_event(event, count, &data, regs);
5423 	}
5424 
5425 	/*
5426 	 * If a target task was specified, also iterate its context and
5427 	 * deliver this event there.
5428 	 */
5429 	if (task && task != current) {
5430 		struct perf_event_context *ctx;
5431 		struct trace_entry *entry = record;
5432 
5433 		rcu_read_lock();
5434 		ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
5435 		if (!ctx)
5436 			goto unlock;
5437 
5438 		list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5439 			if (event->attr.type != PERF_TYPE_TRACEPOINT)
5440 				continue;
5441 			if (event->attr.config != entry->type)
5442 				continue;
5443 			if (perf_tp_event_match(event, &data, regs))
5444 				perf_swevent_event(event, count, &data, regs);
5445 		}
5446 unlock:
5447 		rcu_read_unlock();
5448 	}
5449 
5450 	perf_swevent_put_recursion_context(rctx);
5451 }
5452 EXPORT_SYMBOL_GPL(perf_tp_event);
5453 
5454 static void tp_perf_event_destroy(struct perf_event *event)
5455 {
5456 	perf_trace_destroy(event);
5457 }
5458 
5459 static int perf_tp_event_init(struct perf_event *event)
5460 {
5461 	int err;
5462 
5463 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
5464 		return -ENOENT;
5465 
5466 	/*
5467 	 * no branch sampling for tracepoint events
5468 	 */
5469 	if (has_branch_stack(event))
5470 		return -EOPNOTSUPP;
5471 
5472 	err = perf_trace_init(event);
5473 	if (err)
5474 		return err;
5475 
5476 	event->destroy = tp_perf_event_destroy;
5477 
5478 	return 0;
5479 }
5480 
5481 static struct pmu perf_tracepoint = {
5482 	.task_ctx_nr	= perf_sw_context,
5483 
5484 	.event_init	= perf_tp_event_init,
5485 	.add		= perf_trace_add,
5486 	.del		= perf_trace_del,
5487 	.start		= perf_swevent_start,
5488 	.stop		= perf_swevent_stop,
5489 	.read		= perf_swevent_read,
5490 
5491 	.event_idx	= perf_swevent_event_idx,
5492 };
5493 
5494 static inline void perf_tp_register(void)
5495 {
5496 	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
5497 }
5498 
5499 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5500 {
5501 	char *filter_str;
5502 	int ret;
5503 
5504 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
5505 		return -EINVAL;
5506 
5507 	filter_str = strndup_user(arg, PAGE_SIZE);
5508 	if (IS_ERR(filter_str))
5509 		return PTR_ERR(filter_str);
5510 
5511 	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5512 
5513 	kfree(filter_str);
5514 	return ret;
5515 }
5516 
5517 static void perf_event_free_filter(struct perf_event *event)
5518 {
5519 	ftrace_profile_free_filter(event);
5520 }
5521 
5522 #else
5523 
5524 static inline void perf_tp_register(void)
5525 {
5526 }
5527 
5528 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5529 {
5530 	return -ENOENT;
5531 }
5532 
5533 static void perf_event_free_filter(struct perf_event *event)
5534 {
5535 }
5536 
5537 #endif /* CONFIG_EVENT_TRACING */
5538 
5539 #ifdef CONFIG_HAVE_HW_BREAKPOINT
5540 void perf_bp_event(struct perf_event *bp, void *data)
5541 {
5542 	struct perf_sample_data sample;
5543 	struct pt_regs *regs = data;
5544 
5545 	perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
5546 
5547 	if (!bp->hw.state && !perf_exclude_event(bp, regs))
5548 		perf_swevent_event(bp, 1, &sample, regs);
5549 }
5550 #endif
5551 
5552 /*
5553  * hrtimer based swevent callback
5554  */
5555 
5556 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
5557 {
5558 	enum hrtimer_restart ret = HRTIMER_RESTART;
5559 	struct perf_sample_data data;
5560 	struct pt_regs *regs;
5561 	struct perf_event *event;
5562 	u64 period;
5563 
5564 	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
5565 
5566 	if (event->state != PERF_EVENT_STATE_ACTIVE)
5567 		return HRTIMER_NORESTART;
5568 
5569 	event->pmu->read(event);
5570 
5571 	perf_sample_data_init(&data, 0, event->hw.last_period);
5572 	regs = get_irq_regs();
5573 
5574 	if (regs && !perf_exclude_event(event, regs)) {
5575 		if (!(event->attr.exclude_idle && is_idle_task(current)))
5576 			if (__perf_event_overflow(event, 1, &data, regs))
5577 				ret = HRTIMER_NORESTART;
5578 	}
5579 
5580 	period = max_t(u64, 10000, event->hw.sample_period);
5581 	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
5582 
5583 	return ret;
5584 }
5585 
5586 static void perf_swevent_start_hrtimer(struct perf_event *event)
5587 {
5588 	struct hw_perf_event *hwc = &event->hw;
5589 	s64 period;
5590 
5591 	if (!is_sampling_event(event))
5592 		return;
5593 
5594 	period = local64_read(&hwc->period_left);
5595 	if (period) {
5596 		if (period < 0)
5597 			period = 10000;
5598 
5599 		local64_set(&hwc->period_left, 0);
5600 	} else {
5601 		period = max_t(u64, 10000, hwc->sample_period);
5602 	}
5603 	__hrtimer_start_range_ns(&hwc->hrtimer,
5604 				ns_to_ktime(period), 0,
5605 				HRTIMER_MODE_REL_PINNED, 0);
5606 }
5607 
5608 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
5609 {
5610 	struct hw_perf_event *hwc = &event->hw;
5611 
5612 	if (is_sampling_event(event)) {
5613 		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
5614 		local64_set(&hwc->period_left, ktime_to_ns(remaining));
5615 
5616 		hrtimer_cancel(&hwc->hrtimer);
5617 	}
5618 }
5619 
5620 static void perf_swevent_init_hrtimer(struct perf_event *event)
5621 {
5622 	struct hw_perf_event *hwc = &event->hw;
5623 
5624 	if (!is_sampling_event(event))
5625 		return;
5626 
5627 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5628 	hwc->hrtimer.function = perf_swevent_hrtimer;
5629 
5630 	/*
5631 	 * Since hrtimers have a fixed rate, we can do a static freq->period
5632 	 * mapping and avoid the whole period adjust feedback stuff.
5633 	 */
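	/*
	 * E.g. attr.sample_freq == 1000 becomes a fixed period of
	 * NSEC_PER_SEC / 1000 = 1,000,000 ns between hrtimer expirations.
	 */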
5634 	if (event->attr.freq) {
5635 		long freq = event->attr.sample_freq;
5636 
5637 		event->attr.sample_period = NSEC_PER_SEC / freq;
5638 		hwc->sample_period = event->attr.sample_period;
5639 		local64_set(&hwc->period_left, hwc->sample_period);
5640 		event->attr.freq = 0;
5641 	}
5642 }
5643 
5644 /*
5645  * Software event: cpu wall time clock
5646  */
5647 
5648 static void cpu_clock_event_update(struct perf_event *event)
5649 {
5650 	s64 prev;
5651 	u64 now;
5652 
5653 	now = local_clock();
5654 	prev = local64_xchg(&event->hw.prev_count, now);
5655 	local64_add(now - prev, &event->count);
5656 }
5657 
5658 static void cpu_clock_event_start(struct perf_event *event, int flags)
5659 {
5660 	local64_set(&event->hw.prev_count, local_clock());
5661 	perf_swevent_start_hrtimer(event);
5662 }
5663 
5664 static void cpu_clock_event_stop(struct perf_event *event, int flags)
5665 {
5666 	perf_swevent_cancel_hrtimer(event);
5667 	cpu_clock_event_update(event);
5668 }
5669 
5670 static int cpu_clock_event_add(struct perf_event *event, int flags)
5671 {
5672 	if (flags & PERF_EF_START)
5673 		cpu_clock_event_start(event, flags);
5674 
5675 	return 0;
5676 }
5677 
5678 static void cpu_clock_event_del(struct perf_event *event, int flags)
5679 {
5680 	cpu_clock_event_stop(event, flags);
5681 }
5682 
5683 static void cpu_clock_event_read(struct perf_event *event)
5684 {
5685 	cpu_clock_event_update(event);
5686 }
5687 
5688 static int cpu_clock_event_init(struct perf_event *event)
5689 {
5690 	if (event->attr.type != PERF_TYPE_SOFTWARE)
5691 		return -ENOENT;
5692 
5693 	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5694 		return -ENOENT;
5695 
5696 	/*
5697 	 * no branch sampling for software events
5698 	 */
5699 	if (has_branch_stack(event))
5700 		return -EOPNOTSUPP;
5701 
5702 	perf_swevent_init_hrtimer(event);
5703 
5704 	return 0;
5705 }
5706 
5707 static struct pmu perf_cpu_clock = {
5708 	.task_ctx_nr	= perf_sw_context,
5709 
5710 	.event_init	= cpu_clock_event_init,
5711 	.add		= cpu_clock_event_add,
5712 	.del		= cpu_clock_event_del,
5713 	.start		= cpu_clock_event_start,
5714 	.stop		= cpu_clock_event_stop,
5715 	.read		= cpu_clock_event_read,
5716 
5717 	.event_idx	= perf_swevent_event_idx,
5718 };
5719 
5720 /*
5721  * Software event: task time clock
5722  */
5723 
5724 static void task_clock_event_update(struct perf_event *event, u64 now)
5725 {
5726 	u64 prev;
5727 	s64 delta;
5728 
5729 	prev = local64_xchg(&event->hw.prev_count, now);
5730 	delta = now - prev;
5731 	local64_add(delta, &event->count);
5732 }
5733 
5734 static void task_clock_event_start(struct perf_event *event, int flags)
5735 {
5736 	local64_set(&event->hw.prev_count, event->ctx->time);
5737 	perf_swevent_start_hrtimer(event);
5738 }
5739 
5740 static void task_clock_event_stop(struct perf_event *event, int flags)
5741 {
5742 	perf_swevent_cancel_hrtimer(event);
5743 	task_clock_event_update(event, event->ctx->time);
5744 }
5745 
5746 static int task_clock_event_add(struct perf_event *event, int flags)
5747 {
5748 	if (flags & PERF_EF_START)
5749 		task_clock_event_start(event, flags);
5750 
5751 	return 0;
5752 }
5753 
5754 static void task_clock_event_del(struct perf_event *event, int flags)
5755 {
5756 	task_clock_event_stop(event, PERF_EF_UPDATE);
5757 }
5758 
5759 static void task_clock_event_read(struct perf_event *event)
5760 {
5761 	u64 now = perf_clock();
5762 	u64 delta = now - event->ctx->timestamp;
5763 	u64 time = event->ctx->time + delta;
5764 
5765 	task_clock_event_update(event, time);
5766 }
5767 
5768 static int task_clock_event_init(struct perf_event *event)
5769 {
5770 	if (event->attr.type != PERF_TYPE_SOFTWARE)
5771 		return -ENOENT;
5772 
5773 	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5774 		return -ENOENT;
5775 
5776 	/*
5777 	 * no branch sampling for software events
5778 	 */
5779 	if (has_branch_stack(event))
5780 		return -EOPNOTSUPP;
5781 
5782 	perf_swevent_init_hrtimer(event);
5783 
5784 	return 0;
5785 }
5786 
5787 static struct pmu perf_task_clock = {
5788 	.task_ctx_nr	= perf_sw_context,
5789 
5790 	.event_init	= task_clock_event_init,
5791 	.add		= task_clock_event_add,
5792 	.del		= task_clock_event_del,
5793 	.start		= task_clock_event_start,
5794 	.stop		= task_clock_event_stop,
5795 	.read		= task_clock_event_read,
5796 
5797 	.event_idx	= perf_swevent_event_idx,
5798 };
5799 
5800 static void perf_pmu_nop_void(struct pmu *pmu)
5801 {
5802 }
5803 
5804 static int perf_pmu_nop_int(struct pmu *pmu)
5805 {
5806 	return 0;
5807 }
5808 
5809 static void perf_pmu_start_txn(struct pmu *pmu)
5810 {
5811 	perf_pmu_disable(pmu);
5812 }
5813 
5814 static int perf_pmu_commit_txn(struct pmu *pmu)
5815 {
5816 	perf_pmu_enable(pmu);
5817 	return 0;
5818 }
5819 
5820 static void perf_pmu_cancel_txn(struct pmu *pmu)
5821 {
5822 	perf_pmu_enable(pmu);
5823 }
5824 
5825 static int perf_event_idx_default(struct perf_event *event)
5826 {
5827 	return event->hw.idx + 1;
5828 }
5829 
5830 /*
5831  * Ensures all contexts with the same task_ctx_nr have the same
5832  * pmu_cpu_context too.
5833  */
5834 static void *find_pmu_context(int ctxn)
5835 {
5836 	struct pmu *pmu;
5837 
5838 	if (ctxn < 0)
5839 		return NULL;
5840 
5841 	list_for_each_entry(pmu, &pmus, entry) {
5842 		if (pmu->task_ctx_nr == ctxn)
5843 			return pmu->pmu_cpu_context;
5844 	}
5845 
5846 	return NULL;
5847 }
5848 
5849 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
5850 {
5851 	int cpu;
5852 
5853 	for_each_possible_cpu(cpu) {
5854 		struct perf_cpu_context *cpuctx;
5855 
5856 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5857 
5858 		if (cpuctx->active_pmu == old_pmu)
5859 			cpuctx->active_pmu = pmu;
5860 	}
5861 }
5862 
5863 static void free_pmu_context(struct pmu *pmu)
5864 {
5865 	struct pmu *i;
5866 
5867 	mutex_lock(&pmus_lock);
5868 	/*
5869 	 * Like a real lame refcount.
5870 	 */
5871 	list_for_each_entry(i, &pmus, entry) {
5872 		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
5873 			update_pmu_context(i, pmu);
5874 			goto out;
5875 		}
5876 	}
5877 
5878 	free_percpu(pmu->pmu_cpu_context);
5879 out:
5880 	mutex_unlock(&pmus_lock);
5881 }
5882 static struct idr pmu_idr;
5883 
5884 static ssize_t
5885 type_show(struct device *dev, struct device_attribute *attr, char *page)
5886 {
5887 	struct pmu *pmu = dev_get_drvdata(dev);
5888 
5889 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
5890 }
5891 
5892 static struct device_attribute pmu_dev_attrs[] = {
5893        __ATTR_RO(type),
5894        __ATTR_NULL,
5895 };
5896 
5897 static int pmu_bus_running;
5898 static struct bus_type pmu_bus = {
5899 	.name		= "event_source",
5900 	.dev_attrs	= pmu_dev_attrs,
5901 };
5902 
5903 static void pmu_dev_release(struct device *dev)
5904 {
5905 	kfree(dev);
5906 }
5907 
5908 static int pmu_dev_alloc(struct pmu *pmu)
5909 {
5910 	int ret = -ENOMEM;
5911 
5912 	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
5913 	if (!pmu->dev)
5914 		goto out;
5915 
5916 	pmu->dev->groups = pmu->attr_groups;
5917 	device_initialize(pmu->dev);
5918 	ret = dev_set_name(pmu->dev, "%s", pmu->name);
5919 	if (ret)
5920 		goto free_dev;
5921 
5922 	dev_set_drvdata(pmu->dev, pmu);
5923 	pmu->dev->bus = &pmu_bus;
5924 	pmu->dev->release = pmu_dev_release;
5925 	ret = device_add(pmu->dev);
5926 	if (ret)
5927 		goto free_dev;
5928 
5929 out:
5930 	return ret;
5931 
5932 free_dev:
5933 	put_device(pmu->dev);
5934 	goto out;
5935 }
5936 
5937 static struct lock_class_key cpuctx_mutex;
5938 static struct lock_class_key cpuctx_lock;
5939 
5940 int perf_pmu_register(struct pmu *pmu, char *name, int type)
5941 {
5942 	int cpu, ret;
5943 
5944 	mutex_lock(&pmus_lock);
5945 	ret = -ENOMEM;
5946 	pmu->pmu_disable_count = alloc_percpu(int);
5947 	if (!pmu->pmu_disable_count)
5948 		goto unlock;
5949 
5950 	pmu->type = -1;
5951 	if (!name)
5952 		goto skip_type;
5953 	pmu->name = name;
5954 
5955 	if (type < 0) {
5956 		int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
5957 		if (!err)
5958 			goto free_pdc;
5959 
5960 		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
5961 		if (err) {
5962 			ret = err;
5963 			goto free_pdc;
5964 		}
5965 	}
5966 	pmu->type = type;
5967 
5968 	if (pmu_bus_running) {
5969 		ret = pmu_dev_alloc(pmu);
5970 		if (ret)
5971 			goto free_idr;
5972 	}
5973 
5974 skip_type:
5975 	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
5976 	if (pmu->pmu_cpu_context)
5977 		goto got_cpu_context;
5978 
5979 	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5980 	if (!pmu->pmu_cpu_context)
5981 		goto free_dev;
5982 
5983 	for_each_possible_cpu(cpu) {
5984 		struct perf_cpu_context *cpuctx;
5985 
5986 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5987 		__perf_event_init_context(&cpuctx->ctx);
5988 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
5989 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
5990 		cpuctx->ctx.type = cpu_context;
5991 		cpuctx->ctx.pmu = pmu;
5992 		cpuctx->jiffies_interval = 1;
5993 		INIT_LIST_HEAD(&cpuctx->rotation_list);
5994 		cpuctx->active_pmu = pmu;
5995 	}
5996 
5997 got_cpu_context:
5998 	if (!pmu->start_txn) {
5999 		if (pmu->pmu_enable) {
6000 			/*
6001 			 * If we have pmu_enable/pmu_disable calls, install
6002 			 * transaction stubs that use that to try and batch
6003 			 * hardware accesses.
6004 			 */
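			/*
			 * A group scheduling attempt then looks roughly like:
			 *	pmu->start_txn(pmu);	-> pmu_disable()
			 *	pmu->add(event, flags);	once per group member
			 *	pmu->commit_txn(pmu);	-> pmu_enable()
			 * with cancel_txn() unwinding a failed batch.
			 */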
6005 			pmu->start_txn  = perf_pmu_start_txn;
6006 			pmu->commit_txn = perf_pmu_commit_txn;
6007 			pmu->cancel_txn = perf_pmu_cancel_txn;
6008 		} else {
6009 			pmu->start_txn  = perf_pmu_nop_void;
6010 			pmu->commit_txn = perf_pmu_nop_int;
6011 			pmu->cancel_txn = perf_pmu_nop_void;
6012 		}
6013 	}
6014 
6015 	if (!pmu->pmu_enable) {
6016 		pmu->pmu_enable  = perf_pmu_nop_void;
6017 		pmu->pmu_disable = perf_pmu_nop_void;
6018 	}
6019 
6020 	if (!pmu->event_idx)
6021 		pmu->event_idx = perf_event_idx_default;
6022 
6023 	list_add_rcu(&pmu->entry, &pmus);
6024 	ret = 0;
6025 unlock:
6026 	mutex_unlock(&pmus_lock);
6027 
6028 	return ret;
6029 
6030 free_dev:
6031 	device_del(pmu->dev);
6032 	put_device(pmu->dev);
6033 
6034 free_idr:
6035 	if (pmu->type >= PERF_TYPE_MAX)
6036 		idr_remove(&pmu_idr, pmu->type);
6037 
6038 free_pdc:
6039 	free_percpu(pmu->pmu_disable_count);
6040 	goto unlock;
6041 }
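/*
 * Editorial sketch, not part of the original file: a minimal PMU driver
 * only has to provide event_init plus the add/del/start/stop/read callbacks
 * before calling perf_pmu_register(); the transaction and pmu_enable/
 * pmu_disable hooks are optional and are stubbed above when absent.  All
 * my_pmu_* names below are placeholders:
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.event_init	= my_pmu_event_init,
 *		.add		= my_pmu_add,
 *		.del		= my_pmu_del,
 *		.start		= my_pmu_start,
 *		.stop		= my_pmu_stop,
 *		.read		= my_pmu_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *
 * Passing type == -1 asks the pmu_idr above for a dynamic type id; passing
 * a NULL name skips the idr and sysfs registration entirely.
 */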
6042 
6043 void perf_pmu_unregister(struct pmu *pmu)
6044 {
6045 	mutex_lock(&pmus_lock);
6046 	list_del_rcu(&pmu->entry);
6047 	mutex_unlock(&pmus_lock);
6048 
6049 	/*
6050 	 * We dereference the pmu list under both SRCU and regular RCU, so
6051 	 * synchronize against both of those.
6052 	 */
6053 	synchronize_srcu(&pmus_srcu);
6054 	synchronize_rcu();
6055 
6056 	free_percpu(pmu->pmu_disable_count);
6057 	if (pmu->type >= PERF_TYPE_MAX)
6058 		idr_remove(&pmu_idr, pmu->type);
6059 	device_del(pmu->dev);
6060 	put_device(pmu->dev);
6061 	free_pmu_context(pmu);
6062 }
6063 
6064 struct pmu *perf_init_event(struct perf_event *event)
6065 {
6066 	struct pmu *pmu = NULL;
6067 	int idx;
6068 	int ret;
6069 
6070 	idx = srcu_read_lock(&pmus_srcu);
6071 
6072 	rcu_read_lock();
6073 	pmu = idr_find(&pmu_idr, event->attr.type);
6074 	rcu_read_unlock();
6075 	if (pmu) {
6076 		event->pmu = pmu;
6077 		ret = pmu->event_init(event);
6078 		if (ret)
6079 			pmu = ERR_PTR(ret);
6080 		goto unlock;
6081 	}
6082 
6083 	list_for_each_entry_rcu(pmu, &pmus, entry) {
6084 		event->pmu = pmu;
6085 		ret = pmu->event_init(event);
6086 		if (!ret)
6087 			goto unlock;
6088 
6089 		if (ret != -ENOENT) {
6090 			pmu = ERR_PTR(ret);
6091 			goto unlock;
6092 		}
6093 	}
6094 	pmu = ERR_PTR(-ENOENT);
6095 unlock:
6096 	srcu_read_unlock(&pmus_srcu, idx);
6097 
6098 	return pmu;
6099 }
6100 
6101 /*
6102  * Allocate and initialize an event structure
6103  */
6104 static struct perf_event *
6105 perf_event_alloc(struct perf_event_attr *attr, int cpu,
6106 		 struct task_struct *task,
6107 		 struct perf_event *group_leader,
6108 		 struct perf_event *parent_event,
6109 		 perf_overflow_handler_t overflow_handler,
6110 		 void *context)
6111 {
6112 	struct pmu *pmu;
6113 	struct perf_event *event;
6114 	struct hw_perf_event *hwc;
6115 	long err;
6116 
6117 	if ((unsigned)cpu >= nr_cpu_ids) {
6118 		if (!task || cpu != -1)
6119 			return ERR_PTR(-EINVAL);
6120 	}
6121 
6122 	event = kzalloc(sizeof(*event), GFP_KERNEL);
6123 	if (!event)
6124 		return ERR_PTR(-ENOMEM);
6125 
6126 	/*
6127 	 * Single events are their own group leaders, with an
6128 	 * empty sibling list:
6129 	 */
6130 	if (!group_leader)
6131 		group_leader = event;
6132 
6133 	mutex_init(&event->child_mutex);
6134 	INIT_LIST_HEAD(&event->child_list);
6135 
6136 	INIT_LIST_HEAD(&event->group_entry);
6137 	INIT_LIST_HEAD(&event->event_entry);
6138 	INIT_LIST_HEAD(&event->sibling_list);
6139 	INIT_LIST_HEAD(&event->rb_entry);
6140 
6141 	init_waitqueue_head(&event->waitq);
6142 	init_irq_work(&event->pending, perf_pending_event);
6143 
6144 	mutex_init(&event->mmap_mutex);
6145 
6146 	atomic_long_set(&event->refcount, 1);
6147 	event->cpu		= cpu;
6148 	event->attr		= *attr;
6149 	event->group_leader	= group_leader;
6150 	event->pmu		= NULL;
6151 	event->oncpu		= -1;
6152 
6153 	event->parent		= parent_event;
6154 
6155 	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
6156 	event->id		= atomic64_inc_return(&perf_event_id);
6157 
6158 	event->state		= PERF_EVENT_STATE_INACTIVE;
6159 
6160 	if (task) {
6161 		event->attach_state = PERF_ATTACH_TASK;
6162 #ifdef CONFIG_HAVE_HW_BREAKPOINT
6163 		/*
6164 		 * hw_breakpoint is a bit special: it needs the target task up front.
6165 		 */
6166 		if (attr->type == PERF_TYPE_BREAKPOINT)
6167 			event->hw.bp_target = task;
6168 #endif
6169 	}
6170 
6171 	if (!overflow_handler && parent_event) {
6172 		overflow_handler = parent_event->overflow_handler;
6173 		context = parent_event->overflow_handler_context;
6174 	}
6175 
6176 	event->overflow_handler	= overflow_handler;
6177 	event->overflow_handler_context = context;
6178 
6179 	if (attr->disabled)
6180 		event->state = PERF_EVENT_STATE_OFF;
6181 
6182 	pmu = NULL;
6183 
6184 	hwc = &event->hw;
6185 	hwc->sample_period = attr->sample_period;
6186 	if (attr->freq && attr->sample_freq)
6187 		hwc->sample_period = 1;
6188 	hwc->last_period = hwc->sample_period;
6189 
6190 	local64_set(&hwc->period_left, hwc->sample_period);
6191 
6192 	/*
6193 	 * we currently do not support PERF_FORMAT_GROUP on inherited events
6194 	 */
6195 	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
6196 		goto done;
6197 
6198 	pmu = perf_init_event(event);
6199 
6200 done:
6201 	err = 0;
6202 	if (!pmu)
6203 		err = -EINVAL;
6204 	else if (IS_ERR(pmu))
6205 		err = PTR_ERR(pmu);
6206 
6207 	if (err) {
6208 		if (event->ns)
6209 			put_pid_ns(event->ns);
6210 		kfree(event);
6211 		return ERR_PTR(err);
6212 	}
6213 
6214 	if (!event->parent) {
6215 		if (event->attach_state & PERF_ATTACH_TASK)
6216 			static_key_slow_inc(&perf_sched_events.key);
6217 		if (event->attr.mmap || event->attr.mmap_data)
6218 			atomic_inc(&nr_mmap_events);
6219 		if (event->attr.comm)
6220 			atomic_inc(&nr_comm_events);
6221 		if (event->attr.task)
6222 			atomic_inc(&nr_task_events);
6223 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6224 			err = get_callchain_buffers();
6225 			if (err) {
6226 				free_event(event);
6227 				return ERR_PTR(err);
6228 			}
6229 		}
6230 		if (has_branch_stack(event)) {
6231 			static_key_slow_inc(&perf_sched_events.key);
6232 			if (!(event->attach_state & PERF_ATTACH_TASK))
6233 				atomic_inc(&per_cpu(perf_branch_stack_events,
6234 						    event->cpu));
6235 		}
6236 	}
6237 
6238 	return event;
6239 }
6240 
6241 static int perf_copy_attr(struct perf_event_attr __user *uattr,
6242 			  struct perf_event_attr *attr)
6243 {
6244 	u32 size;
6245 	int ret;
6246 
6247 	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6248 		return -EFAULT;
6249 
6250 	/*
6251 	 * zero the full structure, so that a short copy leaves the remaining fields zeroed.
6252 	 */
6253 	memset(attr, 0, sizeof(*attr));
6254 
6255 	ret = get_user(size, &uattr->size);
6256 	if (ret)
6257 		return ret;
6258 
6259 	if (size > PAGE_SIZE)	/* silly large */
6260 		goto err_size;
6261 
6262 	if (!size)		/* abi compat */
6263 		size = PERF_ATTR_SIZE_VER0;
6264 
6265 	if (size < PERF_ATTR_SIZE_VER0)
6266 		goto err_size;
6267 
6268 	/*
6269 	 * If we're handed a bigger struct than we know of,
6270 	 * ensure all the unknown bits are 0 - i.e. new
6271 	 * user-space does not rely on any kernel feature
6272 	 * extensions we don't know about yet.
6273 	 */
6274 	if (size > sizeof(*attr)) {
6275 		unsigned char __user *addr;
6276 		unsigned char __user *end;
6277 		unsigned char val;
6278 
6279 		addr = (void __user *)uattr + sizeof(*attr);
6280 		end  = (void __user *)uattr + size;
6281 
6282 		for (; addr < end; addr++) {
6283 			ret = get_user(val, addr);
6284 			if (ret)
6285 				return ret;
6286 			if (val)
6287 				goto err_size;
6288 		}
6289 		size = sizeof(*attr);
6290 	}
6291 
6292 	ret = copy_from_user(attr, uattr, size);
6293 	if (ret)
6294 		return -EFAULT;
6295 
6296 	if (attr->__reserved_1)
6297 		return -EINVAL;
6298 
6299 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
6300 		return -EINVAL;
6301 
6302 	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
6303 		return -EINVAL;
6304 
6305 	if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
6306 		u64 mask = attr->branch_sample_type;
6307 
6308 		/* only using defined bits */
6309 		if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
6310 			return -EINVAL;
6311 
6312 		/* at least one branch bit must be set */
6313 		if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
6314 			return -EINVAL;
6315 
6316 		/* kernel level capture: check permissions */
6317 		if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
6318 		    && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6319 			return -EACCES;
6320 
6321 		/* propagate priv level, when not set for branch */
6322 		if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
6323 
6324 			/* exclude_kernel checked on syscall entry */
6325 			if (!attr->exclude_kernel)
6326 				mask |= PERF_SAMPLE_BRANCH_KERNEL;
6327 
6328 			if (!attr->exclude_user)
6329 				mask |= PERF_SAMPLE_BRANCH_USER;
6330 
6331 			if (!attr->exclude_hv)
6332 				mask |= PERF_SAMPLE_BRANCH_HV;
6333 			/*
6334 			 * adjust user setting (for HW filter setup)
6335 			 */
6336 			attr->branch_sample_type = mask;
6337 		}
6338 	}
6339 
6340 	if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
6341 		ret = perf_reg_validate(attr->sample_regs_user);
6342 		if (ret)
6343 			return ret;
6344 	}
6345 
6346 	if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
6347 		if (!arch_perf_have_user_stack_dump())
6348 			return -ENOSYS;
6349 
6350 		/*
6351 		 * We have __u32 type for the size, but so far
6352 		 * we can only use __u16 as maximum due to the
6353 		 * __u16 sample size limit.
6354 		 */
6355 		if (attr->sample_stack_user >= USHRT_MAX)
6356 			ret = -EINVAL;
6357 		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
6358 			ret = -EINVAL;
6359 	}
6360 
6361 out:
6362 	return ret;
6363 
6364 err_size:
6365 	put_user(sizeof(*attr), &uattr->size);
6366 	ret = -E2BIG;
6367 	goto out;
6368 }
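/*
 * Editorial sketch, not part of the original file: the size handling above
 * is what keeps old and new user space compatible.  A well-behaved caller
 * zeroes the structure and states the size it was built against:
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size = sizeof(attr);
 *	attr.type = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *	attr.disabled = 1;
 *	attr.exclude_kernel = 1;
 *
 * A smaller-than-known size is implicitly padded with zeroes by the memset
 * above; a larger one is accepted only if every byte the kernel does not
 * know about is zero, as verified by the get_user() loop.
 */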
6369 
6370 static int
6371 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
6372 {
6373 	struct ring_buffer *rb = NULL, *old_rb = NULL;
6374 	int ret = -EINVAL;
6375 
6376 	if (!output_event)
6377 		goto set;
6378 
6379 	/* don't allow circular references */
6380 	if (event == output_event)
6381 		goto out;
6382 
6383 	/*
6384 	 * Don't allow cross-cpu buffers
6385 	 */
6386 	if (output_event->cpu != event->cpu)
6387 		goto out;
6388 
6389 	/*
6390 	 * If it's not a per-cpu rb, it must be the same task.
6391 	 */
6392 	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
6393 		goto out;
6394 
6395 set:
6396 	mutex_lock(&event->mmap_mutex);
6397 	/* Can't redirect output if we've got an active mmap() */
6398 	if (atomic_read(&event->mmap_count))
6399 		goto unlock;
6400 
6401 	if (output_event) {
6402 		/* get the rb we want to redirect to */
6403 		rb = ring_buffer_get(output_event);
6404 		if (!rb)
6405 			goto unlock;
6406 	}
6407 
6408 	old_rb = event->rb;
6409 	rcu_assign_pointer(event->rb, rb);
6410 	if (old_rb)
6411 		ring_buffer_detach(event, old_rb);
6412 	ret = 0;
6413 unlock:
6414 	mutex_unlock(&event->mmap_mutex);
6415 
6416 	if (old_rb)
6417 		ring_buffer_put(old_rb);
6418 out:
6419 	return ret;
6420 }
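/*
 * Editorial note, not part of the original file: besides the
 * PERF_FLAG_FD_OUTPUT path in sys_perf_event_open() below, this helper is
 * reached via the PERF_EVENT_IOC_SET_OUTPUT ioctl.  A hedged user-space
 * sketch redirecting the samples of fd2 into the buffer mmap()ed on fd1:
 *
 *	if (ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1) < 0)
 *		perror("PERF_EVENT_IOC_SET_OUTPUT");
 *
 * Per the checks above, both events must be on the same CPU (or share the
 * same task context for per-task events) and fd2 must not have an active
 * mmap() of its own.
 */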
6421 
6422 /**
6423  * sys_perf_event_open - open a performance event, associate it to a task/cpu
6424  *
6425  * @attr_uptr:	event_id type attributes for monitoring/sampling
6426  * @pid:		target pid
6427  * @cpu:		target cpu
6428  * @group_fd:		group leader event fd
6429  */
6430 SYSCALL_DEFINE5(perf_event_open,
6431 		struct perf_event_attr __user *, attr_uptr,
6432 		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
6433 {
6434 	struct perf_event *group_leader = NULL, *output_event = NULL;
6435 	struct perf_event *event, *sibling;
6436 	struct perf_event_attr attr;
6437 	struct perf_event_context *ctx;
6438 	struct file *event_file = NULL;
6439 	struct fd group = {NULL, 0};
6440 	struct task_struct *task = NULL;
6441 	struct pmu *pmu;
6442 	int event_fd;
6443 	int move_group = 0;
6444 	int err;
6445 
6446 	/* for future expandability... */
6447 	if (flags & ~PERF_FLAG_ALL)
6448 		return -EINVAL;
6449 
6450 	err = perf_copy_attr(attr_uptr, &attr);
6451 	if (err)
6452 		return err;
6453 
6454 	if (!attr.exclude_kernel) {
6455 		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6456 			return -EACCES;
6457 	}
6458 
6459 	if (attr.freq) {
6460 		if (attr.sample_freq > sysctl_perf_event_sample_rate)
6461 			return -EINVAL;
6462 	}
6463 
6464 	/*
6465 	 * In cgroup mode, the pid argument is used to pass the fd
6466 	 * opened to the cgroup directory in cgroupfs. The cpu argument
6467 	 * designates the cpu on which to monitor threads from that
6468 	 * cgroup.
6469 	 */
6470 	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
6471 		return -EINVAL;
6472 
6473 	event_fd = get_unused_fd();
6474 	if (event_fd < 0)
6475 		return event_fd;
6476 
6477 	if (group_fd != -1) {
6478 		err = perf_fget_light(group_fd, &group);
6479 		if (err)
6480 			goto err_fd;
6481 		group_leader = group.file->private_data;
6482 		if (flags & PERF_FLAG_FD_OUTPUT)
6483 			output_event = group_leader;
6484 		if (flags & PERF_FLAG_FD_NO_GROUP)
6485 			group_leader = NULL;
6486 	}
6487 
6488 	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
6489 		task = find_lively_task_by_vpid(pid);
6490 		if (IS_ERR(task)) {
6491 			err = PTR_ERR(task);
6492 			goto err_group_fd;
6493 		}
6494 	}
6495 
6496 	get_online_cpus();
6497 
6498 	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
6499 				 NULL, NULL);
6500 	if (IS_ERR(event)) {
6501 		err = PTR_ERR(event);
6502 		goto err_task;
6503 	}
6504 
6505 	if (flags & PERF_FLAG_PID_CGROUP) {
6506 		err = perf_cgroup_connect(pid, event, &attr, group_leader);
6507 		if (err)
6508 			goto err_alloc;
6509 		/*
6510 		 * one more event:
6511 		 * - that has cgroup constraint on event->cpu
6512 		 * - that may need work on context switch
6513 		 */
6514 		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
6515 		static_key_slow_inc(&perf_sched_events.key);
6516 	}
6517 
6518 	/*
6519 	 * Special case software events and allow them to be part of
6520 	 * any hardware group.
6521 	 */
6522 	pmu = event->pmu;
6523 
6524 	if (group_leader &&
6525 	    (is_software_event(event) != is_software_event(group_leader))) {
6526 		if (is_software_event(event)) {
6527 			/*
6528 			 * event and group_leader are of different kinds here;
6529 			 * since event is the software one, the leader is not.
6530 			 *
6531 			 * Allow the addition of software events to !software
6532 			 * groups, this is safe because software events never
6533 			 * fail to schedule.
6534 			 */
6535 			pmu = group_leader->pmu;
6536 		} else if (is_software_event(group_leader) &&
6537 			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
6538 			/*
6539 			 * In case the group is a pure software group, and we
6540 			 * try to add a hardware event, move the whole group to
6541 			 * the hardware context.
6542 			 */
6543 			move_group = 1;
6544 		}
6545 	}
6546 
6547 	/*
6548 	 * Get the target context (task or percpu):
6549 	 */
6550 	ctx = find_get_context(pmu, task, event->cpu);
6551 	if (IS_ERR(ctx)) {
6552 		err = PTR_ERR(ctx);
6553 		goto err_alloc;
6554 	}
6555 
6556 	if (task) {
6557 		put_task_struct(task);
6558 		task = NULL;
6559 	}
6560 
6561 	/*
6562 	 * Look up the group leader (we will attach this event to it):
6563 	 */
6564 	if (group_leader) {
6565 		err = -EINVAL;
6566 
6567 		/*
6568 		 * Do not allow a recursive hierarchy (this new sibling
6569 		 * becoming part of another group-sibling):
6570 		 */
6571 		if (group_leader->group_leader != group_leader)
6572 			goto err_context;
6573 		/*
6574 		 * Do not allow attaching to a group in a different
6575 		 * task or CPU context:
6576 		 */
6577 		if (move_group) {
6578 			if (group_leader->ctx->type != ctx->type)
6579 				goto err_context;
6580 		} else {
6581 			if (group_leader->ctx != ctx)
6582 				goto err_context;
6583 		}
6584 
6585 		/*
6586 		 * Only a group leader can be exclusive or pinned
6587 		 */
6588 		if (attr.exclusive || attr.pinned)
6589 			goto err_context;
6590 	}
6591 
6592 	if (output_event) {
6593 		err = perf_event_set_output(event, output_event);
6594 		if (err)
6595 			goto err_context;
6596 	}
6597 
6598 	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
6599 	if (IS_ERR(event_file)) {
6600 		err = PTR_ERR(event_file);
6601 		goto err_context;
6602 	}
6603 
6604 	if (move_group) {
6605 		struct perf_event_context *gctx = group_leader->ctx;
6606 
6607 		mutex_lock(&gctx->mutex);
6608 		perf_remove_from_context(group_leader);
6609 		list_for_each_entry(sibling, &group_leader->sibling_list,
6610 				    group_entry) {
6611 			perf_remove_from_context(sibling);
6612 			put_ctx(gctx);
6613 		}
6614 		mutex_unlock(&gctx->mutex);
6615 		put_ctx(gctx);
6616 	}
6617 
6618 	WARN_ON_ONCE(ctx->parent_ctx);
6619 	mutex_lock(&ctx->mutex);
6620 
6621 	if (move_group) {
6622 		synchronize_rcu();
6623 		perf_install_in_context(ctx, group_leader, event->cpu);
6624 		get_ctx(ctx);
6625 		list_for_each_entry(sibling, &group_leader->sibling_list,
6626 				    group_entry) {
6627 			perf_install_in_context(ctx, sibling, event->cpu);
6628 			get_ctx(ctx);
6629 		}
6630 	}
6631 
6632 	perf_install_in_context(ctx, event, event->cpu);
6633 	++ctx->generation;
6634 	perf_unpin_context(ctx);
6635 	mutex_unlock(&ctx->mutex);
6636 
6637 	put_online_cpus();
6638 
6639 	event->owner = current;
6640 
6641 	mutex_lock(&current->perf_event_mutex);
6642 	list_add_tail(&event->owner_entry, &current->perf_event_list);
6643 	mutex_unlock(&current->perf_event_mutex);
6644 
6645 	/*
6646 	 * Precalculate sample_data sizes
6647 	 */
6648 	perf_event__header_size(event);
6649 	perf_event__id_header_size(event);
6650 
6651 	/*
6652 	 * Drop the reference on the group_event after placing the
6653 	 * new event on the sibling_list. This ensures destruction
6654 	 * of the group leader will find the pointer to itself in
6655 	 * perf_group_detach().
6656 	 */
6657 	fdput(group);
6658 	fd_install(event_fd, event_file);
6659 	return event_fd;
6660 
6661 err_context:
6662 	perf_unpin_context(ctx);
6663 	put_ctx(ctx);
6664 err_alloc:
6665 	free_event(event);
6666 err_task:
6667 	put_online_cpus();
6668 	if (task)
6669 		put_task_struct(task);
6670 err_group_fd:
6671 	fdput(group);
6672 err_fd:
6673 	put_unused_fd(event_fd);
6674 	return err;
6675 }
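/*
 * Editorial sketch, not part of the original file: there is no libc wrapper
 * for this system call, so user space typically funnels it through
 * syscall(2).  A minimal self-monitoring counter, reusing the attr setup
 * sketched after perf_copy_attr() above (error handling omitted,
 * run_workload() is a placeholder):
 *
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/syscall.h>
 *	#include <linux/perf_event.h>
 *
 *	static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
 *				    int cpu, int group_fd, unsigned long flags)
 *	{
 *		return syscall(__NR_perf_event_open, attr, pid, cpu,
 *			       group_fd, flags);
 *	}
 *
 *	int fd = perf_event_open(&attr, 0, -1, -1, 0);
 *	long long count;
 *
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	run_workload();
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *
 * pid == 0 with cpu == -1 measures the calling task on any CPU; group_fd of
 * -1 creates a new group leader.
 */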
6676 
6677 /**
6678  * perf_event_create_kernel_counter
6679  *
6680  * @attr: attributes of the counter to create
6681  * @cpu: cpu in which the counter is bound
6682  * @task: task to profile (NULL for percpu)
6683  */
6684 struct perf_event *
6685 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
6686 				 struct task_struct *task,
6687 				 perf_overflow_handler_t overflow_handler,
6688 				 void *context)
6689 {
6690 	struct perf_event_context *ctx;
6691 	struct perf_event *event;
6692 	int err;
6693 
6694 	/*
6695 	 * Get the target context (task or percpu):
6696 	 */
6697 
6698 	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
6699 				 overflow_handler, context);
6700 	if (IS_ERR(event)) {
6701 		err = PTR_ERR(event);
6702 		goto err;
6703 	}
6704 
6705 	ctx = find_get_context(event->pmu, task, cpu);
6706 	if (IS_ERR(ctx)) {
6707 		err = PTR_ERR(ctx);
6708 		goto err_free;
6709 	}
6710 
6711 	WARN_ON_ONCE(ctx->parent_ctx);
6712 	mutex_lock(&ctx->mutex);
6713 	perf_install_in_context(ctx, event, cpu);
6714 	++ctx->generation;
6715 	perf_unpin_context(ctx);
6716 	mutex_unlock(&ctx->mutex);
6717 
6718 	return event;
6719 
6720 err_free:
6721 	free_event(event);
6722 err:
6723 	return ERR_PTR(err);
6724 }
6725 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
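/*
 * Editorial sketch, not part of the original file: in-kernel users (the
 * hw_breakpoint layer and the NMI watchdog, for instance) use this roughly
 * as follows; my_overflow_handler is a placeholder and the teardown via
 * perf_event_release_kernel() is an assumption about the matching API:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *		.pinned	= 1,
 *	};
 *	struct perf_event *ev;
 *
 *	ev = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *					      my_overflow_handler, NULL);
 *	if (IS_ERR(ev))
 *		return PTR_ERR(ev);
 *	...
 *	perf_event_release_kernel(ev);
 *
 * For a sampling event the overflow handler can run from NMI context, so it
 * must be NMI safe.
 */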
6726 
6727 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
6728 {
6729 	struct perf_event_context *src_ctx;
6730 	struct perf_event_context *dst_ctx;
6731 	struct perf_event *event, *tmp;
6732 	LIST_HEAD(events);
6733 
6734 	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
6735 	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
6736 
6737 	mutex_lock(&src_ctx->mutex);
6738 	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
6739 				 event_entry) {
6740 		perf_remove_from_context(event);
6741 		put_ctx(src_ctx);
6742 		list_add(&event->event_entry, &events);
6743 	}
6744 	mutex_unlock(&src_ctx->mutex);
6745 
6746 	synchronize_rcu();
6747 
6748 	mutex_lock(&dst_ctx->mutex);
6749 	list_for_each_entry_safe(event, tmp, &events, event_entry) {
6750 		list_del(&event->event_entry);
6751 		if (event->state >= PERF_EVENT_STATE_OFF)
6752 			event->state = PERF_EVENT_STATE_INACTIVE;
6753 		perf_install_in_context(dst_ctx, event, dst_cpu);
6754 		get_ctx(dst_ctx);
6755 	}
6756 	mutex_unlock(&dst_ctx->mutex);
6757 }
6758 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
6759 
6760 static void sync_child_event(struct perf_event *child_event,
6761 			       struct task_struct *child)
6762 {
6763 	struct perf_event *parent_event = child_event->parent;
6764 	u64 child_val;
6765 
6766 	if (child_event->attr.inherit_stat)
6767 		perf_event_read_event(child_event, child);
6768 
6769 	child_val = perf_event_count(child_event);
6770 
6771 	/*
6772 	 * Add back the child's count to the parent's count:
6773 	 */
6774 	atomic64_add(child_val, &parent_event->child_count);
6775 	atomic64_add(child_event->total_time_enabled,
6776 		     &parent_event->child_total_time_enabled);
6777 	atomic64_add(child_event->total_time_running,
6778 		     &parent_event->child_total_time_running);
6779 
6780 	/*
6781 	 * Remove this event from the parent's list
6782 	 */
6783 	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6784 	mutex_lock(&parent_event->child_mutex);
6785 	list_del_init(&child_event->child_list);
6786 	mutex_unlock(&parent_event->child_mutex);
6787 
6788 	/*
6789 	 * Release the parent event, if this was the last
6790 	 * reference to it.
6791 	 */
6792 	put_event(parent_event);
6793 }
6794 
6795 static void
6796 __perf_event_exit_task(struct perf_event *child_event,
6797 			 struct perf_event_context *child_ctx,
6798 			 struct task_struct *child)
6799 {
6800 	if (child_event->parent) {
6801 		raw_spin_lock_irq(&child_ctx->lock);
6802 		perf_group_detach(child_event);
6803 		raw_spin_unlock_irq(&child_ctx->lock);
6804 	}
6805 
6806 	perf_remove_from_context(child_event);
6807 
6808 	/*
6809 	 * It can happen that the parent exits first, and has events
6810 	 * that are still around due to the child reference. These
6811 	 * events need to be zapped.
6812 	 */
6813 	if (child_event->parent) {
6814 		sync_child_event(child_event, child);
6815 		free_event(child_event);
6816 	}
6817 }
6818 
6819 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
6820 {
6821 	struct perf_event *child_event, *tmp;
6822 	struct perf_event_context *child_ctx;
6823 	unsigned long flags;
6824 
6825 	if (likely(!child->perf_event_ctxp[ctxn])) {
6826 		perf_event_task(child, NULL, 0);
6827 		return;
6828 	}
6829 
6830 	local_irq_save(flags);
6831 	/*
6832 	 * We can't reschedule here because interrupts are disabled,
6833 	 * and either child is current or it is a task that can't be
6834 	 * scheduled, so we are now safe from rescheduling changing
6835 	 * our context.
6836 	 */
6837 	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
6838 
6839 	/*
6840 	 * Take the context lock here so that if find_get_context is
6841 	 * reading child->perf_event_ctxp, we wait until it has
6842 	 * incremented the context's refcount before we do put_ctx below.
6843 	 */
6844 	raw_spin_lock(&child_ctx->lock);
6845 	task_ctx_sched_out(child_ctx);
6846 	child->perf_event_ctxp[ctxn] = NULL;
6847 	/*
6848 	 * If this context is a clone, unclone it so it can't get
6849 	 * swapped to another process while we're removing all
6850 	 * the events from it.
6851 	 */
6852 	unclone_ctx(child_ctx);
6853 	update_context_time(child_ctx);
6854 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6855 
6856 	/*
6857 	 * Report the task dead after unscheduling the events so that we
6858 	 * won't get any samples after PERF_RECORD_EXIT. We can however still
6859 	 * get a few PERF_RECORD_READ events.
6860 	 */
6861 	perf_event_task(child, child_ctx, 0);
6862 
6863 	/*
6864 	 * We can recurse on the same lock type through:
6865 	 *
6866 	 *   __perf_event_exit_task()
6867 	 *     sync_child_event()
6868 	 *       put_event()
6869 	 *         mutex_lock(&ctx->mutex)
6870 	 *
6871 	 * But since it's the parent context it won't be the same instance.
6872 	 */
6873 	mutex_lock(&child_ctx->mutex);
6874 
6875 again:
6876 	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
6877 				 group_entry)
6878 		__perf_event_exit_task(child_event, child_ctx, child);
6879 
6880 	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
6881 				 group_entry)
6882 		__perf_event_exit_task(child_event, child_ctx, child);
6883 
6884 	/*
6885 	 * If the last event was a group event, it will have appended all
6886 	 * its siblings to the list, but we obtained 'tmp' before that which
6887 	 * will still point to the list head terminating the iteration.
6888 	 */
6889 	if (!list_empty(&child_ctx->pinned_groups) ||
6890 	    !list_empty(&child_ctx->flexible_groups))
6891 		goto again;
6892 
6893 	mutex_unlock(&child_ctx->mutex);
6894 
6895 	put_ctx(child_ctx);
6896 }
6897 
6898 /*
6899  * When a child task exits, feed back event values to parent events.
6900  */
6901 void perf_event_exit_task(struct task_struct *child)
6902 {
6903 	struct perf_event *event, *tmp;
6904 	int ctxn;
6905 
6906 	mutex_lock(&child->perf_event_mutex);
6907 	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
6908 				 owner_entry) {
6909 		list_del_init(&event->owner_entry);
6910 
6911 		/*
6912 		 * Ensure the list deletion is visible before we clear
6913 		 * the owner; this closes a race against perf_release() where
6914 		 * we need to serialize on the owner->perf_event_mutex.
6915 		 */
6916 		smp_wmb();
6917 		event->owner = NULL;
6918 	}
6919 	mutex_unlock(&child->perf_event_mutex);
6920 
6921 	for_each_task_context_nr(ctxn)
6922 		perf_event_exit_task_context(child, ctxn);
6923 }
6924 
6925 static void perf_free_event(struct perf_event *event,
6926 			    struct perf_event_context *ctx)
6927 {
6928 	struct perf_event *parent = event->parent;
6929 
6930 	if (WARN_ON_ONCE(!parent))
6931 		return;
6932 
6933 	mutex_lock(&parent->child_mutex);
6934 	list_del_init(&event->child_list);
6935 	mutex_unlock(&parent->child_mutex);
6936 
6937 	put_event(parent);
6938 
6939 	perf_group_detach(event);
6940 	list_del_event(event, ctx);
6941 	free_event(event);
6942 }
6943 
6944 /*
6945  * free an unexposed, unused context as created by inheritance by
6946  * perf_event_init_task below, used by fork() in case of failure.
6947  */
6948 void perf_event_free_task(struct task_struct *task)
6949 {
6950 	struct perf_event_context *ctx;
6951 	struct perf_event *event, *tmp;
6952 	int ctxn;
6953 
6954 	for_each_task_context_nr(ctxn) {
6955 		ctx = task->perf_event_ctxp[ctxn];
6956 		if (!ctx)
6957 			continue;
6958 
6959 		mutex_lock(&ctx->mutex);
6960 again:
6961 		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
6962 				group_entry)
6963 			perf_free_event(event, ctx);
6964 
6965 		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
6966 				group_entry)
6967 			perf_free_event(event, ctx);
6968 
6969 		if (!list_empty(&ctx->pinned_groups) ||
6970 				!list_empty(&ctx->flexible_groups))
6971 			goto again;
6972 
6973 		mutex_unlock(&ctx->mutex);
6974 
6975 		put_ctx(ctx);
6976 	}
6977 }
6978 
6979 void perf_event_delayed_put(struct task_struct *task)
6980 {
6981 	int ctxn;
6982 
6983 	for_each_task_context_nr(ctxn)
6984 		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
6985 }
6986 
6987 /*
6988  * inherit an event from parent task to child task:
6989  */
6990 static struct perf_event *
6991 inherit_event(struct perf_event *parent_event,
6992 	      struct task_struct *parent,
6993 	      struct perf_event_context *parent_ctx,
6994 	      struct task_struct *child,
6995 	      struct perf_event *group_leader,
6996 	      struct perf_event_context *child_ctx)
6997 {
6998 	struct perf_event *child_event;
6999 	unsigned long flags;
7000 
7001 	/*
7002 	 * Instead of creating recursive hierarchies of events,
7003 	 * we link inherited events back to the original parent,
7004 	 * and take a reference on that parent's refcount (see the
7005 	 * atomic_long_inc_not_zero() below):
7006 	 */
7007 	if (parent_event->parent)
7008 		parent_event = parent_event->parent;
7009 
7010 	child_event = perf_event_alloc(&parent_event->attr,
7011 					   parent_event->cpu,
7012 					   child,
7013 					   group_leader, parent_event,
7014 				           NULL, NULL);
7015 	if (IS_ERR(child_event))
7016 		return child_event;
7017 
7018 	if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
7019 		free_event(child_event);
7020 		return NULL;
7021 	}
7022 
7023 	get_ctx(child_ctx);
7024 
7025 	/*
7026 	 * Make the child state follow the state of the parent event,
7027 	 * not its attr.disabled bit.  We hold the parent's mutex,
7028 	 * so we won't race with perf_event_{en, dis}able_family.
7029 	 */
7030 	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
7031 		child_event->state = PERF_EVENT_STATE_INACTIVE;
7032 	else
7033 		child_event->state = PERF_EVENT_STATE_OFF;
7034 
7035 	if (parent_event->attr.freq) {
7036 		u64 sample_period = parent_event->hw.sample_period;
7037 		struct hw_perf_event *hwc = &child_event->hw;
7038 
7039 		hwc->sample_period = sample_period;
7040 		hwc->last_period   = sample_period;
7041 
7042 		local64_set(&hwc->period_left, sample_period);
7043 	}
7044 
7045 	child_event->ctx = child_ctx;
7046 	child_event->overflow_handler = parent_event->overflow_handler;
7047 	child_event->overflow_handler_context
7048 		= parent_event->overflow_handler_context;
7049 
7050 	/*
7051 	 * Precalculate sample_data sizes
7052 	 */
7053 	perf_event__header_size(child_event);
7054 	perf_event__id_header_size(child_event);
7055 
7056 	/*
7057 	 * Link it up in the child's context:
7058 	 */
7059 	raw_spin_lock_irqsave(&child_ctx->lock, flags);
7060 	add_event_to_ctx(child_event, child_ctx);
7061 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7062 
7063 	/*
7064 	 * Link this into the parent event's child list
7065 	 */
7066 	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7067 	mutex_lock(&parent_event->child_mutex);
7068 	list_add_tail(&child_event->child_list, &parent_event->child_list);
7069 	mutex_unlock(&parent_event->child_mutex);
7070 
7071 	return child_event;
7072 }
7073 
7074 static int inherit_group(struct perf_event *parent_event,
7075 	      struct task_struct *parent,
7076 	      struct perf_event_context *parent_ctx,
7077 	      struct task_struct *child,
7078 	      struct perf_event_context *child_ctx)
7079 {
7080 	struct perf_event *leader;
7081 	struct perf_event *sub;
7082 	struct perf_event *child_ctr;
7083 
7084 	leader = inherit_event(parent_event, parent, parent_ctx,
7085 				 child, NULL, child_ctx);
7086 	if (IS_ERR(leader))
7087 		return PTR_ERR(leader);
7088 	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
7089 		child_ctr = inherit_event(sub, parent, parent_ctx,
7090 					    child, leader, child_ctx);
7091 		if (IS_ERR(child_ctr))
7092 			return PTR_ERR(child_ctr);
7093 	}
7094 	return 0;
7095 }
7096 
7097 static int
7098 inherit_task_group(struct perf_event *event, struct task_struct *parent,
7099 		   struct perf_event_context *parent_ctx,
7100 		   struct task_struct *child, int ctxn,
7101 		   int *inherited_all)
7102 {
7103 	int ret;
7104 	struct perf_event_context *child_ctx;
7105 
7106 	if (!event->attr.inherit) {
7107 		*inherited_all = 0;
7108 		return 0;
7109 	}
7110 
7111 	child_ctx = child->perf_event_ctxp[ctxn];
7112 	if (!child_ctx) {
7113 		/*
7114 		 * This is executed from the parent task context, so
7115 		 * inherit events that have been marked for cloning.
7116 		 * First allocate and initialize a context for the
7117 		 * child.
7118 		 */
7119 
7120 		child_ctx = alloc_perf_context(event->pmu, child);
7121 		if (!child_ctx)
7122 			return -ENOMEM;
7123 
7124 		child->perf_event_ctxp[ctxn] = child_ctx;
7125 	}
7126 
7127 	ret = inherit_group(event, parent, parent_ctx,
7128 			    child, child_ctx);
7129 
7130 	if (ret)
7131 		*inherited_all = 0;
7132 
7133 	return ret;
7134 }
7135 
7136 /*
7137  * Initialize the perf_event context in task_struct
7138  */
7139 int perf_event_init_context(struct task_struct *child, int ctxn)
7140 {
7141 	struct perf_event_context *child_ctx, *parent_ctx;
7142 	struct perf_event_context *cloned_ctx;
7143 	struct perf_event *event;
7144 	struct task_struct *parent = current;
7145 	int inherited_all = 1;
7146 	unsigned long flags;
7147 	int ret = 0;
7148 
7149 	if (likely(!parent->perf_event_ctxp[ctxn]))
7150 		return 0;
7151 
7152 	/*
7153 	 * If the parent's context is a clone, pin it so it won't get
7154 	 * swapped under us.
7155 	 */
7156 	parent_ctx = perf_pin_task_context(parent, ctxn);
7157 
7158 	/*
7159 	 * No need to check if parent_ctx != NULL here; since we saw
7160 	 * it non-NULL earlier, the only reason for it to become NULL
7161 	 * is if we exit, and since we're currently in the middle of
7162 	 * a fork we can't be exiting at the same time.
7163 	 */
7164 
7165 	/*
7166 	 * Lock the parent list. No need to lock the child - not PID
7167 	 * hashed yet and not running, so nobody can access it.
7168 	 */
7169 	mutex_lock(&parent_ctx->mutex);
7170 
7171 	/*
7172 	 * We don't have to disable NMIs - we are only looking at
7173 	 * the list, not manipulating it:
7174 	 */
7175 	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
7176 		ret = inherit_task_group(event, parent, parent_ctx,
7177 					 child, ctxn, &inherited_all);
7178 		if (ret)
7179 			break;
7180 	}
7181 
7182 	/*
7183 	 * We can't hold ctx->lock when iterating the ->flexible_groups list due
7184 	 * to allocations, but we need to prevent rotation because
7185 	 * rotate_ctx() will change the list from interrupt context.
7186 	 */
7187 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7188 	parent_ctx->rotate_disable = 1;
7189 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7190 
7191 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
7192 		ret = inherit_task_group(event, parent, parent_ctx,
7193 					 child, ctxn, &inherited_all);
7194 		if (ret)
7195 			break;
7196 	}
7197 
7198 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7199 	parent_ctx->rotate_disable = 0;
7200 
7201 	child_ctx = child->perf_event_ctxp[ctxn];
7202 
7203 	if (child_ctx && inherited_all) {
7204 		/*
7205 		 * Mark the child context as a clone of the parent
7206 		 * context, or of whatever the parent is a clone of.
7207 		 *
7208 		 * Note that if the parent is a clone, the holding of
7209 		 * parent_ctx->lock prevents it from being uncloned.
7210 		 */
7211 		cloned_ctx = parent_ctx->parent_ctx;
7212 		if (cloned_ctx) {
7213 			child_ctx->parent_ctx = cloned_ctx;
7214 			child_ctx->parent_gen = parent_ctx->parent_gen;
7215 		} else {
7216 			child_ctx->parent_ctx = parent_ctx;
7217 			child_ctx->parent_gen = parent_ctx->generation;
7218 		}
7219 		get_ctx(child_ctx->parent_ctx);
7220 	}
7221 
7222 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7223 	mutex_unlock(&parent_ctx->mutex);
7224 
7225 	perf_unpin_context(parent_ctx);
7226 	put_ctx(parent_ctx);
7227 
7228 	return ret;
7229 }
7230 
7231 /*
7232  * Initialize the perf_event context in task_struct
7233  */
7234 int perf_event_init_task(struct task_struct *child)
7235 {
7236 	int ctxn, ret;
7237 
7238 	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
7239 	mutex_init(&child->perf_event_mutex);
7240 	INIT_LIST_HEAD(&child->perf_event_list);
7241 
7242 	for_each_task_context_nr(ctxn) {
7243 		ret = perf_event_init_context(child, ctxn);
7244 		if (ret)
7245 			return ret;
7246 	}
7247 
7248 	return 0;
7249 }
7250 
7251 static void __init perf_event_init_all_cpus(void)
7252 {
7253 	struct swevent_htable *swhash;
7254 	int cpu;
7255 
7256 	for_each_possible_cpu(cpu) {
7257 		swhash = &per_cpu(swevent_htable, cpu);
7258 		mutex_init(&swhash->hlist_mutex);
7259 		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
7260 	}
7261 }
7262 
7263 static void __cpuinit perf_event_init_cpu(int cpu)
7264 {
7265 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7266 
7267 	mutex_lock(&swhash->hlist_mutex);
7268 	if (swhash->hlist_refcount > 0) {
7269 		struct swevent_hlist *hlist;
7270 
7271 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
7272 		WARN_ON(!hlist);
7273 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
7274 	}
7275 	mutex_unlock(&swhash->hlist_mutex);
7276 }
7277 
7278 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
7279 static void perf_pmu_rotate_stop(struct pmu *pmu)
7280 {
7281 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
7282 
7283 	WARN_ON(!irqs_disabled());
7284 
7285 	list_del_init(&cpuctx->rotation_list);
7286 }
7287 
7288 static void __perf_event_exit_context(void *__info)
7289 {
7290 	struct perf_event_context *ctx = __info;
7291 	struct perf_event *event, *tmp;
7292 
7293 	perf_pmu_rotate_stop(ctx->pmu);
7294 
7295 	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
7296 		__perf_remove_from_context(event);
7297 	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
7298 		__perf_remove_from_context(event);
7299 }
7300 
7301 static void perf_event_exit_cpu_context(int cpu)
7302 {
7303 	struct perf_event_context *ctx;
7304 	struct pmu *pmu;
7305 	int idx;
7306 
7307 	idx = srcu_read_lock(&pmus_srcu);
7308 	list_for_each_entry_rcu(pmu, &pmus, entry) {
7309 		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
7310 
7311 		mutex_lock(&ctx->mutex);
7312 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
7313 		mutex_unlock(&ctx->mutex);
7314 	}
7315 	srcu_read_unlock(&pmus_srcu, idx);
7316 }
7317 
7318 static void perf_event_exit_cpu(int cpu)
7319 {
7320 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7321 
7322 	mutex_lock(&swhash->hlist_mutex);
7323 	swevent_hlist_release(swhash);
7324 	mutex_unlock(&swhash->hlist_mutex);
7325 
7326 	perf_event_exit_cpu_context(cpu);
7327 }
7328 #else
7329 static inline void perf_event_exit_cpu(int cpu) { }
7330 #endif
7331 
7332 static int
7333 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
7334 {
7335 	int cpu;
7336 
7337 	for_each_online_cpu(cpu)
7338 		perf_event_exit_cpu(cpu);
7339 
7340 	return NOTIFY_OK;
7341 }
7342 
7343 /*
7344  * Run the perf reboot notifier at the very last possible moment so that
7345  * the generic watchdog code runs as long as possible.
7346  */
7347 static struct notifier_block perf_reboot_notifier = {
7348 	.notifier_call = perf_reboot,
7349 	.priority = INT_MIN,
7350 };
7351 
7352 static int __cpuinit
7353 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
7354 {
7355 	unsigned int cpu = (long)hcpu;
7356 
7357 	switch (action & ~CPU_TASKS_FROZEN) {
7358 
7359 	case CPU_UP_PREPARE:
7360 	case CPU_DOWN_FAILED:
7361 		perf_event_init_cpu(cpu);
7362 		break;
7363 
7364 	case CPU_UP_CANCELED:
7365 	case CPU_DOWN_PREPARE:
7366 		perf_event_exit_cpu(cpu);
7367 		break;
7368 
7369 	default:
7370 		break;
7371 	}
7372 
7373 	return NOTIFY_OK;
7374 }
7375 
7376 void __init perf_event_init(void)
7377 {
7378 	int ret;
7379 
7380 	idr_init(&pmu_idr);
7381 
7382 	perf_event_init_all_cpus();
7383 	init_srcu_struct(&pmus_srcu);
7384 	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
7385 	perf_pmu_register(&perf_cpu_clock, NULL, -1);
7386 	perf_pmu_register(&perf_task_clock, NULL, -1);
7387 	perf_tp_register();
7388 	perf_cpu_notifier(perf_cpu_notify);
7389 	register_reboot_notifier(&perf_reboot_notifier);
7390 
7391 	ret = init_hw_breakpoint();
7392 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
7393 
7394 	/* do not patch jump label more than once per second */
7395 	jump_label_rate_limit(&perf_sched_events, HZ);
7396 
7397 	/*
7398 	 * Build time assertion that we keep the data_head at the intended
7399 	 * location.  IOW, validate that we got the __reserved[] size right.
7400 	 */
7401 	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
7402 		     != 1024);
7403 }
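/*
 * Editorial note, not part of the original file: the 1024 byte offset
 * asserted above is user-visible ABI.  A hedged sketch of how user space
 * consumes it after mmap()ing the event fd (mmap_base and consume_records()
 * are placeholders; the barrier choice is arch specific and an assumption):
 *
 *	struct perf_event_mmap_page *pc = mmap_base;
 *	__u64 head;
 *
 *	head = pc->data_head;
 *	read_barrier();
 *	consume_records(head);
 *	pc->data_tail = head;
 *
 * data_head sits at byte 1024 of the first mapped page, which is what the
 * BUILD_BUG_ON() guarantees.
 */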
7404 
7405 static int __init perf_event_sysfs_init(void)
7406 {
7407 	struct pmu *pmu;
7408 	int ret;
7409 
7410 	mutex_lock(&pmus_lock);
7411 
7412 	ret = bus_register(&pmu_bus);
7413 	if (ret)
7414 		goto unlock;
7415 
7416 	list_for_each_entry(pmu, &pmus, entry) {
7417 		if (!pmu->name || pmu->type < 0)
7418 			continue;
7419 
7420 		ret = pmu_dev_alloc(pmu);
7421 		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
7422 	}
7423 	pmu_bus_running = 1;
7424 	ret = 0;
7425 
7426 unlock:
7427 	mutex_unlock(&pmus_lock);
7428 
7429 	return ret;
7430 }
7431 device_initcall(perf_event_sysfs_init);
7432 
7433 #ifdef CONFIG_CGROUP_PERF
7434 static struct cgroup_subsys_state *perf_cgroup_create(struct cgroup *cont)
7435 {
7436 	struct perf_cgroup *jc;
7437 
7438 	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
7439 	if (!jc)
7440 		return ERR_PTR(-ENOMEM);
7441 
7442 	jc->info = alloc_percpu(struct perf_cgroup_info);
7443 	if (!jc->info) {
7444 		kfree(jc);
7445 		return ERR_PTR(-ENOMEM);
7446 	}
7447 
7448 	return &jc->css;
7449 }
7450 
7451 static void perf_cgroup_destroy(struct cgroup *cont)
7452 {
7453 	struct perf_cgroup *jc;
7454 	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
7455 			  struct perf_cgroup, css);
7456 	free_percpu(jc->info);
7457 	kfree(jc);
7458 }
7459 
7460 static int __perf_cgroup_move(void *info)
7461 {
7462 	struct task_struct *task = info;
7463 	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
7464 	return 0;
7465 }
7466 
7467 static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
7468 {
7469 	struct task_struct *task;
7470 
7471 	cgroup_taskset_for_each(task, cgrp, tset)
7472 		task_function_call(task, __perf_cgroup_move, task);
7473 }
7474 
7475 static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
7476 			     struct task_struct *task)
7477 {
7478 	/*
7479 	 * cgroup_exit() is called in the copy_process() failure path.
7480 	 * Ignore this case since the task hasn't run yet; this avoids
7481 	 * trying to poke a half-freed task state from generic code.
7482 	 */
7483 	if (!(task->flags & PF_EXITING))
7484 		return;
7485 
7486 	task_function_call(task, __perf_cgroup_move, task);
7487 }
7488 
7489 struct cgroup_subsys perf_subsys = {
7490 	.name		= "perf_event",
7491 	.subsys_id	= perf_subsys_id,
7492 	.create		= perf_cgroup_create,
7493 	.destroy	= perf_cgroup_destroy,
7494 	.exit		= perf_cgroup_exit,
7495 	.attach		= perf_cgroup_attach,
7496 
7497 	/*
7498 	 * perf_event cgroup doesn't handle nesting correctly.
7499 	 * ctx->nr_cgroups adjustments should be propagated through the
7500 	 * cgroup hierarchy.  Fix it and remove the following.
7501 	 */
7502 	.broken_hierarchy = true,
7503 };
7504 #endif /* CONFIG_CGROUP_PERF */
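/*
 * Editorial sketch, not part of the original file: with CONFIG_CGROUP_PERF
 * the PERF_FLAG_PID_CGROUP mode checked in sys_perf_event_open() takes an
 * open descriptor of a perf_event cgroup directory in place of a pid, plus
 * a real cpu number.  The mount point and group name are assumptions:
 *
 *	int cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
 *
 *	fd = perf_event_open(&attr, cgrp_fd, cpu, -1, PERF_FLAG_PID_CGROUP);
 *
 * Only threads of that cgroup are counted, and only while they run on the
 * given cpu; nesting is not handled, as the broken_hierarchy note above
 * says.
 */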
7505