1 /* 2 * Performance events core code: 3 * 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 8 * 9 * For licensing details see kernel-base/COPYING 10 */ 11 12 #include <linux/fs.h> 13 #include <linux/mm.h> 14 #include <linux/cpu.h> 15 #include <linux/smp.h> 16 #include <linux/idr.h> 17 #include <linux/file.h> 18 #include <linux/poll.h> 19 #include <linux/slab.h> 20 #include <linux/hash.h> 21 #include <linux/sysfs.h> 22 #include <linux/dcache.h> 23 #include <linux/percpu.h> 24 #include <linux/ptrace.h> 25 #include <linux/reboot.h> 26 #include <linux/vmstat.h> 27 #include <linux/device.h> 28 #include <linux/export.h> 29 #include <linux/vmalloc.h> 30 #include <linux/hardirq.h> 31 #include <linux/rculist.h> 32 #include <linux/uaccess.h> 33 #include <linux/syscalls.h> 34 #include <linux/anon_inodes.h> 35 #include <linux/kernel_stat.h> 36 #include <linux/perf_event.h> 37 #include <linux/ftrace_event.h> 38 #include <linux/hw_breakpoint.h> 39 40 #include "internal.h" 41 42 #include <asm/irq_regs.h> 43 44 struct remote_function_call { 45 struct task_struct *p; 46 int (*func)(void *info); 47 void *info; 48 int ret; 49 }; 50 51 static void remote_function(void *data) 52 { 53 struct remote_function_call *tfc = data; 54 struct task_struct *p = tfc->p; 55 56 if (p) { 57 tfc->ret = -EAGAIN; 58 if (task_cpu(p) != smp_processor_id() || !task_curr(p)) 59 return; 60 } 61 62 tfc->ret = tfc->func(tfc->info); 63 } 64 65 /** 66 * task_function_call - call a function on the cpu on which a task runs 67 * @p: the task to evaluate 68 * @func: the function to be called 69 * @info: the function call argument 70 * 71 * Calls the function @func when the task is currently running. This might 72 * be on the current CPU, which just calls the function directly. 73 * 74 * returns: @func return value, or 75 * -ESRCH - when the process isn't running 76 * -EAGAIN - when the process moved away 77 */ 78 static int 79 task_function_call(struct task_struct *p, int (*func) (void *info), void *info) 80 { 81 struct remote_function_call data = { 82 .p = p, 83 .func = func, 84 .info = info, 85 .ret = -ESRCH, /* No such (running) process */ 86 }; 87 88 if (task_curr(p)) 89 smp_call_function_single(task_cpu(p), remote_function, &data, 1); 90 91 return data.ret; 92 } 93 94 /** 95 * cpu_function_call - call a function on the cpu 96 * @func: the function to be called 97 * @info: the function call argument 98 * 99 * Calls the function @func on the remote cpu.
100 * 101 * returns: @func return value or -ENXIO when the cpu is offline 102 */ 103 static int cpu_function_call(int cpu, int (*func) (void *info), void *info) 104 { 105 struct remote_function_call data = { 106 .p = NULL, 107 .func = func, 108 .info = info, 109 .ret = -ENXIO, /* No such CPU */ 110 }; 111 112 smp_call_function_single(cpu, remote_function, &data, 1); 113 114 return data.ret; 115 } 116 117 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ 118 PERF_FLAG_FD_OUTPUT |\ 119 PERF_FLAG_PID_CGROUP) 120 121 enum event_type_t { 122 EVENT_FLEXIBLE = 0x1, 123 EVENT_PINNED = 0x2, 124 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, 125 }; 126 127 /* 128 * perf_sched_events : >0 events exist 129 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu 130 */ 131 struct jump_label_key perf_sched_events __read_mostly; 132 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); 133 134 static atomic_t nr_mmap_events __read_mostly; 135 static atomic_t nr_comm_events __read_mostly; 136 static atomic_t nr_task_events __read_mostly; 137 138 static LIST_HEAD(pmus); 139 static DEFINE_MUTEX(pmus_lock); 140 static struct srcu_struct pmus_srcu; 141 142 /* 143 * perf event paranoia level: 144 * -1 - not paranoid at all 145 * 0 - disallow raw tracepoint access for unpriv 146 * 1 - disallow cpu events for unpriv 147 * 2 - disallow kernel profiling for unpriv 148 */ 149 int sysctl_perf_event_paranoid __read_mostly = 1; 150 151 /* Minimum for 512 kiB + 1 user control page */ 152 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ 153 154 /* 155 * max perf event sample rate 156 */ 157 #define DEFAULT_MAX_SAMPLE_RATE 100000 158 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; 159 static int max_samples_per_tick __read_mostly = 160 DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); 161 162 int perf_proc_update_handler(struct ctl_table *table, int write, 163 void __user *buffer, size_t *lenp, 164 loff_t *ppos) 165 { 166 int ret = proc_dointvec(table, write, buffer, lenp, ppos); 167 168 if (ret || !write) 169 return ret; 170 171 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); 172 173 return 0; 174 } 175 176 static atomic64_t perf_event_id; 177 178 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 179 enum event_type_t event_type); 180 181 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 182 enum event_type_t event_type, 183 struct task_struct *task); 184 185 static void update_context_time(struct perf_event_context *ctx); 186 static u64 perf_event_time(struct perf_event *event); 187 188 static void ring_buffer_attach(struct perf_event *event, 189 struct ring_buffer *rb); 190 191 void __weak perf_event_print_debug(void) { } 192 193 extern __weak const char *perf_pmu_name(void) 194 { 195 return "pmu"; 196 } 197 198 static inline u64 perf_clock(void) 199 { 200 return local_clock(); 201 } 202 203 static inline struct perf_cpu_context * 204 __get_cpu_context(struct perf_event_context *ctx) 205 { 206 return this_cpu_ptr(ctx->pmu->pmu_cpu_context); 207 } 208 209 static void perf_ctx_lock(struct perf_cpu_context *cpuctx, 210 struct perf_event_context *ctx) 211 { 212 raw_spin_lock(&cpuctx->ctx.lock); 213 if (ctx) 214 raw_spin_lock(&ctx->lock); 215 } 216 217 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, 218 struct perf_event_context *ctx) 219 { 220 if (ctx) 221 raw_spin_unlock(&ctx->lock); 222 raw_spin_unlock(&cpuctx->ctx.lock); 223 } 224 225 #ifdef CONFIG_CGROUP_PERF 226 227 /* 228 * Must ensure 
cgroup is pinned (css_get) before calling 229 * this function. In other words, we cannot call this function 230 * if there is no cgroup event for the current CPU context. 231 */ 232 static inline struct perf_cgroup * 233 perf_cgroup_from_task(struct task_struct *task) 234 { 235 return container_of(task_subsys_state(task, perf_subsys_id), 236 struct perf_cgroup, css); 237 } 238 239 static inline bool 240 perf_cgroup_match(struct perf_event *event) 241 { 242 struct perf_event_context *ctx = event->ctx; 243 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 244 245 return !event->cgrp || event->cgrp == cpuctx->cgrp; 246 } 247 248 static inline void perf_get_cgroup(struct perf_event *event) 249 { 250 css_get(&event->cgrp->css); 251 } 252 253 static inline void perf_put_cgroup(struct perf_event *event) 254 { 255 css_put(&event->cgrp->css); 256 } 257 258 static inline void perf_detach_cgroup(struct perf_event *event) 259 { 260 perf_put_cgroup(event); 261 event->cgrp = NULL; 262 } 263 264 static inline int is_cgroup_event(struct perf_event *event) 265 { 266 return event->cgrp != NULL; 267 } 268 269 static inline u64 perf_cgroup_event_time(struct perf_event *event) 270 { 271 struct perf_cgroup_info *t; 272 273 t = per_cpu_ptr(event->cgrp->info, event->cpu); 274 return t->time; 275 } 276 277 static inline void __update_cgrp_time(struct perf_cgroup *cgrp) 278 { 279 struct perf_cgroup_info *info; 280 u64 now; 281 282 now = perf_clock(); 283 284 info = this_cpu_ptr(cgrp->info); 285 286 info->time += now - info->timestamp; 287 info->timestamp = now; 288 } 289 290 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) 291 { 292 struct perf_cgroup *cgrp_out = cpuctx->cgrp; 293 if (cgrp_out) 294 __update_cgrp_time(cgrp_out); 295 } 296 297 static inline void update_cgrp_time_from_event(struct perf_event *event) 298 { 299 struct perf_cgroup *cgrp; 300 301 /* 302 * ensure we access cgroup data only when needed and 303 * when we know the cgroup is pinned (css_get) 304 */ 305 if (!is_cgroup_event(event)) 306 return; 307 308 cgrp = perf_cgroup_from_task(current); 309 /* 310 * Do not update time when cgroup is not active 311 */ 312 if (cgrp == event->cgrp) 313 __update_cgrp_time(event->cgrp); 314 } 315 316 static inline void 317 perf_cgroup_set_timestamp(struct task_struct *task, 318 struct perf_event_context *ctx) 319 { 320 struct perf_cgroup *cgrp; 321 struct perf_cgroup_info *info; 322 323 /* 324 * ctx->lock held by caller 325 * ensure we do not access cgroup data 326 * unless we have the cgroup pinned (css_get) 327 */ 328 if (!task || !ctx->nr_cgroups) 329 return; 330 331 cgrp = perf_cgroup_from_task(task); 332 info = this_cpu_ptr(cgrp->info); 333 info->timestamp = ctx->timestamp; 334 } 335 336 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */ 337 #define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */ 338 339 /* 340 * reschedule events based on the cgroup constraint of task. 341 * 342 * mode SWOUT : schedule out everything 343 * mode SWIN : schedule in based on cgroup for next 344 */ 345 void perf_cgroup_switch(struct task_struct *task, int mode) 346 { 347 struct perf_cpu_context *cpuctx; 348 struct pmu *pmu; 349 unsigned long flags; 350 351 /* 352 * disable interrupts to avoid geting nr_cgroup 353 * changes via __perf_event_disable(). Also 354 * avoids preemption. 355 */ 356 local_irq_save(flags); 357 358 /* 359 * we reschedule only in the presence of cgroup 360 * constrained events. 
361 */ 362 rcu_read_lock(); 363 364 list_for_each_entry_rcu(pmu, &pmus, entry) { 365 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 366 367 /* 368 * perf_cgroup_events says at least one 369 * context on this CPU has cgroup events. 370 * 371 * ctx->nr_cgroups reports the number of cgroup 372 * events for a context. 373 */ 374 if (cpuctx->ctx.nr_cgroups > 0) { 375 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 376 perf_pmu_disable(cpuctx->ctx.pmu); 377 378 if (mode & PERF_CGROUP_SWOUT) { 379 cpu_ctx_sched_out(cpuctx, EVENT_ALL); 380 /* 381 * must not be done before ctxswout due 382 * to event_filter_match() in event_sched_out() 383 */ 384 cpuctx->cgrp = NULL; 385 } 386 387 if (mode & PERF_CGROUP_SWIN) { 388 WARN_ON_ONCE(cpuctx->cgrp); 389 /* set cgrp before ctxsw in to 390 * allow event_filter_match() to not 391 * have to pass task around 392 */ 393 cpuctx->cgrp = perf_cgroup_from_task(task); 394 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); 395 } 396 perf_pmu_enable(cpuctx->ctx.pmu); 397 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 398 } 399 } 400 401 rcu_read_unlock(); 402 403 local_irq_restore(flags); 404 } 405 406 static inline void perf_cgroup_sched_out(struct task_struct *task, 407 struct task_struct *next) 408 { 409 struct perf_cgroup *cgrp1; 410 struct perf_cgroup *cgrp2 = NULL; 411 412 /* 413 * we come here when we know perf_cgroup_events > 0 414 */ 415 cgrp1 = perf_cgroup_from_task(task); 416 417 /* 418 * next is NULL when called from perf_event_enable_on_exec() 419 * that will systematically cause a cgroup_switch() 420 */ 421 if (next) 422 cgrp2 = perf_cgroup_from_task(next); 423 424 /* 425 * only schedule out current cgroup events if we know 426 * that we are switching to a different cgroup. Otherwise, 427 * do not touch the cgroup events. 428 */ 429 if (cgrp1 != cgrp2) 430 perf_cgroup_switch(task, PERF_CGROUP_SWOUT); 431 } 432 433 static inline void perf_cgroup_sched_in(struct task_struct *prev, 434 struct task_struct *task) 435 { 436 struct perf_cgroup *cgrp1; 437 struct perf_cgroup *cgrp2 = NULL; 438 439 /* 440 * we come here when we know perf_cgroup_events > 0 441 */ 442 cgrp1 = perf_cgroup_from_task(task); 443 444 /* prev can never be NULL */ 445 cgrp2 = perf_cgroup_from_task(prev); 446 447 /* 448 * only need to schedule in cgroup events if we are changing 449 * cgroup during ctxsw. Cgroup events were not scheduled 450 * out during ctxsw if that was not the case.
451 */ 452 if (cgrp1 != cgrp2) 453 perf_cgroup_switch(task, PERF_CGROUP_SWIN); 454 } 455 456 static inline int perf_cgroup_connect(int fd, struct perf_event *event, 457 struct perf_event_attr *attr, 458 struct perf_event *group_leader) 459 { 460 struct perf_cgroup *cgrp; 461 struct cgroup_subsys_state *css; 462 struct file *file; 463 int ret = 0, fput_needed; 464 465 file = fget_light(fd, &fput_needed); 466 if (!file) 467 return -EBADF; 468 469 css = cgroup_css_from_dir(file, perf_subsys_id); 470 if (IS_ERR(css)) { 471 ret = PTR_ERR(css); 472 goto out; 473 } 474 475 cgrp = container_of(css, struct perf_cgroup, css); 476 event->cgrp = cgrp; 477 478 /* must be done before we fput() the file */ 479 perf_get_cgroup(event); 480 481 /* 482 * all events in a group must monitor 483 * the same cgroup because a task belongs 484 * to only one perf cgroup at a time 485 */ 486 if (group_leader && group_leader->cgrp != cgrp) { 487 perf_detach_cgroup(event); 488 ret = -EINVAL; 489 } 490 out: 491 fput_light(file, fput_needed); 492 return ret; 493 } 494 495 static inline void 496 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) 497 { 498 struct perf_cgroup_info *t; 499 t = per_cpu_ptr(event->cgrp->info, event->cpu); 500 event->shadow_ctx_time = now - t->timestamp; 501 } 502 503 static inline void 504 perf_cgroup_defer_enabled(struct perf_event *event) 505 { 506 /* 507 * when the current task's perf cgroup does not match 508 * the event's, we need to remember to call the 509 * perf_mark_enable() function the first time a task with 510 * a matching perf cgroup is scheduled in. 511 */ 512 if (is_cgroup_event(event) && !perf_cgroup_match(event)) 513 event->cgrp_defer_enabled = 1; 514 } 515 516 static inline void 517 perf_cgroup_mark_enabled(struct perf_event *event, 518 struct perf_event_context *ctx) 519 { 520 struct perf_event *sub; 521 u64 tstamp = perf_event_time(event); 522 523 if (!event->cgrp_defer_enabled) 524 return; 525 526 event->cgrp_defer_enabled = 0; 527 528 event->tstamp_enabled = tstamp - event->total_time_enabled; 529 list_for_each_entry(sub, &event->sibling_list, group_entry) { 530 if (sub->state >= PERF_EVENT_STATE_INACTIVE) { 531 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 532 sub->cgrp_defer_enabled = 0; 533 } 534 } 535 } 536 #else /* !CONFIG_CGROUP_PERF */ 537 538 static inline bool 539 perf_cgroup_match(struct perf_event *event) 540 { 541 return true; 542 } 543 544 static inline void perf_detach_cgroup(struct perf_event *event) 545 {} 546 547 static inline int is_cgroup_event(struct perf_event *event) 548 { 549 return 0; 550 } 551 552 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) 553 { 554 return 0; 555 } 556 557 static inline void update_cgrp_time_from_event(struct perf_event *event) 558 { 559 } 560 561 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) 562 { 563 } 564 565 static inline void perf_cgroup_sched_out(struct task_struct *task, 566 struct task_struct *next) 567 { 568 } 569 570 static inline void perf_cgroup_sched_in(struct task_struct *prev, 571 struct task_struct *task) 572 { 573 } 574 575 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, 576 struct perf_event_attr *attr, 577 struct perf_event *group_leader) 578 { 579 return -EINVAL; 580 } 581 582 static inline void 583 perf_cgroup_set_timestamp(struct task_struct *task, 584 struct perf_event_context *ctx) 585 { 586 } 587 588 void 589 perf_cgroup_switch(struct task_struct *task, struct task_struct *next) 590 { 591 } 592 
593 static inline void 594 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) 595 { 596 } 597 598 static inline u64 perf_cgroup_event_time(struct perf_event *event) 599 { 600 return 0; 601 } 602 603 static inline void 604 perf_cgroup_defer_enabled(struct perf_event *event) 605 { 606 } 607 608 static inline void 609 perf_cgroup_mark_enabled(struct perf_event *event, 610 struct perf_event_context *ctx) 611 { 612 } 613 #endif 614 615 void perf_pmu_disable(struct pmu *pmu) 616 { 617 int *count = this_cpu_ptr(pmu->pmu_disable_count); 618 if (!(*count)++) 619 pmu->pmu_disable(pmu); 620 } 621 622 void perf_pmu_enable(struct pmu *pmu) 623 { 624 int *count = this_cpu_ptr(pmu->pmu_disable_count); 625 if (!--(*count)) 626 pmu->pmu_enable(pmu); 627 } 628 629 static DEFINE_PER_CPU(struct list_head, rotation_list); 630 631 /* 632 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized 633 * because they're strictly cpu affine and rotate_start is called with IRQs 634 * disabled, while rotate_context is called from IRQ context. 635 */ 636 static void perf_pmu_rotate_start(struct pmu *pmu) 637 { 638 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 639 struct list_head *head = &__get_cpu_var(rotation_list); 640 641 WARN_ON(!irqs_disabled()); 642 643 if (list_empty(&cpuctx->rotation_list)) 644 list_add(&cpuctx->rotation_list, head); 645 } 646 647 static void get_ctx(struct perf_event_context *ctx) 648 { 649 WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); 650 } 651 652 static void put_ctx(struct perf_event_context *ctx) 653 { 654 if (atomic_dec_and_test(&ctx->refcount)) { 655 if (ctx->parent_ctx) 656 put_ctx(ctx->parent_ctx); 657 if (ctx->task) 658 put_task_struct(ctx->task); 659 kfree_rcu(ctx, rcu_head); 660 } 661 } 662 663 static void unclone_ctx(struct perf_event_context *ctx) 664 { 665 if (ctx->parent_ctx) { 666 put_ctx(ctx->parent_ctx); 667 ctx->parent_ctx = NULL; 668 } 669 } 670 671 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) 672 { 673 /* 674 * only top level events have the pid namespace they were created in 675 */ 676 if (event->parent) 677 event = event->parent; 678 679 return task_tgid_nr_ns(p, event->ns); 680 } 681 682 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) 683 { 684 /* 685 * only top level events have the pid namespace they were created in 686 */ 687 if (event->parent) 688 event = event->parent; 689 690 return task_pid_nr_ns(p, event->ns); 691 } 692 693 /* 694 * If we inherit events we want to return the parent event id 695 * to userspace. 696 */ 697 static u64 primary_event_id(struct perf_event *event) 698 { 699 u64 id = event->id; 700 701 if (event->parent) 702 id = event->parent->id; 703 704 return id; 705 } 706 707 /* 708 * Get the perf_event_context for a task and lock it. 709 * This has to cope with with the fact that until it is locked, 710 * the context could get moved to another task. 711 */ 712 static struct perf_event_context * 713 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) 714 { 715 struct perf_event_context *ctx; 716 717 rcu_read_lock(); 718 retry: 719 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); 720 if (ctx) { 721 /* 722 * If this context is a clone of another, it might 723 * get swapped for another underneath us by 724 * perf_event_task_sched_out, though the 725 * rcu_read_lock() protects us from any context 726 * getting freed. 
Lock the context and check if it 727 * got swapped before we could get the lock, and retry 728 * if so. If we locked the right context, then it 729 * can't get swapped on us any more. 730 */ 731 raw_spin_lock_irqsave(&ctx->lock, *flags); 732 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { 733 raw_spin_unlock_irqrestore(&ctx->lock, *flags); 734 goto retry; 735 } 736 737 if (!atomic_inc_not_zero(&ctx->refcount)) { 738 raw_spin_unlock_irqrestore(&ctx->lock, *flags); 739 ctx = NULL; 740 } 741 } 742 rcu_read_unlock(); 743 return ctx; 744 } 745 746 /* 747 * Get the context for a task and increment its pin_count so it 748 * can't get swapped to another task. This also increments its 749 * reference count so that the context can't get freed. 750 */ 751 static struct perf_event_context * 752 perf_pin_task_context(struct task_struct *task, int ctxn) 753 { 754 struct perf_event_context *ctx; 755 unsigned long flags; 756 757 ctx = perf_lock_task_context(task, ctxn, &flags); 758 if (ctx) { 759 ++ctx->pin_count; 760 raw_spin_unlock_irqrestore(&ctx->lock, flags); 761 } 762 return ctx; 763 } 764 765 static void perf_unpin_context(struct perf_event_context *ctx) 766 { 767 unsigned long flags; 768 769 raw_spin_lock_irqsave(&ctx->lock, flags); 770 --ctx->pin_count; 771 raw_spin_unlock_irqrestore(&ctx->lock, flags); 772 } 773 774 /* 775 * Update the record of the current time in a context. 776 */ 777 static void update_context_time(struct perf_event_context *ctx) 778 { 779 u64 now = perf_clock(); 780 781 ctx->time += now - ctx->timestamp; 782 ctx->timestamp = now; 783 } 784 785 static u64 perf_event_time(struct perf_event *event) 786 { 787 struct perf_event_context *ctx = event->ctx; 788 789 if (is_cgroup_event(event)) 790 return perf_cgroup_event_time(event); 791 792 return ctx ? ctx->time : 0; 793 } 794 795 /* 796 * Update the total_time_enabled and total_time_running fields for a event. 797 * The caller of this function needs to hold the ctx->lock. 798 */ 799 static void update_event_times(struct perf_event *event) 800 { 801 struct perf_event_context *ctx = event->ctx; 802 u64 run_end; 803 804 if (event->state < PERF_EVENT_STATE_INACTIVE || 805 event->group_leader->state < PERF_EVENT_STATE_INACTIVE) 806 return; 807 /* 808 * in cgroup mode, time_enabled represents 809 * the time the event was enabled AND active 810 * tasks were in the monitored cgroup. This is 811 * independent of the activity of the context as 812 * there may be a mix of cgroup and non-cgroup events. 813 * 814 * That is why we treat cgroup events differently 815 * here. 816 */ 817 if (is_cgroup_event(event)) 818 run_end = perf_event_time(event); 819 else if (ctx->is_active) 820 run_end = ctx->time; 821 else 822 run_end = event->tstamp_stopped; 823 824 event->total_time_enabled = run_end - event->tstamp_enabled; 825 826 if (event->state == PERF_EVENT_STATE_INACTIVE) 827 run_end = event->tstamp_stopped; 828 else 829 run_end = perf_event_time(event); 830 831 event->total_time_running = run_end - event->tstamp_running; 832 833 } 834 835 /* 836 * Update total_time_enabled and total_time_running for all events in a group. 
837 */ 838 static void update_group_times(struct perf_event *leader) 839 { 840 struct perf_event *event; 841 842 update_event_times(leader); 843 list_for_each_entry(event, &leader->sibling_list, group_entry) 844 update_event_times(event); 845 } 846 847 static struct list_head * 848 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) 849 { 850 if (event->attr.pinned) 851 return &ctx->pinned_groups; 852 else 853 return &ctx->flexible_groups; 854 } 855 856 /* 857 * Add a event from the lists for its context. 858 * Must be called with ctx->mutex and ctx->lock held. 859 */ 860 static void 861 list_add_event(struct perf_event *event, struct perf_event_context *ctx) 862 { 863 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); 864 event->attach_state |= PERF_ATTACH_CONTEXT; 865 866 /* 867 * If we're a stand alone event or group leader, we go to the context 868 * list, group events are kept attached to the group so that 869 * perf_group_detach can, at all times, locate all siblings. 870 */ 871 if (event->group_leader == event) { 872 struct list_head *list; 873 874 if (is_software_event(event)) 875 event->group_flags |= PERF_GROUP_SOFTWARE; 876 877 list = ctx_group_list(event, ctx); 878 list_add_tail(&event->group_entry, list); 879 } 880 881 if (is_cgroup_event(event)) 882 ctx->nr_cgroups++; 883 884 list_add_rcu(&event->event_entry, &ctx->event_list); 885 if (!ctx->nr_events) 886 perf_pmu_rotate_start(ctx->pmu); 887 ctx->nr_events++; 888 if (event->attr.inherit_stat) 889 ctx->nr_stat++; 890 } 891 892 /* 893 * Called at perf_event creation and when events are attached/detached from a 894 * group. 895 */ 896 static void perf_event__read_size(struct perf_event *event) 897 { 898 int entry = sizeof(u64); /* value */ 899 int size = 0; 900 int nr = 1; 901 902 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 903 size += sizeof(u64); 904 905 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 906 size += sizeof(u64); 907 908 if (event->attr.read_format & PERF_FORMAT_ID) 909 entry += sizeof(u64); 910 911 if (event->attr.read_format & PERF_FORMAT_GROUP) { 912 nr += event->group_leader->nr_siblings; 913 size += sizeof(u64); 914 } 915 916 size += entry * nr; 917 event->read_size = size; 918 } 919 920 static void perf_event__header_size(struct perf_event *event) 921 { 922 struct perf_sample_data *data; 923 u64 sample_type = event->attr.sample_type; 924 u16 size = 0; 925 926 perf_event__read_size(event); 927 928 if (sample_type & PERF_SAMPLE_IP) 929 size += sizeof(data->ip); 930 931 if (sample_type & PERF_SAMPLE_ADDR) 932 size += sizeof(data->addr); 933 934 if (sample_type & PERF_SAMPLE_PERIOD) 935 size += sizeof(data->period); 936 937 if (sample_type & PERF_SAMPLE_READ) 938 size += event->read_size; 939 940 event->header_size = size; 941 } 942 943 static void perf_event__id_header_size(struct perf_event *event) 944 { 945 struct perf_sample_data *data; 946 u64 sample_type = event->attr.sample_type; 947 u16 size = 0; 948 949 if (sample_type & PERF_SAMPLE_TID) 950 size += sizeof(data->tid_entry); 951 952 if (sample_type & PERF_SAMPLE_TIME) 953 size += sizeof(data->time); 954 955 if (sample_type & PERF_SAMPLE_ID) 956 size += sizeof(data->id); 957 958 if (sample_type & PERF_SAMPLE_STREAM_ID) 959 size += sizeof(data->stream_id); 960 961 if (sample_type & PERF_SAMPLE_CPU) 962 size += sizeof(data->cpu_entry); 963 964 event->id_header_size = size; 965 } 966 967 static void perf_group_attach(struct perf_event *event) 968 { 969 struct perf_event *group_leader = 
event->group_leader, *pos; 970 971 /* 972 * We can have double attach due to group movement in perf_event_open. 973 */ 974 if (event->attach_state & PERF_ATTACH_GROUP) 975 return; 976 977 event->attach_state |= PERF_ATTACH_GROUP; 978 979 if (group_leader == event) 980 return; 981 982 if (group_leader->group_flags & PERF_GROUP_SOFTWARE && 983 !is_software_event(event)) 984 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; 985 986 list_add_tail(&event->group_entry, &group_leader->sibling_list); 987 group_leader->nr_siblings++; 988 989 perf_event__header_size(group_leader); 990 991 list_for_each_entry(pos, &group_leader->sibling_list, group_entry) 992 perf_event__header_size(pos); 993 } 994 995 /* 996 * Remove an event from the lists for its context. 997 * Must be called with ctx->mutex and ctx->lock held. 998 */ 999 static void 1000 list_del_event(struct perf_event *event, struct perf_event_context *ctx) 1001 { 1002 struct perf_cpu_context *cpuctx; 1003 /* 1004 * We can have double detach due to exit/hot-unplug + close. 1005 */ 1006 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) 1007 return; 1008 1009 event->attach_state &= ~PERF_ATTACH_CONTEXT; 1010 1011 if (is_cgroup_event(event)) { 1012 ctx->nr_cgroups--; 1013 cpuctx = __get_cpu_context(ctx); 1014 /* 1015 * if there are no more cgroup events 1016 * then clear cgrp to avoid a stale pointer 1017 * in update_cgrp_time_from_cpuctx() 1018 */ 1019 if (!ctx->nr_cgroups) 1020 cpuctx->cgrp = NULL; 1021 } 1022 1023 ctx->nr_events--; 1024 if (event->attr.inherit_stat) 1025 ctx->nr_stat--; 1026 1027 list_del_rcu(&event->event_entry); 1028 1029 if (event->group_leader == event) 1030 list_del_init(&event->group_entry); 1031 1032 update_group_times(event); 1033 1034 /* 1035 * If event was in error state, then keep it 1036 * that way, otherwise bogus counts will be 1037 * returned on read(). The only way to get out 1038 * of error state is by explicit re-enabling 1039 * of the event 1040 */ 1041 if (event->state > PERF_EVENT_STATE_OFF) 1042 event->state = PERF_EVENT_STATE_OFF; 1043 } 1044 1045 static void perf_group_detach(struct perf_event *event) 1046 { 1047 struct perf_event *sibling, *tmp; 1048 struct list_head *list = NULL; 1049 1050 /* 1051 * We can have double detach due to exit/hot-unplug + close. 1052 */ 1053 if (!(event->attach_state & PERF_ATTACH_GROUP)) 1054 return; 1055 1056 event->attach_state &= ~PERF_ATTACH_GROUP; 1057 1058 /* 1059 * If this is a sibling, remove it from its group. 1060 */ 1061 if (event->group_leader != event) { 1062 list_del_init(&event->group_entry); 1063 event->group_leader->nr_siblings--; 1064 goto out; 1065 } 1066 1067 if (!list_empty(&event->group_entry)) 1068 list = &event->group_entry; 1069 1070 /* 1071 * If this was a group event with sibling events then 1072 * upgrade the siblings to singleton events by adding them 1073 * to whatever list we are on.
1074 */ 1075 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { 1076 if (list) 1077 list_move_tail(&sibling->group_entry, list); 1078 sibling->group_leader = sibling; 1079 1080 /* Inherit group flags from the previous leader */ 1081 sibling->group_flags = event->group_flags; 1082 } 1083 1084 out: 1085 perf_event__header_size(event->group_leader); 1086 1087 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) 1088 perf_event__header_size(tmp); 1089 } 1090 1091 static inline int 1092 event_filter_match(struct perf_event *event) 1093 { 1094 return (event->cpu == -1 || event->cpu == smp_processor_id()) 1095 && perf_cgroup_match(event); 1096 } 1097 1098 static void 1099 event_sched_out(struct perf_event *event, 1100 struct perf_cpu_context *cpuctx, 1101 struct perf_event_context *ctx) 1102 { 1103 u64 tstamp = perf_event_time(event); 1104 u64 delta; 1105 /* 1106 * An event which could not be activated because of 1107 * filter mismatch still needs to have its timings 1108 * maintained, otherwise bogus information is return 1109 * via read() for time_enabled, time_running: 1110 */ 1111 if (event->state == PERF_EVENT_STATE_INACTIVE 1112 && !event_filter_match(event)) { 1113 delta = tstamp - event->tstamp_stopped; 1114 event->tstamp_running += delta; 1115 event->tstamp_stopped = tstamp; 1116 } 1117 1118 if (event->state != PERF_EVENT_STATE_ACTIVE) 1119 return; 1120 1121 event->state = PERF_EVENT_STATE_INACTIVE; 1122 if (event->pending_disable) { 1123 event->pending_disable = 0; 1124 event->state = PERF_EVENT_STATE_OFF; 1125 } 1126 event->tstamp_stopped = tstamp; 1127 event->pmu->del(event, 0); 1128 event->oncpu = -1; 1129 1130 if (!is_software_event(event)) 1131 cpuctx->active_oncpu--; 1132 ctx->nr_active--; 1133 if (event->attr.exclusive || !cpuctx->active_oncpu) 1134 cpuctx->exclusive = 0; 1135 } 1136 1137 static void 1138 group_sched_out(struct perf_event *group_event, 1139 struct perf_cpu_context *cpuctx, 1140 struct perf_event_context *ctx) 1141 { 1142 struct perf_event *event; 1143 int state = group_event->state; 1144 1145 event_sched_out(group_event, cpuctx, ctx); 1146 1147 /* 1148 * Schedule out siblings (if any): 1149 */ 1150 list_for_each_entry(event, &group_event->sibling_list, group_entry) 1151 event_sched_out(event, cpuctx, ctx); 1152 1153 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) 1154 cpuctx->exclusive = 0; 1155 } 1156 1157 /* 1158 * Cross CPU call to remove a performance event 1159 * 1160 * We disable the event on the hardware level first. After that we 1161 * remove it from the context list. 1162 */ 1163 static int __perf_remove_from_context(void *info) 1164 { 1165 struct perf_event *event = info; 1166 struct perf_event_context *ctx = event->ctx; 1167 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1168 1169 raw_spin_lock(&ctx->lock); 1170 event_sched_out(event, cpuctx, ctx); 1171 list_del_event(event, ctx); 1172 if (!ctx->nr_events && cpuctx->task_ctx == ctx) { 1173 ctx->is_active = 0; 1174 cpuctx->task_ctx = NULL; 1175 } 1176 raw_spin_unlock(&ctx->lock); 1177 1178 return 0; 1179 } 1180 1181 1182 /* 1183 * Remove the event from a task's (or a CPU's) list of events. 1184 * 1185 * CPU events are removed with a smp call. For task events we only 1186 * call when the task is on a CPU. 1187 * 1188 * If event->ctx is a cloned context, callers must make sure that 1189 * every task struct that event->ctx->task could possibly point to 1190 * remains valid. 
This is OK when called from perf_release since 1191 * that only calls us on the top-level context, which can't be a clone. 1192 * When called from perf_event_exit_task, it's OK because the 1193 * context has been detached from its task. 1194 */ 1195 static void perf_remove_from_context(struct perf_event *event) 1196 { 1197 struct perf_event_context *ctx = event->ctx; 1198 struct task_struct *task = ctx->task; 1199 1200 lockdep_assert_held(&ctx->mutex); 1201 1202 if (!task) { 1203 /* 1204 * Per cpu events are removed via an smp call and 1205 * the removal is always successful. 1206 */ 1207 cpu_function_call(event->cpu, __perf_remove_from_context, event); 1208 return; 1209 } 1210 1211 retry: 1212 if (!task_function_call(task, __perf_remove_from_context, event)) 1213 return; 1214 1215 raw_spin_lock_irq(&ctx->lock); 1216 /* 1217 * If we failed to find a running task, but find the context active now 1218 * that we've acquired the ctx->lock, retry. 1219 */ 1220 if (ctx->is_active) { 1221 raw_spin_unlock_irq(&ctx->lock); 1222 goto retry; 1223 } 1224 1225 /* 1226 * Since the task isn't running, it's safe to remove the event; our 1227 * holding the ctx->lock ensures the task won't get scheduled in. 1228 */ 1229 list_del_event(event, ctx); 1230 raw_spin_unlock_irq(&ctx->lock); 1231 } 1232 1233 /* 1234 * Cross CPU call to disable a performance event 1235 */ 1236 static int __perf_event_disable(void *info) 1237 { 1238 struct perf_event *event = info; 1239 struct perf_event_context *ctx = event->ctx; 1240 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1241 1242 /* 1243 * If this is a per-task event, we need to check whether this 1244 * event's task is the current task on this cpu. 1245 * 1246 * Can trigger due to concurrent perf_event_context_sched_out() 1247 * flipping contexts around. 1248 */ 1249 if (ctx->task && cpuctx->task_ctx != ctx) 1250 return -EINVAL; 1251 1252 raw_spin_lock(&ctx->lock); 1253 1254 /* 1255 * If the event is on, turn it off. 1256 * If it is in error state, leave it in error state. 1257 */ 1258 if (event->state >= PERF_EVENT_STATE_INACTIVE) { 1259 update_context_time(ctx); 1260 update_cgrp_time_from_event(event); 1261 update_group_times(event); 1262 if (event == event->group_leader) 1263 group_sched_out(event, cpuctx, ctx); 1264 else 1265 event_sched_out(event, cpuctx, ctx); 1266 event->state = PERF_EVENT_STATE_OFF; 1267 } 1268 1269 raw_spin_unlock(&ctx->lock); 1270 1271 return 0; 1272 } 1273 1274 /* 1275 * Disable an event. 1276 * 1277 * If event->ctx is a cloned context, callers must make sure that 1278 * every task struct that event->ctx->task could possibly point to 1279 * remains valid. This condition is satisfied when called through 1280 * perf_event_for_each_child or perf_event_for_each because they 1281 * hold the top-level event's child_mutex, so any descendant that 1282 * goes to exit will block in sync_child_event. 1283 * When called from perf_pending_event it's OK because event->ctx 1284 * is the current context on this CPU and preemption is disabled, 1285 * hence we can't get into perf_event_task_sched_out for this context.
1286 */ 1287 void perf_event_disable(struct perf_event *event) 1288 { 1289 struct perf_event_context *ctx = event->ctx; 1290 struct task_struct *task = ctx->task; 1291 1292 if (!task) { 1293 /* 1294 * Disable the event on the cpu that it's on 1295 */ 1296 cpu_function_call(event->cpu, __perf_event_disable, event); 1297 return; 1298 } 1299 1300 retry: 1301 if (!task_function_call(task, __perf_event_disable, event)) 1302 return; 1303 1304 raw_spin_lock_irq(&ctx->lock); 1305 /* 1306 * If the event is still active, we need to retry the cross-call. 1307 */ 1308 if (event->state == PERF_EVENT_STATE_ACTIVE) { 1309 raw_spin_unlock_irq(&ctx->lock); 1310 /* 1311 * Reload the task pointer, it might have been changed by 1312 * a concurrent perf_event_context_sched_out(). 1313 */ 1314 task = ctx->task; 1315 goto retry; 1316 } 1317 1318 /* 1319 * Since we have the lock this context can't be scheduled 1320 * in, so we can change the state safely. 1321 */ 1322 if (event->state == PERF_EVENT_STATE_INACTIVE) { 1323 update_group_times(event); 1324 event->state = PERF_EVENT_STATE_OFF; 1325 } 1326 raw_spin_unlock_irq(&ctx->lock); 1327 } 1328 1329 static void perf_set_shadow_time(struct perf_event *event, 1330 struct perf_event_context *ctx, 1331 u64 tstamp) 1332 { 1333 /* 1334 * use the correct time source for the time snapshot 1335 * 1336 * We could get by without this by leveraging the 1337 * fact that to get to this function, the caller 1338 * has most likely already called update_context_time() 1339 * and update_cgrp_time_xx() and thus both timestamp 1340 * are identical (or very close). Given that tstamp is, 1341 * already adjusted for cgroup, we could say that: 1342 * tstamp - ctx->timestamp 1343 * is equivalent to 1344 * tstamp - cgrp->timestamp. 1345 * 1346 * Then, in perf_output_read(), the calculation would 1347 * work with no changes because: 1348 * - event is guaranteed scheduled in 1349 * - no scheduled out in between 1350 * - thus the timestamp would be the same 1351 * 1352 * But this is a bit hairy. 1353 * 1354 * So instead, we have an explicit cgroup call to remain 1355 * within the time time source all along. We believe it 1356 * is cleaner and simpler to understand. 1357 */ 1358 if (is_cgroup_event(event)) 1359 perf_cgroup_set_shadow_time(event, tstamp); 1360 else 1361 event->shadow_ctx_time = tstamp - ctx->timestamp; 1362 } 1363 1364 #define MAX_INTERRUPTS (~0ULL) 1365 1366 static void perf_log_throttle(struct perf_event *event, int enable); 1367 1368 static int 1369 event_sched_in(struct perf_event *event, 1370 struct perf_cpu_context *cpuctx, 1371 struct perf_event_context *ctx) 1372 { 1373 u64 tstamp = perf_event_time(event); 1374 1375 if (event->state <= PERF_EVENT_STATE_OFF) 1376 return 0; 1377 1378 event->state = PERF_EVENT_STATE_ACTIVE; 1379 event->oncpu = smp_processor_id(); 1380 1381 /* 1382 * Unthrottle events, since we scheduled we might have missed several 1383 * ticks already, also for a heavily scheduling task there is little 1384 * guarantee it'll get a tick in a timely manner. 
1385 */ 1386 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { 1387 perf_log_throttle(event, 1); 1388 event->hw.interrupts = 0; 1389 } 1390 1391 /* 1392 * The new state must be visible before we turn it on in the hardware: 1393 */ 1394 smp_wmb(); 1395 1396 if (event->pmu->add(event, PERF_EF_START)) { 1397 event->state = PERF_EVENT_STATE_INACTIVE; 1398 event->oncpu = -1; 1399 return -EAGAIN; 1400 } 1401 1402 event->tstamp_running += tstamp - event->tstamp_stopped; 1403 1404 perf_set_shadow_time(event, ctx, tstamp); 1405 1406 if (!is_software_event(event)) 1407 cpuctx->active_oncpu++; 1408 ctx->nr_active++; 1409 1410 if (event->attr.exclusive) 1411 cpuctx->exclusive = 1; 1412 1413 return 0; 1414 } 1415 1416 static int 1417 group_sched_in(struct perf_event *group_event, 1418 struct perf_cpu_context *cpuctx, 1419 struct perf_event_context *ctx) 1420 { 1421 struct perf_event *event, *partial_group = NULL; 1422 struct pmu *pmu = group_event->pmu; 1423 u64 now = ctx->time; 1424 bool simulate = false; 1425 1426 if (group_event->state == PERF_EVENT_STATE_OFF) 1427 return 0; 1428 1429 pmu->start_txn(pmu); 1430 1431 if (event_sched_in(group_event, cpuctx, ctx)) { 1432 pmu->cancel_txn(pmu); 1433 return -EAGAIN; 1434 } 1435 1436 /* 1437 * Schedule in siblings as one group (if any): 1438 */ 1439 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 1440 if (event_sched_in(event, cpuctx, ctx)) { 1441 partial_group = event; 1442 goto group_error; 1443 } 1444 } 1445 1446 if (!pmu->commit_txn(pmu)) 1447 return 0; 1448 1449 group_error: 1450 /* 1451 * Groups can be scheduled in as one unit only, so undo any 1452 * partial group before returning: 1453 * The events up to the failed event are scheduled out normally, 1454 * tstamp_stopped will be updated. 1455 * 1456 * The failed events and the remaining siblings need to have 1457 * their timings updated as if they had gone thru event_sched_in() 1458 * and event_sched_out(). This is required to get consistent timings 1459 * across the group. This also takes care of the case where the group 1460 * could never be scheduled by ensuring tstamp_stopped is set to mark 1461 * the time the event was actually stopped, such that time delta 1462 * calculation in update_event_times() is correct. 1463 */ 1464 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 1465 if (event == partial_group) 1466 simulate = true; 1467 1468 if (simulate) { 1469 event->tstamp_running += now - event->tstamp_stopped; 1470 event->tstamp_stopped = now; 1471 } else { 1472 event_sched_out(event, cpuctx, ctx); 1473 } 1474 } 1475 event_sched_out(group_event, cpuctx, ctx); 1476 1477 pmu->cancel_txn(pmu); 1478 1479 return -EAGAIN; 1480 } 1481 1482 /* 1483 * Work out whether we can put this event group on the CPU now. 1484 */ 1485 static int group_can_go_on(struct perf_event *event, 1486 struct perf_cpu_context *cpuctx, 1487 int can_add_hw) 1488 { 1489 /* 1490 * Groups consisting entirely of software events can always go on. 1491 */ 1492 if (event->group_flags & PERF_GROUP_SOFTWARE) 1493 return 1; 1494 /* 1495 * If an exclusive group is already on, no other hardware 1496 * events can go on. 1497 */ 1498 if (cpuctx->exclusive) 1499 return 0; 1500 /* 1501 * If this group is exclusive and there are already 1502 * events on the CPU, it can't go on. 1503 */ 1504 if (event->attr.exclusive && cpuctx->active_oncpu) 1505 return 0; 1506 /* 1507 * Otherwise, try to add it if all previous groups were able 1508 * to go on. 
1509 */ 1510 return can_add_hw; 1511 } 1512 1513 static void add_event_to_ctx(struct perf_event *event, 1514 struct perf_event_context *ctx) 1515 { 1516 u64 tstamp = perf_event_time(event); 1517 1518 list_add_event(event, ctx); 1519 perf_group_attach(event); 1520 event->tstamp_enabled = tstamp; 1521 event->tstamp_running = tstamp; 1522 event->tstamp_stopped = tstamp; 1523 } 1524 1525 static void task_ctx_sched_out(struct perf_event_context *ctx); 1526 static void 1527 ctx_sched_in(struct perf_event_context *ctx, 1528 struct perf_cpu_context *cpuctx, 1529 enum event_type_t event_type, 1530 struct task_struct *task); 1531 1532 static void perf_event_sched_in(struct perf_cpu_context *cpuctx, 1533 struct perf_event_context *ctx, 1534 struct task_struct *task) 1535 { 1536 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); 1537 if (ctx) 1538 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); 1539 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); 1540 if (ctx) 1541 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); 1542 } 1543 1544 /* 1545 * Cross CPU call to install and enable a performance event 1546 * 1547 * Must be called with ctx->mutex held 1548 */ 1549 static int __perf_install_in_context(void *info) 1550 { 1551 struct perf_event *event = info; 1552 struct perf_event_context *ctx = event->ctx; 1553 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1554 struct perf_event_context *task_ctx = cpuctx->task_ctx; 1555 struct task_struct *task = current; 1556 1557 perf_ctx_lock(cpuctx, task_ctx); 1558 perf_pmu_disable(cpuctx->ctx.pmu); 1559 1560 /* 1561 * If there was an active task_ctx schedule it out. 1562 */ 1563 if (task_ctx) 1564 task_ctx_sched_out(task_ctx); 1565 1566 /* 1567 * If the context we're installing events in is not the 1568 * active task_ctx, flip them. 1569 */ 1570 if (ctx->task && task_ctx != ctx) { 1571 if (task_ctx) 1572 raw_spin_unlock(&task_ctx->lock); 1573 raw_spin_lock(&ctx->lock); 1574 task_ctx = ctx; 1575 } 1576 1577 if (task_ctx) { 1578 cpuctx->task_ctx = task_ctx; 1579 task = task_ctx->task; 1580 } 1581 1582 cpu_ctx_sched_out(cpuctx, EVENT_ALL); 1583 1584 update_context_time(ctx); 1585 /* 1586 * update cgrp time only if current cgrp 1587 * matches event->cgrp. Must be done before 1588 * calling add_event_to_ctx() 1589 */ 1590 update_cgrp_time_from_event(event); 1591 1592 add_event_to_ctx(event, ctx); 1593 1594 /* 1595 * Schedule everything back in 1596 */ 1597 perf_event_sched_in(cpuctx, task_ctx, task); 1598 1599 perf_pmu_enable(cpuctx->ctx.pmu); 1600 perf_ctx_unlock(cpuctx, task_ctx); 1601 1602 return 0; 1603 } 1604 1605 /* 1606 * Attach a performance event to a context 1607 * 1608 * First we add the event to the list with the hardware enable bit 1609 * in event->hw_config cleared. 1610 * 1611 * If the event is attached to a task which is on a CPU we use a smp 1612 * call to enable it in the task context. The task might have been 1613 * scheduled away, but we check this in the smp call again. 1614 */ 1615 static void 1616 perf_install_in_context(struct perf_event_context *ctx, 1617 struct perf_event *event, 1618 int cpu) 1619 { 1620 struct task_struct *task = ctx->task; 1621 1622 lockdep_assert_held(&ctx->mutex); 1623 1624 event->ctx = ctx; 1625 1626 if (!task) { 1627 /* 1628 * Per cpu events are installed via an smp call and 1629 * the install is always successful. 
1630 */ 1631 cpu_function_call(cpu, __perf_install_in_context, event); 1632 return; 1633 } 1634 1635 retry: 1636 if (!task_function_call(task, __perf_install_in_context, event)) 1637 return; 1638 1639 raw_spin_lock_irq(&ctx->lock); 1640 /* 1641 * If we failed to find a running task, but find the context active now 1642 * that we've acquired the ctx->lock, retry. 1643 */ 1644 if (ctx->is_active) { 1645 raw_spin_unlock_irq(&ctx->lock); 1646 goto retry; 1647 } 1648 1649 /* 1650 * Since the task isn't running, it's safe to add the event; our holding 1651 * the ctx->lock ensures the task won't get scheduled in. 1652 */ 1653 add_event_to_ctx(event, ctx); 1654 raw_spin_unlock_irq(&ctx->lock); 1655 } 1656 1657 /* 1658 * Put an event into inactive state and update time fields. 1659 * Enabling the leader of a group effectively enables all 1660 * the group members that aren't explicitly disabled, so we 1661 * have to update their ->tstamp_enabled also. 1662 * Note: this works for group members as well as group leaders 1663 * since the non-leader members' sibling_lists will be empty. 1664 */ 1665 static void __perf_event_mark_enabled(struct perf_event *event, 1666 struct perf_event_context *ctx) 1667 { 1668 struct perf_event *sub; 1669 u64 tstamp = perf_event_time(event); 1670 1671 event->state = PERF_EVENT_STATE_INACTIVE; 1672 event->tstamp_enabled = tstamp - event->total_time_enabled; 1673 list_for_each_entry(sub, &event->sibling_list, group_entry) { 1674 if (sub->state >= PERF_EVENT_STATE_INACTIVE) 1675 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 1676 } 1677 } 1678 1679 /* 1680 * Cross CPU call to enable a performance event 1681 */ 1682 static int __perf_event_enable(void *info) 1683 { 1684 struct perf_event *event = info; 1685 struct perf_event_context *ctx = event->ctx; 1686 struct perf_event *leader = event->group_leader; 1687 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1688 int err; 1689 1690 if (WARN_ON_ONCE(!ctx->is_active)) 1691 return -EINVAL; 1692 1693 raw_spin_lock(&ctx->lock); 1694 update_context_time(ctx); 1695 1696 if (event->state >= PERF_EVENT_STATE_INACTIVE) 1697 goto unlock; 1698 1699 /* 1700 * set current task's cgroup time reference point 1701 */ 1702 perf_cgroup_set_timestamp(current, ctx); 1703 1704 __perf_event_mark_enabled(event, ctx); 1705 1706 if (!event_filter_match(event)) { 1707 if (is_cgroup_event(event)) 1708 perf_cgroup_defer_enabled(event); 1709 goto unlock; 1710 } 1711 1712 /* 1713 * If the event is in a group and isn't the group leader, 1714 * then don't put it on unless the group is on. 1715 */ 1716 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 1717 goto unlock; 1718 1719 if (!group_can_go_on(event, cpuctx, 1)) { 1720 err = -EEXIST; 1721 } else { 1722 if (event == leader) 1723 err = group_sched_in(event, cpuctx, ctx); 1724 else 1725 err = event_sched_in(event, cpuctx, ctx); 1726 } 1727 1728 if (err) { 1729 /* 1730 * If this event can't go on and it's part of a 1731 * group, then the whole group has to come off. 1732 */ 1733 if (leader != event) 1734 group_sched_out(leader, cpuctx, ctx); 1735 if (leader->attr.pinned) { 1736 update_group_times(leader); 1737 leader->state = PERF_EVENT_STATE_ERROR; 1738 } 1739 } 1740 1741 unlock: 1742 raw_spin_unlock(&ctx->lock); 1743 1744 return 0; 1745 } 1746 1747 /* 1748 * Enable an event. 1749 * 1750 * If event->ctx is a cloned context, callers must make sure that 1751 * every task struct that event->ctx->task could possibly point to 1752 * remains valid.
This condition is satisfied when called through 1753 * perf_event_for_each_child or perf_event_for_each as described 1754 * for perf_event_disable. 1755 */ 1756 void perf_event_enable(struct perf_event *event) 1757 { 1758 struct perf_event_context *ctx = event->ctx; 1759 struct task_struct *task = ctx->task; 1760 1761 if (!task) { 1762 /* 1763 * Enable the event on the cpu that it's on 1764 */ 1765 cpu_function_call(event->cpu, __perf_event_enable, event); 1766 return; 1767 } 1768 1769 raw_spin_lock_irq(&ctx->lock); 1770 if (event->state >= PERF_EVENT_STATE_INACTIVE) 1771 goto out; 1772 1773 /* 1774 * If the event is in error state, clear that first. 1775 * That way, if we see the event in error state below, we 1776 * know that it has gone back into error state, as distinct 1777 * from the task having been scheduled away before the 1778 * cross-call arrived. 1779 */ 1780 if (event->state == PERF_EVENT_STATE_ERROR) 1781 event->state = PERF_EVENT_STATE_OFF; 1782 1783 retry: 1784 if (!ctx->is_active) { 1785 __perf_event_mark_enabled(event, ctx); 1786 goto out; 1787 } 1788 1789 raw_spin_unlock_irq(&ctx->lock); 1790 1791 if (!task_function_call(task, __perf_event_enable, event)) 1792 return; 1793 1794 raw_spin_lock_irq(&ctx->lock); 1795 1796 /* 1797 * If the context is active and the event is still off, 1798 * we need to retry the cross-call. 1799 */ 1800 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { 1801 /* 1802 * task could have been flipped by a concurrent 1803 * perf_event_context_sched_out() 1804 */ 1805 task = ctx->task; 1806 goto retry; 1807 } 1808 1809 out: 1810 raw_spin_unlock_irq(&ctx->lock); 1811 } 1812 1813 int perf_event_refresh(struct perf_event *event, int refresh) 1814 { 1815 /* 1816 * not supported on inherited events 1817 */ 1818 if (event->attr.inherit || !is_sampling_event(event)) 1819 return -EINVAL; 1820 1821 atomic_add(refresh, &event->event_limit); 1822 perf_event_enable(event); 1823 1824 return 0; 1825 } 1826 EXPORT_SYMBOL_GPL(perf_event_refresh); 1827 1828 static void ctx_sched_out(struct perf_event_context *ctx, 1829 struct perf_cpu_context *cpuctx, 1830 enum event_type_t event_type) 1831 { 1832 struct perf_event *event; 1833 int is_active = ctx->is_active; 1834 1835 ctx->is_active &= ~event_type; 1836 if (likely(!ctx->nr_events)) 1837 return; 1838 1839 update_context_time(ctx); 1840 update_cgrp_time_from_cpuctx(cpuctx); 1841 if (!ctx->nr_active) 1842 return; 1843 1844 perf_pmu_disable(ctx->pmu); 1845 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { 1846 list_for_each_entry(event, &ctx->pinned_groups, group_entry) 1847 group_sched_out(event, cpuctx, ctx); 1848 } 1849 1850 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { 1851 list_for_each_entry(event, &ctx->flexible_groups, group_entry) 1852 group_sched_out(event, cpuctx, ctx); 1853 } 1854 perf_pmu_enable(ctx->pmu); 1855 } 1856 1857 /* 1858 * Test whether two contexts are equivalent, i.e. whether they 1859 * have both been cloned from the same version of the same context 1860 * and they both have the same number of enabled events. 1861 * If the number of enabled events is the same, then the set 1862 * of enabled events should be the same, because these are both 1863 * inherited contexts, therefore we can't access individual events 1864 * in them directly with an fd; we can only enable/disable all 1865 * events via prctl, or enable/disable all events in a family 1866 * via ioctl, which will have the same effect on both contexts. 
1867 */ 1868 static int context_equiv(struct perf_event_context *ctx1, 1869 struct perf_event_context *ctx2) 1870 { 1871 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx 1872 && ctx1->parent_gen == ctx2->parent_gen 1873 && !ctx1->pin_count && !ctx2->pin_count; 1874 } 1875 1876 static void __perf_event_sync_stat(struct perf_event *event, 1877 struct perf_event *next_event) 1878 { 1879 u64 value; 1880 1881 if (!event->attr.inherit_stat) 1882 return; 1883 1884 /* 1885 * Update the event value, we cannot use perf_event_read() 1886 * because we're in the middle of a context switch and have IRQs 1887 * disabled, which upsets smp_call_function_single(), however 1888 * we know the event must be on the current CPU, therefore we 1889 * don't need to use it. 1890 */ 1891 switch (event->state) { 1892 case PERF_EVENT_STATE_ACTIVE: 1893 event->pmu->read(event); 1894 /* fall-through */ 1895 1896 case PERF_EVENT_STATE_INACTIVE: 1897 update_event_times(event); 1898 break; 1899 1900 default: 1901 break; 1902 } 1903 1904 /* 1905 * In order to keep per-task stats reliable we need to flip the event 1906 * values when we flip the contexts. 1907 */ 1908 value = local64_read(&next_event->count); 1909 value = local64_xchg(&event->count, value); 1910 local64_set(&next_event->count, value); 1911 1912 swap(event->total_time_enabled, next_event->total_time_enabled); 1913 swap(event->total_time_running, next_event->total_time_running); 1914 1915 /* 1916 * Since we swizzled the values, update the user visible data too. 1917 */ 1918 perf_event_update_userpage(event); 1919 perf_event_update_userpage(next_event); 1920 } 1921 1922 #define list_next_entry(pos, member) \ 1923 list_entry(pos->member.next, typeof(*pos), member) 1924 1925 static void perf_event_sync_stat(struct perf_event_context *ctx, 1926 struct perf_event_context *next_ctx) 1927 { 1928 struct perf_event *event, *next_event; 1929 1930 if (!ctx->nr_stat) 1931 return; 1932 1933 update_context_time(ctx); 1934 1935 event = list_first_entry(&ctx->event_list, 1936 struct perf_event, event_entry); 1937 1938 next_event = list_first_entry(&next_ctx->event_list, 1939 struct perf_event, event_entry); 1940 1941 while (&event->event_entry != &ctx->event_list && 1942 &next_event->event_entry != &next_ctx->event_list) { 1943 1944 __perf_event_sync_stat(event, next_event); 1945 1946 event = list_next_entry(event, event_entry); 1947 next_event = list_next_entry(next_event, event_entry); 1948 } 1949 } 1950 1951 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, 1952 struct task_struct *next) 1953 { 1954 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; 1955 struct perf_event_context *next_ctx; 1956 struct perf_event_context *parent; 1957 struct perf_cpu_context *cpuctx; 1958 int do_switch = 1; 1959 1960 if (likely(!ctx)) 1961 return; 1962 1963 cpuctx = __get_cpu_context(ctx); 1964 if (!cpuctx->task_ctx) 1965 return; 1966 1967 rcu_read_lock(); 1968 parent = rcu_dereference(ctx->parent_ctx); 1969 next_ctx = next->perf_event_ctxp[ctxn]; 1970 if (parent && next_ctx && 1971 rcu_dereference(next_ctx->parent_ctx) == parent) { 1972 /* 1973 * Looks like the two contexts are clones, so we might be 1974 * able to optimize the context switch. We lock both 1975 * contexts and check that they are clones under the 1976 * lock (including re-checking that neither has been 1977 * uncloned in the meantime). It doesn't matter which 1978 * order we take the locks because no other cpu could 1979 * be trying to lock both of these tasks. 
1980 */ 1981 raw_spin_lock(&ctx->lock); 1982 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 1983 if (context_equiv(ctx, next_ctx)) { 1984 /* 1985 * XXX do we need a memory barrier of sorts 1986 * wrt rcu_dereference() of perf_event_ctxp 1987 */ 1988 task->perf_event_ctxp[ctxn] = next_ctx; 1989 next->perf_event_ctxp[ctxn] = ctx; 1990 ctx->task = next; 1991 next_ctx->task = task; 1992 do_switch = 0; 1993 1994 perf_event_sync_stat(ctx, next_ctx); 1995 } 1996 raw_spin_unlock(&next_ctx->lock); 1997 raw_spin_unlock(&ctx->lock); 1998 } 1999 rcu_read_unlock(); 2000 2001 if (do_switch) { 2002 raw_spin_lock(&ctx->lock); 2003 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2004 cpuctx->task_ctx = NULL; 2005 raw_spin_unlock(&ctx->lock); 2006 } 2007 } 2008 2009 #define for_each_task_context_nr(ctxn) \ 2010 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) 2011 2012 /* 2013 * Called from scheduler to remove the events of the current task, 2014 * with interrupts disabled. 2015 * 2016 * We stop each event and update the event value in event->count. 2017 * 2018 * This does not protect us against NMI, but disable() 2019 * sets the disabled bit in the control field of event _before_ 2020 * accessing the event control register. If an NMI hits, then it will 2021 * not restart the event. 2022 */ 2023 void __perf_event_task_sched_out(struct task_struct *task, 2024 struct task_struct *next) 2025 { 2026 int ctxn; 2027 2028 for_each_task_context_nr(ctxn) 2029 perf_event_context_sched_out(task, ctxn, next); 2030 2031 /* 2032 * if cgroup events exist on this CPU, then we need 2033 * to check if we have to switch out PMU state. 2034 * cgroup events are system-wide mode only 2035 */ 2036 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2037 perf_cgroup_sched_out(task, next); 2038 } 2039 2040 static void task_ctx_sched_out(struct perf_event_context *ctx) 2041 { 2042 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2043 2044 if (!cpuctx->task_ctx) 2045 return; 2046 2047 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 2048 return; 2049 2050 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2051 cpuctx->task_ctx = NULL; 2052 } 2053 2054 /* 2055 * Called with IRQs disabled 2056 */ 2057 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 2058 enum event_type_t event_type) 2059 { 2060 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); 2061 } 2062 2063 static void 2064 ctx_pinned_sched_in(struct perf_event_context *ctx, 2065 struct perf_cpu_context *cpuctx) 2066 { 2067 struct perf_event *event; 2068 2069 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { 2070 if (event->state <= PERF_EVENT_STATE_OFF) 2071 continue; 2072 if (!event_filter_match(event)) 2073 continue; 2074 2075 /* may need to reset tstamp_enabled */ 2076 if (is_cgroup_event(event)) 2077 perf_cgroup_mark_enabled(event, ctx); 2078 2079 if (group_can_go_on(event, cpuctx, 1)) 2080 group_sched_in(event, cpuctx, ctx); 2081 2082 /* 2083 * If this pinned group hasn't been scheduled, 2084 * put it in error state.
2085 */ 2086 if (event->state == PERF_EVENT_STATE_INACTIVE) { 2087 update_group_times(event); 2088 event->state = PERF_EVENT_STATE_ERROR; 2089 } 2090 } 2091 } 2092 2093 static void 2094 ctx_flexible_sched_in(struct perf_event_context *ctx, 2095 struct perf_cpu_context *cpuctx) 2096 { 2097 struct perf_event *event; 2098 int can_add_hw = 1; 2099 2100 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { 2101 /* Ignore events in OFF or ERROR state */ 2102 if (event->state <= PERF_EVENT_STATE_OFF) 2103 continue; 2104 /* 2105 * Listen to the 'cpu' scheduling filter constraint 2106 * of events: 2107 */ 2108 if (!event_filter_match(event)) 2109 continue; 2110 2111 /* may need to reset tstamp_enabled */ 2112 if (is_cgroup_event(event)) 2113 perf_cgroup_mark_enabled(event, ctx); 2114 2115 if (group_can_go_on(event, cpuctx, can_add_hw)) { 2116 if (group_sched_in(event, cpuctx, ctx)) 2117 can_add_hw = 0; 2118 } 2119 } 2120 } 2121 2122 static void 2123 ctx_sched_in(struct perf_event_context *ctx, 2124 struct perf_cpu_context *cpuctx, 2125 enum event_type_t event_type, 2126 struct task_struct *task) 2127 { 2128 u64 now; 2129 int is_active = ctx->is_active; 2130 2131 ctx->is_active |= event_type; 2132 if (likely(!ctx->nr_events)) 2133 return; 2134 2135 now = perf_clock(); 2136 ctx->timestamp = now; 2137 perf_cgroup_set_timestamp(task, ctx); 2138 /* 2139 * First go through the list and put on any pinned groups 2140 * in order to give them the best chance of going on. 2141 */ 2142 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) 2143 ctx_pinned_sched_in(ctx, cpuctx); 2144 2145 /* Then walk through the lower prio flexible groups */ 2146 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) 2147 ctx_flexible_sched_in(ctx, cpuctx); 2148 } 2149 2150 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 2151 enum event_type_t event_type, 2152 struct task_struct *task) 2153 { 2154 struct perf_event_context *ctx = &cpuctx->ctx; 2155 2156 ctx_sched_in(ctx, cpuctx, event_type, task); 2157 } 2158 2159 static void perf_event_context_sched_in(struct perf_event_context *ctx, 2160 struct task_struct *task) 2161 { 2162 struct perf_cpu_context *cpuctx; 2163 2164 cpuctx = __get_cpu_context(ctx); 2165 if (cpuctx->task_ctx == ctx) 2166 return; 2167 2168 perf_ctx_lock(cpuctx, ctx); 2169 perf_pmu_disable(ctx->pmu); 2170 /* 2171 * We want to keep the following priority order: 2172 * cpu pinned (that don't need to move), task pinned, 2173 * cpu flexible, task flexible. 2174 */ 2175 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2176 2177 perf_event_sched_in(cpuctx, ctx, task); 2178 2179 if (ctx->nr_events) 2180 cpuctx->task_ctx = ctx; 2181 2182 perf_pmu_enable(ctx->pmu); 2183 perf_ctx_unlock(cpuctx, ctx); 2184 2185 /* 2186 * Since these rotations are per-cpu, we need to ensure the 2187 * cpu-context we got scheduled on is actually rotating. 2188 */ 2189 perf_pmu_rotate_start(ctx->pmu); 2190 } 2191 2192 /* 2193 * Called from scheduler to add the events of the current task 2194 * with interrupts disabled. 2195 * 2196 * We restore the event value and then enable it. 2197 * 2198 * This does not protect us against NMI, but enable() 2199 * sets the enabled bit in the control field of event _before_ 2200 * accessing the event control register. If a NMI hits, then it will 2201 * keep the event running. 
2202 */ 2203 void __perf_event_task_sched_in(struct task_struct *prev, 2204 struct task_struct *task) 2205 { 2206 struct perf_event_context *ctx; 2207 int ctxn; 2208 2209 for_each_task_context_nr(ctxn) { 2210 ctx = task->perf_event_ctxp[ctxn]; 2211 if (likely(!ctx)) 2212 continue; 2213 2214 perf_event_context_sched_in(ctx, task); 2215 } 2216 /* 2217 * if cgroup events exist on this CPU, then we need 2218 * to check if we have to switch in PMU state. 2219 * cgroup event are system-wide mode only 2220 */ 2221 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2222 perf_cgroup_sched_in(prev, task); 2223 } 2224 2225 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2226 { 2227 u64 frequency = event->attr.sample_freq; 2228 u64 sec = NSEC_PER_SEC; 2229 u64 divisor, dividend; 2230 2231 int count_fls, nsec_fls, frequency_fls, sec_fls; 2232 2233 count_fls = fls64(count); 2234 nsec_fls = fls64(nsec); 2235 frequency_fls = fls64(frequency); 2236 sec_fls = 30; 2237 2238 /* 2239 * We got @count in @nsec, with a target of sample_freq HZ 2240 * the target period becomes: 2241 * 2242 * @count * 10^9 2243 * period = ------------------- 2244 * @nsec * sample_freq 2245 * 2246 */ 2247 2248 /* 2249 * Reduce accuracy by one bit such that @a and @b converge 2250 * to a similar magnitude. 2251 */ 2252 #define REDUCE_FLS(a, b) \ 2253 do { \ 2254 if (a##_fls > b##_fls) { \ 2255 a >>= 1; \ 2256 a##_fls--; \ 2257 } else { \ 2258 b >>= 1; \ 2259 b##_fls--; \ 2260 } \ 2261 } while (0) 2262 2263 /* 2264 * Reduce accuracy until either term fits in a u64, then proceed with 2265 * the other, so that finally we can do a u64/u64 division. 2266 */ 2267 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { 2268 REDUCE_FLS(nsec, frequency); 2269 REDUCE_FLS(sec, count); 2270 } 2271 2272 if (count_fls + sec_fls > 64) { 2273 divisor = nsec * frequency; 2274 2275 while (count_fls + sec_fls > 64) { 2276 REDUCE_FLS(count, sec); 2277 divisor >>= 1; 2278 } 2279 2280 dividend = count * sec; 2281 } else { 2282 dividend = count * sec; 2283 2284 while (nsec_fls + frequency_fls > 64) { 2285 REDUCE_FLS(nsec, frequency); 2286 dividend >>= 1; 2287 } 2288 2289 divisor = nsec * frequency; 2290 } 2291 2292 if (!divisor) 2293 return dividend; 2294 2295 return div64_u64(dividend, divisor); 2296 } 2297 2298 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) 2299 { 2300 struct hw_perf_event *hwc = &event->hw; 2301 s64 period, sample_period; 2302 s64 delta; 2303 2304 period = perf_calculate_period(event, nsec, count); 2305 2306 delta = (s64)(period - hwc->sample_period); 2307 delta = (delta + 7) / 8; /* low pass filter */ 2308 2309 sample_period = hwc->sample_period + delta; 2310 2311 if (!sample_period) 2312 sample_period = 1; 2313 2314 hwc->sample_period = sample_period; 2315 2316 if (local64_read(&hwc->period_left) > 8*sample_period) { 2317 event->pmu->stop(event, PERF_EF_UPDATE); 2318 local64_set(&hwc->period_left, 0); 2319 event->pmu->start(event, PERF_EF_RELOAD); 2320 } 2321 } 2322 2323 static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) 2324 { 2325 struct perf_event *event; 2326 struct hw_perf_event *hwc; 2327 u64 interrupts, now; 2328 s64 delta; 2329 2330 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 2331 if (event->state != PERF_EVENT_STATE_ACTIVE) 2332 continue; 2333 2334 if (!event_filter_match(event)) 2335 continue; 2336 2337 hwc = &event->hw; 2338 2339 interrupts = hwc->interrupts; 2340 hwc->interrupts = 0; 2341 2342 /* 2343 
* unthrottle events on the tick 2344 */ 2345 if (interrupts == MAX_INTERRUPTS) { 2346 perf_log_throttle(event, 1); 2347 event->pmu->start(event, 0); 2348 } 2349 2350 if (!event->attr.freq || !event->attr.sample_freq) 2351 continue; 2352 2353 event->pmu->read(event); 2354 now = local64_read(&event->count); 2355 delta = now - hwc->freq_count_stamp; 2356 hwc->freq_count_stamp = now; 2357 2358 if (delta > 0) 2359 perf_adjust_period(event, period, delta); 2360 } 2361 } 2362 2363 /* 2364 * Round-robin a context's events: 2365 */ 2366 static void rotate_ctx(struct perf_event_context *ctx) 2367 { 2368 /* 2369 * Rotate the first entry last of non-pinned groups. Rotation might be 2370 * disabled by the inheritance code. 2371 */ 2372 if (!ctx->rotate_disable) 2373 list_rotate_left(&ctx->flexible_groups); 2374 } 2375 2376 /* 2377 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized 2378 * because they're strictly cpu affine and rotate_start is called with IRQs 2379 * disabled, while rotate_context is called from IRQ context. 2380 */ 2381 static void perf_rotate_context(struct perf_cpu_context *cpuctx) 2382 { 2383 u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC; 2384 struct perf_event_context *ctx = NULL; 2385 int rotate = 0, remove = 1; 2386 2387 if (cpuctx->ctx.nr_events) { 2388 remove = 0; 2389 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 2390 rotate = 1; 2391 } 2392 2393 ctx = cpuctx->task_ctx; 2394 if (ctx && ctx->nr_events) { 2395 remove = 0; 2396 if (ctx->nr_events != ctx->nr_active) 2397 rotate = 1; 2398 } 2399 2400 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2401 perf_pmu_disable(cpuctx->ctx.pmu); 2402 perf_ctx_adjust_freq(&cpuctx->ctx, interval); 2403 if (ctx) 2404 perf_ctx_adjust_freq(ctx, interval); 2405 2406 if (!rotate) 2407 goto done; 2408 2409 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2410 if (ctx) 2411 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 2412 2413 rotate_ctx(&cpuctx->ctx); 2414 if (ctx) 2415 rotate_ctx(ctx); 2416 2417 perf_event_sched_in(cpuctx, ctx, current); 2418 2419 done: 2420 if (remove) 2421 list_del_init(&cpuctx->rotation_list); 2422 2423 perf_pmu_enable(cpuctx->ctx.pmu); 2424 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2425 } 2426 2427 void perf_event_task_tick(void) 2428 { 2429 struct list_head *head = &__get_cpu_var(rotation_list); 2430 struct perf_cpu_context *cpuctx, *tmp; 2431 2432 WARN_ON(!irqs_disabled()); 2433 2434 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { 2435 if (cpuctx->jiffies_interval == 1 || 2436 !(jiffies % cpuctx->jiffies_interval)) 2437 perf_rotate_context(cpuctx); 2438 } 2439 } 2440 2441 static int event_enable_on_exec(struct perf_event *event, 2442 struct perf_event_context *ctx) 2443 { 2444 if (!event->attr.enable_on_exec) 2445 return 0; 2446 2447 event->attr.enable_on_exec = 0; 2448 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2449 return 0; 2450 2451 __perf_event_mark_enabled(event, ctx); 2452 2453 return 1; 2454 } 2455 2456 /* 2457 * Enable all of a task's events that have been marked enable-on-exec. 2458 * This expects task == current. 2459 */ 2460 static void perf_event_enable_on_exec(struct perf_event_context *ctx) 2461 { 2462 struct perf_event *event; 2463 unsigned long flags; 2464 int enabled = 0; 2465 int ret; 2466 2467 local_irq_save(flags); 2468 if (!ctx || !ctx->nr_events) 2469 goto out; 2470 2471 /* 2472 * We must ctxsw out cgroup events to avoid conflict 2473 * when invoking perf_task_event_sched_in() later on 2474 * in this function. 
Otherwise we end up trying to 2475 * ctxswin cgroup events which are already scheduled 2476 * in. 2477 */ 2478 perf_cgroup_sched_out(current, NULL); 2479 2480 raw_spin_lock(&ctx->lock); 2481 task_ctx_sched_out(ctx); 2482 2483 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { 2484 ret = event_enable_on_exec(event, ctx); 2485 if (ret) 2486 enabled = 1; 2487 } 2488 2489 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { 2490 ret = event_enable_on_exec(event, ctx); 2491 if (ret) 2492 enabled = 1; 2493 } 2494 2495 /* 2496 * Unclone this context if we enabled any event. 2497 */ 2498 if (enabled) 2499 unclone_ctx(ctx); 2500 2501 raw_spin_unlock(&ctx->lock); 2502 2503 /* 2504 * Also calls ctxswin for cgroup events, if any: 2505 */ 2506 perf_event_context_sched_in(ctx, ctx->task); 2507 out: 2508 local_irq_restore(flags); 2509 } 2510 2511 /* 2512 * Cross CPU call to read the hardware event 2513 */ 2514 static void __perf_event_read(void *info) 2515 { 2516 struct perf_event *event = info; 2517 struct perf_event_context *ctx = event->ctx; 2518 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2519 2520 /* 2521 * If this is a task context, we need to check whether it is 2522 * the current task context of this cpu. If not it has been 2523 * scheduled out before the smp call arrived. In that case 2524 * event->count would have been updated to a recent sample 2525 * when the event was scheduled out. 2526 */ 2527 if (ctx->task && cpuctx->task_ctx != ctx) 2528 return; 2529 2530 raw_spin_lock(&ctx->lock); 2531 if (ctx->is_active) { 2532 update_context_time(ctx); 2533 update_cgrp_time_from_event(event); 2534 } 2535 update_event_times(event); 2536 if (event->state == PERF_EVENT_STATE_ACTIVE) 2537 event->pmu->read(event); 2538 raw_spin_unlock(&ctx->lock); 2539 } 2540 2541 static inline u64 perf_event_count(struct perf_event *event) 2542 { 2543 return local64_read(&event->count) + atomic64_read(&event->child_count); 2544 } 2545 2546 static u64 perf_event_read(struct perf_event *event) 2547 { 2548 /* 2549 * If event is enabled and currently active on a CPU, update the 2550 * value in the event structure: 2551 */ 2552 if (event->state == PERF_EVENT_STATE_ACTIVE) { 2553 smp_call_function_single(event->oncpu, 2554 __perf_event_read, event, 1); 2555 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 2556 struct perf_event_context *ctx = event->ctx; 2557 unsigned long flags; 2558 2559 raw_spin_lock_irqsave(&ctx->lock, flags); 2560 /* 2561 * may read while context is not active 2562 * (e.g., thread is blocked), in that case 2563 * we cannot update context time 2564 */ 2565 if (ctx->is_active) { 2566 update_context_time(ctx); 2567 update_cgrp_time_from_event(event); 2568 } 2569 update_event_times(event); 2570 raw_spin_unlock_irqrestore(&ctx->lock, flags); 2571 } 2572 2573 return perf_event_count(event); 2574 } 2575 2576 /* 2577 * Callchain support 2578 */ 2579 2580 struct callchain_cpus_entries { 2581 struct rcu_head rcu_head; 2582 struct perf_callchain_entry *cpu_entries[0]; 2583 }; 2584 2585 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]); 2586 static atomic_t nr_callchain_events; 2587 static DEFINE_MUTEX(callchain_mutex); 2588 struct callchain_cpus_entries *callchain_cpus_entries; 2589 2590 2591 __weak void perf_callchain_kernel(struct perf_callchain_entry *entry, 2592 struct pt_regs *regs) 2593 { 2594 } 2595 2596 __weak void perf_callchain_user(struct perf_callchain_entry *entry, 2597 struct pt_regs *regs) 2598 { 2599 } 2600 2601 static void 
release_callchain_buffers_rcu(struct rcu_head *head) 2602 { 2603 struct callchain_cpus_entries *entries; 2604 int cpu; 2605 2606 entries = container_of(head, struct callchain_cpus_entries, rcu_head); 2607 2608 for_each_possible_cpu(cpu) 2609 kfree(entries->cpu_entries[cpu]); 2610 2611 kfree(entries); 2612 } 2613 2614 static void release_callchain_buffers(void) 2615 { 2616 struct callchain_cpus_entries *entries; 2617 2618 entries = callchain_cpus_entries; 2619 rcu_assign_pointer(callchain_cpus_entries, NULL); 2620 call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); 2621 } 2622 2623 static int alloc_callchain_buffers(void) 2624 { 2625 int cpu; 2626 int size; 2627 struct callchain_cpus_entries *entries; 2628 2629 /* 2630 * We can't use the percpu allocation API for data that can be 2631 * accessed from NMI. Use a temporary manual per cpu allocation 2632 * until that gets sorted out. 2633 */ 2634 size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); 2635 2636 entries = kzalloc(size, GFP_KERNEL); 2637 if (!entries) 2638 return -ENOMEM; 2639 2640 size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS; 2641 2642 for_each_possible_cpu(cpu) { 2643 entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, 2644 cpu_to_node(cpu)); 2645 if (!entries->cpu_entries[cpu]) 2646 goto fail; 2647 } 2648 2649 rcu_assign_pointer(callchain_cpus_entries, entries); 2650 2651 return 0; 2652 2653 fail: 2654 for_each_possible_cpu(cpu) 2655 kfree(entries->cpu_entries[cpu]); 2656 kfree(entries); 2657 2658 return -ENOMEM; 2659 } 2660 2661 static int get_callchain_buffers(void) 2662 { 2663 int err = 0; 2664 int count; 2665 2666 mutex_lock(&callchain_mutex); 2667 2668 count = atomic_inc_return(&nr_callchain_events); 2669 if (WARN_ON_ONCE(count < 1)) { 2670 err = -EINVAL; 2671 goto exit; 2672 } 2673 2674 if (count > 1) { 2675 /* If the allocation failed, give up */ 2676 if (!callchain_cpus_entries) 2677 err = -ENOMEM; 2678 goto exit; 2679 } 2680 2681 err = alloc_callchain_buffers(); 2682 if (err) 2683 release_callchain_buffers(); 2684 exit: 2685 mutex_unlock(&callchain_mutex); 2686 2687 return err; 2688 } 2689 2690 static void put_callchain_buffers(void) 2691 { 2692 if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) { 2693 release_callchain_buffers(); 2694 mutex_unlock(&callchain_mutex); 2695 } 2696 } 2697 2698 static int get_recursion_context(int *recursion) 2699 { 2700 int rctx; 2701 2702 if (in_nmi()) 2703 rctx = 3; 2704 else if (in_irq()) 2705 rctx = 2; 2706 else if (in_softirq()) 2707 rctx = 1; 2708 else 2709 rctx = 0; 2710 2711 if (recursion[rctx]) 2712 return -1; 2713 2714 recursion[rctx]++; 2715 barrier(); 2716 2717 return rctx; 2718 } 2719 2720 static inline void put_recursion_context(int *recursion, int rctx) 2721 { 2722 barrier(); 2723 recursion[rctx]--; 2724 } 2725 2726 static struct perf_callchain_entry *get_callchain_entry(int *rctx) 2727 { 2728 int cpu; 2729 struct callchain_cpus_entries *entries; 2730 2731 *rctx = get_recursion_context(__get_cpu_var(callchain_recursion)); 2732 if (*rctx == -1) 2733 return NULL; 2734 2735 entries = rcu_dereference(callchain_cpus_entries); 2736 if (!entries) 2737 return NULL; 2738 2739 cpu = smp_processor_id(); 2740 2741 return &entries->cpu_entries[cpu][*rctx]; 2742 } 2743 2744 static void 2745 put_callchain_entry(int rctx) 2746 { 2747 put_recursion_context(__get_cpu_var(callchain_recursion), rctx); 2748 } 2749 2750 static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) 2751 { 2752 int rctx; 2753 struct 
perf_callchain_entry *entry; 2754 2755 2756 entry = get_callchain_entry(&rctx); 2757 if (rctx == -1) 2758 return NULL; 2759 2760 if (!entry) 2761 goto exit_put; 2762 2763 entry->nr = 0; 2764 2765 if (!user_mode(regs)) { 2766 perf_callchain_store(entry, PERF_CONTEXT_KERNEL); 2767 perf_callchain_kernel(entry, regs); 2768 if (current->mm) 2769 regs = task_pt_regs(current); 2770 else 2771 regs = NULL; 2772 } 2773 2774 if (regs) { 2775 perf_callchain_store(entry, PERF_CONTEXT_USER); 2776 perf_callchain_user(entry, regs); 2777 } 2778 2779 exit_put: 2780 put_callchain_entry(rctx); 2781 2782 return entry; 2783 } 2784 2785 /* 2786 * Initialize the perf_event context in a task_struct: 2787 */ 2788 static void __perf_event_init_context(struct perf_event_context *ctx) 2789 { 2790 raw_spin_lock_init(&ctx->lock); 2791 mutex_init(&ctx->mutex); 2792 INIT_LIST_HEAD(&ctx->pinned_groups); 2793 INIT_LIST_HEAD(&ctx->flexible_groups); 2794 INIT_LIST_HEAD(&ctx->event_list); 2795 atomic_set(&ctx->refcount, 1); 2796 } 2797 2798 static struct perf_event_context * 2799 alloc_perf_context(struct pmu *pmu, struct task_struct *task) 2800 { 2801 struct perf_event_context *ctx; 2802 2803 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); 2804 if (!ctx) 2805 return NULL; 2806 2807 __perf_event_init_context(ctx); 2808 if (task) { 2809 ctx->task = task; 2810 get_task_struct(task); 2811 } 2812 ctx->pmu = pmu; 2813 2814 return ctx; 2815 } 2816 2817 static struct task_struct * 2818 find_lively_task_by_vpid(pid_t vpid) 2819 { 2820 struct task_struct *task; 2821 int err; 2822 2823 rcu_read_lock(); 2824 if (!vpid) 2825 task = current; 2826 else 2827 task = find_task_by_vpid(vpid); 2828 if (task) 2829 get_task_struct(task); 2830 rcu_read_unlock(); 2831 2832 if (!task) 2833 return ERR_PTR(-ESRCH); 2834 2835 /* Reuse ptrace permission checks for now. */ 2836 err = -EACCES; 2837 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 2838 goto errout; 2839 2840 return task; 2841 errout: 2842 put_task_struct(task); 2843 return ERR_PTR(err); 2844 2845 } 2846 2847 /* 2848 * Returns a matching context with refcount and pincount. 2849 */ 2850 static struct perf_event_context * 2851 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu) 2852 { 2853 struct perf_event_context *ctx; 2854 struct perf_cpu_context *cpuctx; 2855 unsigned long flags; 2856 int ctxn, err; 2857 2858 if (!task) { 2859 /* Must be root to operate on a CPU event: */ 2860 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 2861 return ERR_PTR(-EACCES); 2862 2863 /* 2864 * We could be clever and allow to attach a event to an 2865 * offline CPU and activate it when the CPU comes up, but 2866 * that's for later. 2867 */ 2868 if (!cpu_online(cpu)) 2869 return ERR_PTR(-ENODEV); 2870 2871 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 2872 ctx = &cpuctx->ctx; 2873 get_ctx(ctx); 2874 ++ctx->pin_count; 2875 2876 return ctx; 2877 } 2878 2879 err = -EINVAL; 2880 ctxn = pmu->task_ctx_nr; 2881 if (ctxn < 0) 2882 goto errout; 2883 2884 retry: 2885 ctx = perf_lock_task_context(task, ctxn, &flags); 2886 if (ctx) { 2887 unclone_ctx(ctx); 2888 ++ctx->pin_count; 2889 raw_spin_unlock_irqrestore(&ctx->lock, flags); 2890 } else { 2891 ctx = alloc_perf_context(pmu, task); 2892 err = -ENOMEM; 2893 if (!ctx) 2894 goto errout; 2895 2896 err = 0; 2897 mutex_lock(&task->perf_event_mutex); 2898 /* 2899 * If it has already passed perf_event_exit_task(). 2900 * we must see PF_EXITING, it takes this mutex too. 
2901 */ 2902 if (task->flags & PF_EXITING) 2903 err = -ESRCH; 2904 else if (task->perf_event_ctxp[ctxn]) 2905 err = -EAGAIN; 2906 else { 2907 get_ctx(ctx); 2908 ++ctx->pin_count; 2909 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); 2910 } 2911 mutex_unlock(&task->perf_event_mutex); 2912 2913 if (unlikely(err)) { 2914 put_ctx(ctx); 2915 2916 if (err == -EAGAIN) 2917 goto retry; 2918 goto errout; 2919 } 2920 } 2921 2922 return ctx; 2923 2924 errout: 2925 return ERR_PTR(err); 2926 } 2927 2928 static void perf_event_free_filter(struct perf_event *event); 2929 2930 static void free_event_rcu(struct rcu_head *head) 2931 { 2932 struct perf_event *event; 2933 2934 event = container_of(head, struct perf_event, rcu_head); 2935 if (event->ns) 2936 put_pid_ns(event->ns); 2937 perf_event_free_filter(event); 2938 kfree(event); 2939 } 2940 2941 static void ring_buffer_put(struct ring_buffer *rb); 2942 2943 static void free_event(struct perf_event *event) 2944 { 2945 irq_work_sync(&event->pending); 2946 2947 if (!event->parent) { 2948 if (event->attach_state & PERF_ATTACH_TASK) 2949 jump_label_dec(&perf_sched_events); 2950 if (event->attr.mmap || event->attr.mmap_data) 2951 atomic_dec(&nr_mmap_events); 2952 if (event->attr.comm) 2953 atomic_dec(&nr_comm_events); 2954 if (event->attr.task) 2955 atomic_dec(&nr_task_events); 2956 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 2957 put_callchain_buffers(); 2958 if (is_cgroup_event(event)) { 2959 atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); 2960 jump_label_dec(&perf_sched_events); 2961 } 2962 } 2963 2964 if (event->rb) { 2965 ring_buffer_put(event->rb); 2966 event->rb = NULL; 2967 } 2968 2969 if (is_cgroup_event(event)) 2970 perf_detach_cgroup(event); 2971 2972 if (event->destroy) 2973 event->destroy(event); 2974 2975 if (event->ctx) 2976 put_ctx(event->ctx); 2977 2978 call_rcu(&event->rcu_head, free_event_rcu); 2979 } 2980 2981 int perf_event_release_kernel(struct perf_event *event) 2982 { 2983 struct perf_event_context *ctx = event->ctx; 2984 2985 WARN_ON_ONCE(ctx->parent_ctx); 2986 /* 2987 * There are two ways this annotation is useful: 2988 * 2989 * 1) there is a lock recursion from perf_event_exit_task 2990 * see the comment there. 2991 * 2992 * 2) there is a lock-inversion with mmap_sem through 2993 * perf_event_read_group(), which takes faults while 2994 * holding ctx->mutex, however this is called after 2995 * the last filedesc died, so there is no possibility 2996 * to trigger the AB-BA case. 2997 */ 2998 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); 2999 raw_spin_lock_irq(&ctx->lock); 3000 perf_group_detach(event); 3001 raw_spin_unlock_irq(&ctx->lock); 3002 perf_remove_from_context(event); 3003 mutex_unlock(&ctx->mutex); 3004 3005 free_event(event); 3006 3007 return 0; 3008 } 3009 EXPORT_SYMBOL_GPL(perf_event_release_kernel); 3010 3011 /* 3012 * Called when the last reference to the file is gone. 3013 */ 3014 static int perf_release(struct inode *inode, struct file *file) 3015 { 3016 struct perf_event *event = file->private_data; 3017 struct task_struct *owner; 3018 3019 file->private_data = NULL; 3020 3021 rcu_read_lock(); 3022 owner = ACCESS_ONCE(event->owner); 3023 /* 3024 * Matches the smp_wmb() in perf_event_exit_task(). If we observe 3025 * !owner it means the list deletion is complete and we can indeed 3026 * free this event, otherwise we need to serialize on 3027 * owner->perf_event_mutex. 
3028 */ 3029 smp_read_barrier_depends(); 3030 if (owner) { 3031 /* 3032 * Since delayed_put_task_struct() also drops the last 3033 * task reference we can safely take a new reference 3034 * while holding the rcu_read_lock(). 3035 */ 3036 get_task_struct(owner); 3037 } 3038 rcu_read_unlock(); 3039 3040 if (owner) { 3041 mutex_lock(&owner->perf_event_mutex); 3042 /* 3043 * We have to re-check the event->owner field, if it is cleared 3044 * we raced with perf_event_exit_task(), acquiring the mutex 3045 * ensured they're done, and we can proceed with freeing the 3046 * event. 3047 */ 3048 if (event->owner) 3049 list_del_init(&event->owner_entry); 3050 mutex_unlock(&owner->perf_event_mutex); 3051 put_task_struct(owner); 3052 } 3053 3054 return perf_event_release_kernel(event); 3055 } 3056 3057 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) 3058 { 3059 struct perf_event *child; 3060 u64 total = 0; 3061 3062 *enabled = 0; 3063 *running = 0; 3064 3065 mutex_lock(&event->child_mutex); 3066 total += perf_event_read(event); 3067 *enabled += event->total_time_enabled + 3068 atomic64_read(&event->child_total_time_enabled); 3069 *running += event->total_time_running + 3070 atomic64_read(&event->child_total_time_running); 3071 3072 list_for_each_entry(child, &event->child_list, child_list) { 3073 total += perf_event_read(child); 3074 *enabled += child->total_time_enabled; 3075 *running += child->total_time_running; 3076 } 3077 mutex_unlock(&event->child_mutex); 3078 3079 return total; 3080 } 3081 EXPORT_SYMBOL_GPL(perf_event_read_value); 3082 3083 static int perf_event_read_group(struct perf_event *event, 3084 u64 read_format, char __user *buf) 3085 { 3086 struct perf_event *leader = event->group_leader, *sub; 3087 int n = 0, size = 0, ret = -EFAULT; 3088 struct perf_event_context *ctx = leader->ctx; 3089 u64 values[5]; 3090 u64 count, enabled, running; 3091 3092 mutex_lock(&ctx->mutex); 3093 count = perf_event_read_value(leader, &enabled, &running); 3094 3095 values[n++] = 1 + leader->nr_siblings; 3096 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3097 values[n++] = enabled; 3098 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3099 values[n++] = running; 3100 values[n++] = count; 3101 if (read_format & PERF_FORMAT_ID) 3102 values[n++] = primary_event_id(leader); 3103 3104 size = n * sizeof(u64); 3105 3106 if (copy_to_user(buf, values, size)) 3107 goto unlock; 3108 3109 ret = size; 3110 3111 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 3112 n = 0; 3113 3114 values[n++] = perf_event_read_value(sub, &enabled, &running); 3115 if (read_format & PERF_FORMAT_ID) 3116 values[n++] = primary_event_id(sub); 3117 3118 size = n * sizeof(u64); 3119 3120 if (copy_to_user(buf + ret, values, size)) { 3121 ret = -EFAULT; 3122 goto unlock; 3123 } 3124 3125 ret += size; 3126 } 3127 unlock: 3128 mutex_unlock(&ctx->mutex); 3129 3130 return ret; 3131 } 3132 3133 static int perf_event_read_one(struct perf_event *event, 3134 u64 read_format, char __user *buf) 3135 { 3136 u64 enabled, running; 3137 u64 values[4]; 3138 int n = 0; 3139 3140 values[n++] = perf_event_read_value(event, &enabled, &running); 3141 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3142 values[n++] = enabled; 3143 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3144 values[n++] = running; 3145 if (read_format & PERF_FORMAT_ID) 3146 values[n++] = primary_event_id(event); 3147 3148 if (copy_to_user(buf, values, n * sizeof(u64))) 3149 return -EFAULT; 3150 3151 return n * sizeof(u64); 3152 } 3153 3154 
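/*
 * For illustration, a minimal user-space sketch of consuming the layout that
 * perf_event_read_one() above emits through read() on the event fd.  This
 * assumes the event was opened with read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID (the non-GROUP case); the
 * struct name and the scaling step are the reader's own, not kernel API:
 *
 *	struct read_one {
 *		__u64 value;		// the counter value
 *		__u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *		__u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *		__u64 id;		// PERF_FORMAT_ID
 *	} r;
 *
 *	if (read(fd, &r, sizeof(r)) != sizeof(r))
 *		return -1;
 *	// scale for multiplexing, as tools typically do (guard time_running == 0):
 *	// scaled = r.value * r.time_enabled / r.time_running;
 */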
/* 3155 * Read the performance event - simple non blocking version for now 3156 */ 3157 static ssize_t 3158 perf_read_hw(struct perf_event *event, char __user *buf, size_t count) 3159 { 3160 u64 read_format = event->attr.read_format; 3161 int ret; 3162 3163 /* 3164 * Return end-of-file for a read on a event that is in 3165 * error state (i.e. because it was pinned but it couldn't be 3166 * scheduled on to the CPU at some point). 3167 */ 3168 if (event->state == PERF_EVENT_STATE_ERROR) 3169 return 0; 3170 3171 if (count < event->read_size) 3172 return -ENOSPC; 3173 3174 WARN_ON_ONCE(event->ctx->parent_ctx); 3175 if (read_format & PERF_FORMAT_GROUP) 3176 ret = perf_event_read_group(event, read_format, buf); 3177 else 3178 ret = perf_event_read_one(event, read_format, buf); 3179 3180 return ret; 3181 } 3182 3183 static ssize_t 3184 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 3185 { 3186 struct perf_event *event = file->private_data; 3187 3188 return perf_read_hw(event, buf, count); 3189 } 3190 3191 static unsigned int perf_poll(struct file *file, poll_table *wait) 3192 { 3193 struct perf_event *event = file->private_data; 3194 struct ring_buffer *rb; 3195 unsigned int events = POLL_HUP; 3196 3197 /* 3198 * Race between perf_event_set_output() and perf_poll(): perf_poll() 3199 * grabs the rb reference but perf_event_set_output() overrides it. 3200 * Here is the timeline for two threads T1, T2: 3201 * t0: T1, rb = rcu_dereference(event->rb) 3202 * t1: T2, old_rb = event->rb 3203 * t2: T2, event->rb = new rb 3204 * t3: T2, ring_buffer_detach(old_rb) 3205 * t4: T1, ring_buffer_attach(rb1) 3206 * t5: T1, poll_wait(event->waitq) 3207 * 3208 * To avoid this problem, we grab mmap_mutex in perf_poll() 3209 * thereby ensuring that the assignment of the new ring buffer 3210 * and the detachment of the old buffer appear atomic to perf_poll() 3211 */ 3212 mutex_lock(&event->mmap_mutex); 3213 3214 rcu_read_lock(); 3215 rb = rcu_dereference(event->rb); 3216 if (rb) { 3217 ring_buffer_attach(event, rb); 3218 events = atomic_xchg(&rb->poll, 0); 3219 } 3220 rcu_read_unlock(); 3221 3222 mutex_unlock(&event->mmap_mutex); 3223 3224 poll_wait(file, &event->waitq, wait); 3225 3226 return events; 3227 } 3228 3229 static void perf_event_reset(struct perf_event *event) 3230 { 3231 (void)perf_event_read(event); 3232 local64_set(&event->count, 0); 3233 perf_event_update_userpage(event); 3234 } 3235 3236 /* 3237 * Holding the top-level event's child_mutex means that any 3238 * descendant process that has inherited this event will block 3239 * in sync_child_event if it goes to exit, thus satisfying the 3240 * task existence requirements of perf_event_enable/disable. 
3241 */ 3242 static void perf_event_for_each_child(struct perf_event *event, 3243 void (*func)(struct perf_event *)) 3244 { 3245 struct perf_event *child; 3246 3247 WARN_ON_ONCE(event->ctx->parent_ctx); 3248 mutex_lock(&event->child_mutex); 3249 func(event); 3250 list_for_each_entry(child, &event->child_list, child_list) 3251 func(child); 3252 mutex_unlock(&event->child_mutex); 3253 } 3254 3255 static void perf_event_for_each(struct perf_event *event, 3256 void (*func)(struct perf_event *)) 3257 { 3258 struct perf_event_context *ctx = event->ctx; 3259 struct perf_event *sibling; 3260 3261 WARN_ON_ONCE(ctx->parent_ctx); 3262 mutex_lock(&ctx->mutex); 3263 event = event->group_leader; 3264 3265 perf_event_for_each_child(event, func); 3266 func(event); 3267 list_for_each_entry(sibling, &event->sibling_list, group_entry) 3268 perf_event_for_each_child(event, func); 3269 mutex_unlock(&ctx->mutex); 3270 } 3271 3272 static int perf_event_period(struct perf_event *event, u64 __user *arg) 3273 { 3274 struct perf_event_context *ctx = event->ctx; 3275 int ret = 0; 3276 u64 value; 3277 3278 if (!is_sampling_event(event)) 3279 return -EINVAL; 3280 3281 if (copy_from_user(&value, arg, sizeof(value))) 3282 return -EFAULT; 3283 3284 if (!value) 3285 return -EINVAL; 3286 3287 raw_spin_lock_irq(&ctx->lock); 3288 if (event->attr.freq) { 3289 if (value > sysctl_perf_event_sample_rate) { 3290 ret = -EINVAL; 3291 goto unlock; 3292 } 3293 3294 event->attr.sample_freq = value; 3295 } else { 3296 event->attr.sample_period = value; 3297 event->hw.sample_period = value; 3298 } 3299 unlock: 3300 raw_spin_unlock_irq(&ctx->lock); 3301 3302 return ret; 3303 } 3304 3305 static const struct file_operations perf_fops; 3306 3307 static struct perf_event *perf_fget_light(int fd, int *fput_needed) 3308 { 3309 struct file *file; 3310 3311 file = fget_light(fd, fput_needed); 3312 if (!file) 3313 return ERR_PTR(-EBADF); 3314 3315 if (file->f_op != &perf_fops) { 3316 fput_light(file, *fput_needed); 3317 *fput_needed = 0; 3318 return ERR_PTR(-EBADF); 3319 } 3320 3321 return file->private_data; 3322 } 3323 3324 static int perf_event_set_output(struct perf_event *event, 3325 struct perf_event *output_event); 3326 static int perf_event_set_filter(struct perf_event *event, void __user *arg); 3327 3328 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 3329 { 3330 struct perf_event *event = file->private_data; 3331 void (*func)(struct perf_event *); 3332 u32 flags = arg; 3333 3334 switch (cmd) { 3335 case PERF_EVENT_IOC_ENABLE: 3336 func = perf_event_enable; 3337 break; 3338 case PERF_EVENT_IOC_DISABLE: 3339 func = perf_event_disable; 3340 break; 3341 case PERF_EVENT_IOC_RESET: 3342 func = perf_event_reset; 3343 break; 3344 3345 case PERF_EVENT_IOC_REFRESH: 3346 return perf_event_refresh(event, arg); 3347 3348 case PERF_EVENT_IOC_PERIOD: 3349 return perf_event_period(event, (u64 __user *)arg); 3350 3351 case PERF_EVENT_IOC_SET_OUTPUT: 3352 { 3353 struct perf_event *output_event = NULL; 3354 int fput_needed = 0; 3355 int ret; 3356 3357 if (arg != -1) { 3358 output_event = perf_fget_light(arg, &fput_needed); 3359 if (IS_ERR(output_event)) 3360 return PTR_ERR(output_event); 3361 } 3362 3363 ret = perf_event_set_output(event, output_event); 3364 if (output_event) 3365 fput_light(output_event->filp, fput_needed); 3366 3367 return ret; 3368 } 3369 3370 case PERF_EVENT_IOC_SET_FILTER: 3371 return perf_event_set_filter(event, (void __user *)arg); 3372 3373 default: 3374 return -ENOTTY; 3375 } 3376 3377 if (flags & 
PERF_IOC_FLAG_GROUP) 3378 perf_event_for_each(event, func); 3379 else 3380 perf_event_for_each_child(event, func); 3381 3382 return 0; 3383 } 3384 3385 int perf_event_task_enable(void) 3386 { 3387 struct perf_event *event; 3388 3389 mutex_lock(&current->perf_event_mutex); 3390 list_for_each_entry(event, &current->perf_event_list, owner_entry) 3391 perf_event_for_each_child(event, perf_event_enable); 3392 mutex_unlock(&current->perf_event_mutex); 3393 3394 return 0; 3395 } 3396 3397 int perf_event_task_disable(void) 3398 { 3399 struct perf_event *event; 3400 3401 mutex_lock(&current->perf_event_mutex); 3402 list_for_each_entry(event, &current->perf_event_list, owner_entry) 3403 perf_event_for_each_child(event, perf_event_disable); 3404 mutex_unlock(&current->perf_event_mutex); 3405 3406 return 0; 3407 } 3408 3409 #ifndef PERF_EVENT_INDEX_OFFSET 3410 # define PERF_EVENT_INDEX_OFFSET 0 3411 #endif 3412 3413 static int perf_event_index(struct perf_event *event) 3414 { 3415 if (event->hw.state & PERF_HES_STOPPED) 3416 return 0; 3417 3418 if (event->state != PERF_EVENT_STATE_ACTIVE) 3419 return 0; 3420 3421 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET; 3422 } 3423 3424 static void calc_timer_values(struct perf_event *event, 3425 u64 *enabled, 3426 u64 *running) 3427 { 3428 u64 now, ctx_time; 3429 3430 now = perf_clock(); 3431 ctx_time = event->shadow_ctx_time + now; 3432 *enabled = ctx_time - event->tstamp_enabled; 3433 *running = ctx_time - event->tstamp_running; 3434 } 3435 3436 /* 3437 * Callers need to ensure there can be no nesting of this function, otherwise 3438 * the seqlock logic goes bad. We cannot serialize this because the arch 3439 * code calls this from NMI context. 3440 */ 3441 void perf_event_update_userpage(struct perf_event *event) 3442 { 3443 struct perf_event_mmap_page *userpg; 3444 struct ring_buffer *rb; 3445 u64 enabled, running; 3446 3447 rcu_read_lock(); 3448 /* 3449 * Compute total_time_enabled and total_time_running 3450 * based on snapshot values taken when the event 3451 * was last scheduled in. 3452 * 3453 * We cannot simply call update_context_time() 3454 * because of locking issues, as we can be called in 3455 * NMI context. 3456 */ 3457 calc_timer_values(event, &enabled, &running); 3458 rb = rcu_dereference(event->rb); 3459 if (!rb) 3460 goto unlock; 3461 3462 userpg = rb->user_page; 3463 3464 /* 3465 * Disable preemption so as not to let the corresponding user-space 3466 * spin too long if we get preempted.
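 *
 * For illustration, the matching user-space side spins on the same lock word;
 * a minimal sketch, assuming 'pc' points at the mmap'ed
 * struct perf_event_mmap_page and barrier() is a compiler barrier supplied by
 * the reader:
 *
 *	__u32 seq;
 *	__u64 enabled, running;
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		enabled = pc->time_enabled;
 *		running = pc->time_running;
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * which is why the spinning below must not be prolonged by preemption.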
3467 */ 3468 preempt_disable(); 3469 ++userpg->lock; 3470 barrier(); 3471 userpg->index = perf_event_index(event); 3472 userpg->offset = perf_event_count(event); 3473 if (event->state == PERF_EVENT_STATE_ACTIVE) 3474 userpg->offset -= local64_read(&event->hw.prev_count); 3475 3476 userpg->time_enabled = enabled + 3477 atomic64_read(&event->child_total_time_enabled); 3478 3479 userpg->time_running = running + 3480 atomic64_read(&event->child_total_time_running); 3481 3482 barrier(); 3483 ++userpg->lock; 3484 preempt_enable(); 3485 unlock: 3486 rcu_read_unlock(); 3487 } 3488 3489 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 3490 { 3491 struct perf_event *event = vma->vm_file->private_data; 3492 struct ring_buffer *rb; 3493 int ret = VM_FAULT_SIGBUS; 3494 3495 if (vmf->flags & FAULT_FLAG_MKWRITE) { 3496 if (vmf->pgoff == 0) 3497 ret = 0; 3498 return ret; 3499 } 3500 3501 rcu_read_lock(); 3502 rb = rcu_dereference(event->rb); 3503 if (!rb) 3504 goto unlock; 3505 3506 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) 3507 goto unlock; 3508 3509 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); 3510 if (!vmf->page) 3511 goto unlock; 3512 3513 get_page(vmf->page); 3514 vmf->page->mapping = vma->vm_file->f_mapping; 3515 vmf->page->index = vmf->pgoff; 3516 3517 ret = 0; 3518 unlock: 3519 rcu_read_unlock(); 3520 3521 return ret; 3522 } 3523 3524 static void ring_buffer_attach(struct perf_event *event, 3525 struct ring_buffer *rb) 3526 { 3527 unsigned long flags; 3528 3529 if (!list_empty(&event->rb_entry)) 3530 return; 3531 3532 spin_lock_irqsave(&rb->event_lock, flags); 3533 if (!list_empty(&event->rb_entry)) 3534 goto unlock; 3535 3536 list_add(&event->rb_entry, &rb->event_list); 3537 unlock: 3538 spin_unlock_irqrestore(&rb->event_lock, flags); 3539 } 3540 3541 static void ring_buffer_detach(struct perf_event *event, 3542 struct ring_buffer *rb) 3543 { 3544 unsigned long flags; 3545 3546 if (list_empty(&event->rb_entry)) 3547 return; 3548 3549 spin_lock_irqsave(&rb->event_lock, flags); 3550 list_del_init(&event->rb_entry); 3551 wake_up_all(&event->waitq); 3552 spin_unlock_irqrestore(&rb->event_lock, flags); 3553 } 3554 3555 static void ring_buffer_wakeup(struct perf_event *event) 3556 { 3557 struct ring_buffer *rb; 3558 3559 rcu_read_lock(); 3560 rb = rcu_dereference(event->rb); 3561 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 3562 wake_up_all(&event->waitq); 3563 } 3564 rcu_read_unlock(); 3565 } 3566 3567 static void rb_free_rcu(struct rcu_head *rcu_head) 3568 { 3569 struct ring_buffer *rb; 3570 3571 rb = container_of(rcu_head, struct ring_buffer, rcu_head); 3572 rb_free(rb); 3573 } 3574 3575 static struct ring_buffer *ring_buffer_get(struct perf_event *event) 3576 { 3577 struct ring_buffer *rb; 3578 3579 rcu_read_lock(); 3580 rb = rcu_dereference(event->rb); 3581 if (rb) { 3582 if (!atomic_inc_not_zero(&rb->refcount)) 3583 rb = NULL; 3584 } 3585 rcu_read_unlock(); 3586 3587 return rb; 3588 } 3589 3590 static void ring_buffer_put(struct ring_buffer *rb) 3591 { 3592 struct perf_event *event, *n; 3593 unsigned long flags; 3594 3595 if (!atomic_dec_and_test(&rb->refcount)) 3596 return; 3597 3598 spin_lock_irqsave(&rb->event_lock, flags); 3599 list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) { 3600 list_del_init(&event->rb_entry); 3601 wake_up_all(&event->waitq); 3602 } 3603 spin_unlock_irqrestore(&rb->event_lock, flags); 3604 3605 call_rcu(&rb->rcu_head, rb_free_rcu); 3606 } 3607 3608 static void perf_mmap_open(struct vm_area_struct *vma) 
3609 { 3610 struct perf_event *event = vma->vm_file->private_data; 3611 3612 atomic_inc(&event->mmap_count); 3613 } 3614 3615 static void perf_mmap_close(struct vm_area_struct *vma) 3616 { 3617 struct perf_event *event = vma->vm_file->private_data; 3618 3619 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { 3620 unsigned long size = perf_data_size(event->rb); 3621 struct user_struct *user = event->mmap_user; 3622 struct ring_buffer *rb = event->rb; 3623 3624 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); 3625 vma->vm_mm->pinned_vm -= event->mmap_locked; 3626 rcu_assign_pointer(event->rb, NULL); 3627 ring_buffer_detach(event, rb); 3628 mutex_unlock(&event->mmap_mutex); 3629 3630 ring_buffer_put(rb); 3631 free_uid(user); 3632 } 3633 } 3634 3635 static const struct vm_operations_struct perf_mmap_vmops = { 3636 .open = perf_mmap_open, 3637 .close = perf_mmap_close, 3638 .fault = perf_mmap_fault, 3639 .page_mkwrite = perf_mmap_fault, 3640 }; 3641 3642 static int perf_mmap(struct file *file, struct vm_area_struct *vma) 3643 { 3644 struct perf_event *event = file->private_data; 3645 unsigned long user_locked, user_lock_limit; 3646 struct user_struct *user = current_user(); 3647 unsigned long locked, lock_limit; 3648 struct ring_buffer *rb; 3649 unsigned long vma_size; 3650 unsigned long nr_pages; 3651 long user_extra, extra; 3652 int ret = 0, flags = 0; 3653 3654 /* 3655 * Don't allow mmap() of inherited per-task counters. This would 3656 * create a performance issue due to all children writing to the 3657 * same rb. 3658 */ 3659 if (event->cpu == -1 && event->attr.inherit) 3660 return -EINVAL; 3661 3662 if (!(vma->vm_flags & VM_SHARED)) 3663 return -EINVAL; 3664 3665 vma_size = vma->vm_end - vma->vm_start; 3666 nr_pages = (vma_size / PAGE_SIZE) - 1; 3667 3668 /* 3669 * If we have rb pages ensure they're a power-of-two number, so we 3670 * can do bitmasks instead of modulo. 3671 */ 3672 if (nr_pages != 0 && !is_power_of_2(nr_pages)) 3673 return -EINVAL; 3674 3675 if (vma_size != PAGE_SIZE * (1 + nr_pages)) 3676 return -EINVAL; 3677 3678 if (vma->vm_pgoff != 0) 3679 return -EINVAL; 3680 3681 WARN_ON_ONCE(event->ctx->parent_ctx); 3682 mutex_lock(&event->mmap_mutex); 3683 if (event->rb) { 3684 if (event->rb->nr_pages == nr_pages) 3685 atomic_inc(&event->rb->refcount); 3686 else 3687 ret = -EINVAL; 3688 goto unlock; 3689 } 3690 3691 user_extra = nr_pages + 1; 3692 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); 3693 3694 /* 3695 * Increase the limit linearly with more CPUs: 3696 */ 3697 user_lock_limit *= num_online_cpus(); 3698 3699 user_locked = atomic_long_read(&user->locked_vm) + user_extra; 3700 3701 extra = 0; 3702 if (user_locked > user_lock_limit) 3703 extra = user_locked - user_lock_limit; 3704 3705 lock_limit = rlimit(RLIMIT_MEMLOCK); 3706 lock_limit >>= PAGE_SHIFT; 3707 locked = vma->vm_mm->pinned_vm + extra; 3708 3709 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && 3710 !capable(CAP_IPC_LOCK)) { 3711 ret = -EPERM; 3712 goto unlock; 3713 } 3714 3715 WARN_ON(event->rb); 3716 3717 if (vma->vm_flags & VM_WRITE) 3718 flags |= RING_BUFFER_WRITABLE; 3719 3720 rb = rb_alloc(nr_pages, 3721 event->attr.watermark ? 
event->attr.wakeup_watermark : 0, 3722 event->cpu, flags); 3723 3724 if (!rb) { 3725 ret = -ENOMEM; 3726 goto unlock; 3727 } 3728 rcu_assign_pointer(event->rb, rb); 3729 3730 atomic_long_add(user_extra, &user->locked_vm); 3731 event->mmap_locked = extra; 3732 event->mmap_user = get_current_user(); 3733 vma->vm_mm->pinned_vm += event->mmap_locked; 3734 3735 unlock: 3736 if (!ret) 3737 atomic_inc(&event->mmap_count); 3738 mutex_unlock(&event->mmap_mutex); 3739 3740 vma->vm_flags |= VM_RESERVED; 3741 vma->vm_ops = &perf_mmap_vmops; 3742 3743 return ret; 3744 } 3745 3746 static int perf_fasync(int fd, struct file *filp, int on) 3747 { 3748 struct inode *inode = filp->f_path.dentry->d_inode; 3749 struct perf_event *event = filp->private_data; 3750 int retval; 3751 3752 mutex_lock(&inode->i_mutex); 3753 retval = fasync_helper(fd, filp, on, &event->fasync); 3754 mutex_unlock(&inode->i_mutex); 3755 3756 if (retval < 0) 3757 return retval; 3758 3759 return 0; 3760 } 3761 3762 static const struct file_operations perf_fops = { 3763 .llseek = no_llseek, 3764 .release = perf_release, 3765 .read = perf_read, 3766 .poll = perf_poll, 3767 .unlocked_ioctl = perf_ioctl, 3768 .compat_ioctl = perf_ioctl, 3769 .mmap = perf_mmap, 3770 .fasync = perf_fasync, 3771 }; 3772 3773 /* 3774 * Perf event wakeup 3775 * 3776 * If there's data, ensure we set the poll() state and publish everything 3777 * to user-space before waking everybody up. 3778 */ 3779 3780 void perf_event_wakeup(struct perf_event *event) 3781 { 3782 ring_buffer_wakeup(event); 3783 3784 if (event->pending_kill) { 3785 kill_fasync(&event->fasync, SIGIO, event->pending_kill); 3786 event->pending_kill = 0; 3787 } 3788 } 3789 3790 static void perf_pending_event(struct irq_work *entry) 3791 { 3792 struct perf_event *event = container_of(entry, 3793 struct perf_event, pending); 3794 3795 if (event->pending_disable) { 3796 event->pending_disable = 0; 3797 __perf_event_disable(event); 3798 } 3799 3800 if (event->pending_wakeup) { 3801 event->pending_wakeup = 0; 3802 perf_event_wakeup(event); 3803 } 3804 } 3805 3806 /* 3807 * We assume there is only KVM supporting the callbacks. 3808 * Later on, we might change it to a list if there is 3809 * another virtualization implementation supporting the callbacks. 
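 *
 * For illustration, a registration sketch as a hypervisor module might do it,
 * assuming the is_in_guest/is_user_mode/get_guest_ip hooks of
 * struct perf_guest_info_callbacks; the kvm_* helpers are placeholders for the
 * hypervisor's own code, and only perf_register_guest_info_callbacks() below
 * is the real entry point:
 *
 *	static struct perf_guest_info_callbacks kvm_guest_cbs = {
 *		.is_in_guest	= kvm_is_in_guest,
 *		.is_user_mode	= kvm_is_user_mode,
 *		.get_guest_ip	= kvm_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&kvm_guest_cbs);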
3810 */ 3811 struct perf_guest_info_callbacks *perf_guest_cbs; 3812 3813 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 3814 { 3815 perf_guest_cbs = cbs; 3816 return 0; 3817 } 3818 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); 3819 3820 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 3821 { 3822 perf_guest_cbs = NULL; 3823 return 0; 3824 } 3825 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); 3826 3827 static void __perf_event_header__init_id(struct perf_event_header *header, 3828 struct perf_sample_data *data, 3829 struct perf_event *event) 3830 { 3831 u64 sample_type = event->attr.sample_type; 3832 3833 data->type = sample_type; 3834 header->size += event->id_header_size; 3835 3836 if (sample_type & PERF_SAMPLE_TID) { 3837 /* namespace issues */ 3838 data->tid_entry.pid = perf_event_pid(event, current); 3839 data->tid_entry.tid = perf_event_tid(event, current); 3840 } 3841 3842 if (sample_type & PERF_SAMPLE_TIME) 3843 data->time = perf_clock(); 3844 3845 if (sample_type & PERF_SAMPLE_ID) 3846 data->id = primary_event_id(event); 3847 3848 if (sample_type & PERF_SAMPLE_STREAM_ID) 3849 data->stream_id = event->id; 3850 3851 if (sample_type & PERF_SAMPLE_CPU) { 3852 data->cpu_entry.cpu = raw_smp_processor_id(); 3853 data->cpu_entry.reserved = 0; 3854 } 3855 } 3856 3857 void perf_event_header__init_id(struct perf_event_header *header, 3858 struct perf_sample_data *data, 3859 struct perf_event *event) 3860 { 3861 if (event->attr.sample_id_all) 3862 __perf_event_header__init_id(header, data, event); 3863 } 3864 3865 static void __perf_event__output_id_sample(struct perf_output_handle *handle, 3866 struct perf_sample_data *data) 3867 { 3868 u64 sample_type = data->type; 3869 3870 if (sample_type & PERF_SAMPLE_TID) 3871 perf_output_put(handle, data->tid_entry); 3872 3873 if (sample_type & PERF_SAMPLE_TIME) 3874 perf_output_put(handle, data->time); 3875 3876 if (sample_type & PERF_SAMPLE_ID) 3877 perf_output_put(handle, data->id); 3878 3879 if (sample_type & PERF_SAMPLE_STREAM_ID) 3880 perf_output_put(handle, data->stream_id); 3881 3882 if (sample_type & PERF_SAMPLE_CPU) 3883 perf_output_put(handle, data->cpu_entry); 3884 } 3885 3886 void perf_event__output_id_sample(struct perf_event *event, 3887 struct perf_output_handle *handle, 3888 struct perf_sample_data *sample) 3889 { 3890 if (event->attr.sample_id_all) 3891 __perf_event__output_id_sample(handle, sample); 3892 } 3893 3894 static void perf_output_read_one(struct perf_output_handle *handle, 3895 struct perf_event *event, 3896 u64 enabled, u64 running) 3897 { 3898 u64 read_format = event->attr.read_format; 3899 u64 values[4]; 3900 int n = 0; 3901 3902 values[n++] = perf_event_count(event); 3903 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 3904 values[n++] = enabled + 3905 atomic64_read(&event->child_total_time_enabled); 3906 } 3907 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 3908 values[n++] = running + 3909 atomic64_read(&event->child_total_time_running); 3910 } 3911 if (read_format & PERF_FORMAT_ID) 3912 values[n++] = primary_event_id(event); 3913 3914 __output_copy(handle, values, n * sizeof(u64)); 3915 } 3916 3917 /* 3918 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 
3919 */ 3920 static void perf_output_read_group(struct perf_output_handle *handle, 3921 struct perf_event *event, 3922 u64 enabled, u64 running) 3923 { 3924 struct perf_event *leader = event->group_leader, *sub; 3925 u64 read_format = event->attr.read_format; 3926 u64 values[5]; 3927 int n = 0; 3928 3929 values[n++] = 1 + leader->nr_siblings; 3930 3931 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3932 values[n++] = enabled; 3933 3934 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3935 values[n++] = running; 3936 3937 if (leader != event) 3938 leader->pmu->read(leader); 3939 3940 values[n++] = perf_event_count(leader); 3941 if (read_format & PERF_FORMAT_ID) 3942 values[n++] = primary_event_id(leader); 3943 3944 __output_copy(handle, values, n * sizeof(u64)); 3945 3946 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 3947 n = 0; 3948 3949 if (sub != event) 3950 sub->pmu->read(sub); 3951 3952 values[n++] = perf_event_count(sub); 3953 if (read_format & PERF_FORMAT_ID) 3954 values[n++] = primary_event_id(sub); 3955 3956 __output_copy(handle, values, n * sizeof(u64)); 3957 } 3958 } 3959 3960 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ 3961 PERF_FORMAT_TOTAL_TIME_RUNNING) 3962 3963 static void perf_output_read(struct perf_output_handle *handle, 3964 struct perf_event *event) 3965 { 3966 u64 enabled = 0, running = 0; 3967 u64 read_format = event->attr.read_format; 3968 3969 /* 3970 * compute total_time_enabled, total_time_running 3971 * based on snapshot values taken when the event 3972 * was last scheduled in. 3973 * 3974 * we cannot simply called update_context_time() 3975 * because of locking issue as we are called in 3976 * NMI context 3977 */ 3978 if (read_format & PERF_FORMAT_TOTAL_TIMES) 3979 calc_timer_values(event, &enabled, &running); 3980 3981 if (event->attr.read_format & PERF_FORMAT_GROUP) 3982 perf_output_read_group(handle, event, enabled, running); 3983 else 3984 perf_output_read_one(handle, event, enabled, running); 3985 } 3986 3987 void perf_output_sample(struct perf_output_handle *handle, 3988 struct perf_event_header *header, 3989 struct perf_sample_data *data, 3990 struct perf_event *event) 3991 { 3992 u64 sample_type = data->type; 3993 3994 perf_output_put(handle, *header); 3995 3996 if (sample_type & PERF_SAMPLE_IP) 3997 perf_output_put(handle, data->ip); 3998 3999 if (sample_type & PERF_SAMPLE_TID) 4000 perf_output_put(handle, data->tid_entry); 4001 4002 if (sample_type & PERF_SAMPLE_TIME) 4003 perf_output_put(handle, data->time); 4004 4005 if (sample_type & PERF_SAMPLE_ADDR) 4006 perf_output_put(handle, data->addr); 4007 4008 if (sample_type & PERF_SAMPLE_ID) 4009 perf_output_put(handle, data->id); 4010 4011 if (sample_type & PERF_SAMPLE_STREAM_ID) 4012 perf_output_put(handle, data->stream_id); 4013 4014 if (sample_type & PERF_SAMPLE_CPU) 4015 perf_output_put(handle, data->cpu_entry); 4016 4017 if (sample_type & PERF_SAMPLE_PERIOD) 4018 perf_output_put(handle, data->period); 4019 4020 if (sample_type & PERF_SAMPLE_READ) 4021 perf_output_read(handle, event); 4022 4023 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 4024 if (data->callchain) { 4025 int size = 1; 4026 4027 if (data->callchain) 4028 size += data->callchain->nr; 4029 4030 size *= sizeof(u64); 4031 4032 __output_copy(handle, data->callchain, size); 4033 } else { 4034 u64 nr = 0; 4035 perf_output_put(handle, nr); 4036 } 4037 } 4038 4039 if (sample_type & PERF_SAMPLE_RAW) { 4040 if (data->raw) { 4041 perf_output_put(handle, data->raw->size); 4042 __output_copy(handle, 
data->raw->data, 4043 data->raw->size); 4044 } else { 4045 struct { 4046 u32 size; 4047 u32 data; 4048 } raw = { 4049 .size = sizeof(u32), 4050 .data = 0, 4051 }; 4052 perf_output_put(handle, raw); 4053 } 4054 } 4055 4056 if (!event->attr.watermark) { 4057 int wakeup_events = event->attr.wakeup_events; 4058 4059 if (wakeup_events) { 4060 struct ring_buffer *rb = handle->rb; 4061 int events = local_inc_return(&rb->events); 4062 4063 if (events >= wakeup_events) { 4064 local_sub(wakeup_events, &rb->events); 4065 local_inc(&rb->wakeup); 4066 } 4067 } 4068 } 4069 } 4070 4071 void perf_prepare_sample(struct perf_event_header *header, 4072 struct perf_sample_data *data, 4073 struct perf_event *event, 4074 struct pt_regs *regs) 4075 { 4076 u64 sample_type = event->attr.sample_type; 4077 4078 header->type = PERF_RECORD_SAMPLE; 4079 header->size = sizeof(*header) + event->header_size; 4080 4081 header->misc = 0; 4082 header->misc |= perf_misc_flags(regs); 4083 4084 __perf_event_header__init_id(header, data, event); 4085 4086 if (sample_type & PERF_SAMPLE_IP) 4087 data->ip = perf_instruction_pointer(regs); 4088 4089 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 4090 int size = 1; 4091 4092 data->callchain = perf_callchain(regs); 4093 4094 if (data->callchain) 4095 size += data->callchain->nr; 4096 4097 header->size += size * sizeof(u64); 4098 } 4099 4100 if (sample_type & PERF_SAMPLE_RAW) { 4101 int size = sizeof(u32); 4102 4103 if (data->raw) 4104 size += data->raw->size; 4105 else 4106 size += sizeof(u32); 4107 4108 WARN_ON_ONCE(size & (sizeof(u64)-1)); 4109 header->size += size; 4110 } 4111 } 4112 4113 static void perf_event_output(struct perf_event *event, 4114 struct perf_sample_data *data, 4115 struct pt_regs *regs) 4116 { 4117 struct perf_output_handle handle; 4118 struct perf_event_header header; 4119 4120 /* protect the callchain buffers */ 4121 rcu_read_lock(); 4122 4123 perf_prepare_sample(&header, data, event, regs); 4124 4125 if (perf_output_begin(&handle, event, header.size)) 4126 goto exit; 4127 4128 perf_output_sample(&handle, &header, data, event); 4129 4130 perf_output_end(&handle); 4131 4132 exit: 4133 rcu_read_unlock(); 4134 } 4135 4136 /* 4137 * read event_id 4138 */ 4139 4140 struct perf_read_event { 4141 struct perf_event_header header; 4142 4143 u32 pid; 4144 u32 tid; 4145 }; 4146 4147 static void 4148 perf_event_read_event(struct perf_event *event, 4149 struct task_struct *task) 4150 { 4151 struct perf_output_handle handle; 4152 struct perf_sample_data sample; 4153 struct perf_read_event read_event = { 4154 .header = { 4155 .type = PERF_RECORD_READ, 4156 .misc = 0, 4157 .size = sizeof(read_event) + event->read_size, 4158 }, 4159 .pid = perf_event_pid(event, task), 4160 .tid = perf_event_tid(event, task), 4161 }; 4162 int ret; 4163 4164 perf_event_header__init_id(&read_event.header, &sample, event); 4165 ret = perf_output_begin(&handle, event, read_event.header.size); 4166 if (ret) 4167 return; 4168 4169 perf_output_put(&handle, read_event); 4170 perf_output_read(&handle, event); 4171 perf_event__output_id_sample(event, &handle, &sample); 4172 4173 perf_output_end(&handle); 4174 } 4175 4176 /* 4177 * task tracking -- fork/exit 4178 * 4179 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task 4180 */ 4181 4182 struct perf_task_event { 4183 struct task_struct *task; 4184 struct perf_event_context *task_ctx; 4185 4186 struct { 4187 struct perf_event_header header; 4188 4189 u32 pid; 4190 u32 ppid; 4191 u32 tid; 4192 u32 ptid; 4193 u64 time; 4194 } event_id; 4195 }; 4196 
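/*
 * For illustration, the record emitted for the event_id above looks like this
 * from a ring-buffer reader's point of view (a sketch; the struct name is the
 * reader's own, and the trailer is only present when attr.sample_id_all is set):
 *
 *	struct task_record {
 *		struct perf_event_header header;	// PERF_RECORD_FORK or PERF_RECORD_EXIT
 *		__u32 pid, ppid;
 *		__u32 tid, ptid;
 *		__u64 time;
 *		// followed by the sample_id fields written by
 *		// perf_event__output_id_sample() when sample_id_all is set
 *	};
 */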
4197 static void perf_event_task_output(struct perf_event *event, 4198 struct perf_task_event *task_event) 4199 { 4200 struct perf_output_handle handle; 4201 struct perf_sample_data sample; 4202 struct task_struct *task = task_event->task; 4203 int ret, size = task_event->event_id.header.size; 4204 4205 perf_event_header__init_id(&task_event->event_id.header, &sample, event); 4206 4207 ret = perf_output_begin(&handle, event, 4208 task_event->event_id.header.size); 4209 if (ret) 4210 goto out; 4211 4212 task_event->event_id.pid = perf_event_pid(event, task); 4213 task_event->event_id.ppid = perf_event_pid(event, current); 4214 4215 task_event->event_id.tid = perf_event_tid(event, task); 4216 task_event->event_id.ptid = perf_event_tid(event, current); 4217 4218 perf_output_put(&handle, task_event->event_id); 4219 4220 perf_event__output_id_sample(event, &handle, &sample); 4221 4222 perf_output_end(&handle); 4223 out: 4224 task_event->event_id.header.size = size; 4225 } 4226 4227 static int perf_event_task_match(struct perf_event *event) 4228 { 4229 if (event->state < PERF_EVENT_STATE_INACTIVE) 4230 return 0; 4231 4232 if (!event_filter_match(event)) 4233 return 0; 4234 4235 if (event->attr.comm || event->attr.mmap || 4236 event->attr.mmap_data || event->attr.task) 4237 return 1; 4238 4239 return 0; 4240 } 4241 4242 static void perf_event_task_ctx(struct perf_event_context *ctx, 4243 struct perf_task_event *task_event) 4244 { 4245 struct perf_event *event; 4246 4247 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 4248 if (perf_event_task_match(event)) 4249 perf_event_task_output(event, task_event); 4250 } 4251 } 4252 4253 static void perf_event_task_event(struct perf_task_event *task_event) 4254 { 4255 struct perf_cpu_context *cpuctx; 4256 struct perf_event_context *ctx; 4257 struct pmu *pmu; 4258 int ctxn; 4259 4260 rcu_read_lock(); 4261 list_for_each_entry_rcu(pmu, &pmus, entry) { 4262 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4263 if (cpuctx->active_pmu != pmu) 4264 goto next; 4265 perf_event_task_ctx(&cpuctx->ctx, task_event); 4266 4267 ctx = task_event->task_ctx; 4268 if (!ctx) { 4269 ctxn = pmu->task_ctx_nr; 4270 if (ctxn < 0) 4271 goto next; 4272 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 4273 } 4274 if (ctx) 4275 perf_event_task_ctx(ctx, task_event); 4276 next: 4277 put_cpu_ptr(pmu->pmu_cpu_context); 4278 } 4279 rcu_read_unlock(); 4280 } 4281 4282 static void perf_event_task(struct task_struct *task, 4283 struct perf_event_context *task_ctx, 4284 int new) 4285 { 4286 struct perf_task_event task_event; 4287 4288 if (!atomic_read(&nr_comm_events) && 4289 !atomic_read(&nr_mmap_events) && 4290 !atomic_read(&nr_task_events)) 4291 return; 4292 4293 task_event = (struct perf_task_event){ 4294 .task = task, 4295 .task_ctx = task_ctx, 4296 .event_id = { 4297 .header = { 4298 .type = new ? 
PERF_RECORD_FORK : PERF_RECORD_EXIT, 4299 .misc = 0, 4300 .size = sizeof(task_event.event_id), 4301 }, 4302 /* .pid */ 4303 /* .ppid */ 4304 /* .tid */ 4305 /* .ptid */ 4306 .time = perf_clock(), 4307 }, 4308 }; 4309 4310 perf_event_task_event(&task_event); 4311 } 4312 4313 void perf_event_fork(struct task_struct *task) 4314 { 4315 perf_event_task(task, NULL, 1); 4316 } 4317 4318 /* 4319 * comm tracking 4320 */ 4321 4322 struct perf_comm_event { 4323 struct task_struct *task; 4324 char *comm; 4325 int comm_size; 4326 4327 struct { 4328 struct perf_event_header header; 4329 4330 u32 pid; 4331 u32 tid; 4332 } event_id; 4333 }; 4334 4335 static void perf_event_comm_output(struct perf_event *event, 4336 struct perf_comm_event *comm_event) 4337 { 4338 struct perf_output_handle handle; 4339 struct perf_sample_data sample; 4340 int size = comm_event->event_id.header.size; 4341 int ret; 4342 4343 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); 4344 ret = perf_output_begin(&handle, event, 4345 comm_event->event_id.header.size); 4346 4347 if (ret) 4348 goto out; 4349 4350 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); 4351 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); 4352 4353 perf_output_put(&handle, comm_event->event_id); 4354 __output_copy(&handle, comm_event->comm, 4355 comm_event->comm_size); 4356 4357 perf_event__output_id_sample(event, &handle, &sample); 4358 4359 perf_output_end(&handle); 4360 out: 4361 comm_event->event_id.header.size = size; 4362 } 4363 4364 static int perf_event_comm_match(struct perf_event *event) 4365 { 4366 if (event->state < PERF_EVENT_STATE_INACTIVE) 4367 return 0; 4368 4369 if (!event_filter_match(event)) 4370 return 0; 4371 4372 if (event->attr.comm) 4373 return 1; 4374 4375 return 0; 4376 } 4377 4378 static void perf_event_comm_ctx(struct perf_event_context *ctx, 4379 struct perf_comm_event *comm_event) 4380 { 4381 struct perf_event *event; 4382 4383 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 4384 if (perf_event_comm_match(event)) 4385 perf_event_comm_output(event, comm_event); 4386 } 4387 } 4388 4389 static void perf_event_comm_event(struct perf_comm_event *comm_event) 4390 { 4391 struct perf_cpu_context *cpuctx; 4392 struct perf_event_context *ctx; 4393 char comm[TASK_COMM_LEN]; 4394 unsigned int size; 4395 struct pmu *pmu; 4396 int ctxn; 4397 4398 memset(comm, 0, sizeof(comm)); 4399 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 4400 size = ALIGN(strlen(comm)+1, sizeof(u64)); 4401 4402 comm_event->comm = comm; 4403 comm_event->comm_size = size; 4404 4405 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 4406 rcu_read_lock(); 4407 list_for_each_entry_rcu(pmu, &pmus, entry) { 4408 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4409 if (cpuctx->active_pmu != pmu) 4410 goto next; 4411 perf_event_comm_ctx(&cpuctx->ctx, comm_event); 4412 4413 ctxn = pmu->task_ctx_nr; 4414 if (ctxn < 0) 4415 goto next; 4416 4417 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 4418 if (ctx) 4419 perf_event_comm_ctx(ctx, comm_event); 4420 next: 4421 put_cpu_ptr(pmu->pmu_cpu_context); 4422 } 4423 rcu_read_unlock(); 4424 } 4425 4426 void perf_event_comm(struct task_struct *task) 4427 { 4428 struct perf_comm_event comm_event; 4429 struct perf_event_context *ctx; 4430 int ctxn; 4431 4432 for_each_task_context_nr(ctxn) { 4433 ctx = task->perf_event_ctxp[ctxn]; 4434 if (!ctx) 4435 continue; 4436 4437 perf_event_enable_on_exec(ctx); 4438 } 4439 4440 if 
(!atomic_read(&nr_comm_events)) 4441 return; 4442 4443 comm_event = (struct perf_comm_event){ 4444 .task = task, 4445 /* .comm */ 4446 /* .comm_size */ 4447 .event_id = { 4448 .header = { 4449 .type = PERF_RECORD_COMM, 4450 .misc = 0, 4451 /* .size */ 4452 }, 4453 /* .pid */ 4454 /* .tid */ 4455 }, 4456 }; 4457 4458 perf_event_comm_event(&comm_event); 4459 } 4460 4461 /* 4462 * mmap tracking 4463 */ 4464 4465 struct perf_mmap_event { 4466 struct vm_area_struct *vma; 4467 4468 const char *file_name; 4469 int file_size; 4470 4471 struct { 4472 struct perf_event_header header; 4473 4474 u32 pid; 4475 u32 tid; 4476 u64 start; 4477 u64 len; 4478 u64 pgoff; 4479 } event_id; 4480 }; 4481 4482 static void perf_event_mmap_output(struct perf_event *event, 4483 struct perf_mmap_event *mmap_event) 4484 { 4485 struct perf_output_handle handle; 4486 struct perf_sample_data sample; 4487 int size = mmap_event->event_id.header.size; 4488 int ret; 4489 4490 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); 4491 ret = perf_output_begin(&handle, event, 4492 mmap_event->event_id.header.size); 4493 if (ret) 4494 goto out; 4495 4496 mmap_event->event_id.pid = perf_event_pid(event, current); 4497 mmap_event->event_id.tid = perf_event_tid(event, current); 4498 4499 perf_output_put(&handle, mmap_event->event_id); 4500 __output_copy(&handle, mmap_event->file_name, 4501 mmap_event->file_size); 4502 4503 perf_event__output_id_sample(event, &handle, &sample); 4504 4505 perf_output_end(&handle); 4506 out: 4507 mmap_event->event_id.header.size = size; 4508 } 4509 4510 static int perf_event_mmap_match(struct perf_event *event, 4511 struct perf_mmap_event *mmap_event, 4512 int executable) 4513 { 4514 if (event->state < PERF_EVENT_STATE_INACTIVE) 4515 return 0; 4516 4517 if (!event_filter_match(event)) 4518 return 0; 4519 4520 if ((!executable && event->attr.mmap_data) || 4521 (executable && event->attr.mmap)) 4522 return 1; 4523 4524 return 0; 4525 } 4526 4527 static void perf_event_mmap_ctx(struct perf_event_context *ctx, 4528 struct perf_mmap_event *mmap_event, 4529 int executable) 4530 { 4531 struct perf_event *event; 4532 4533 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 4534 if (perf_event_mmap_match(event, mmap_event, executable)) 4535 perf_event_mmap_output(event, mmap_event); 4536 } 4537 } 4538 4539 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 4540 { 4541 struct perf_cpu_context *cpuctx; 4542 struct perf_event_context *ctx; 4543 struct vm_area_struct *vma = mmap_event->vma; 4544 struct file *file = vma->vm_file; 4545 unsigned int size; 4546 char tmp[16]; 4547 char *buf = NULL; 4548 const char *name; 4549 struct pmu *pmu; 4550 int ctxn; 4551 4552 memset(tmp, 0, sizeof(tmp)); 4553 4554 if (file) { 4555 /* 4556 * d_path works from the end of the rb backwards, so we 4557 * need to add enough zero bytes after the string to handle 4558 * the 64bit alignment we do later. 
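 * (Hence the PATH_MAX + sizeof(u64) allocation below; the resulting
 * name is padded to ALIGN(strlen(name) + 1, sizeof(u64)) at got_name.)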
4559 */ 4560 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); 4561 if (!buf) { 4562 name = strncpy(tmp, "//enomem", sizeof(tmp)); 4563 goto got_name; 4564 } 4565 name = d_path(&file->f_path, buf, PATH_MAX); 4566 if (IS_ERR(name)) { 4567 name = strncpy(tmp, "//toolong", sizeof(tmp)); 4568 goto got_name; 4569 } 4570 } else { 4571 if (arch_vma_name(mmap_event->vma)) { 4572 name = strncpy(tmp, arch_vma_name(mmap_event->vma), 4573 sizeof(tmp)); 4574 goto got_name; 4575 } 4576 4577 if (!vma->vm_mm) { 4578 name = strncpy(tmp, "[vdso]", sizeof(tmp)); 4579 goto got_name; 4580 } else if (vma->vm_start <= vma->vm_mm->start_brk && 4581 vma->vm_end >= vma->vm_mm->brk) { 4582 name = strncpy(tmp, "[heap]", sizeof(tmp)); 4583 goto got_name; 4584 } else if (vma->vm_start <= vma->vm_mm->start_stack && 4585 vma->vm_end >= vma->vm_mm->start_stack) { 4586 name = strncpy(tmp, "[stack]", sizeof(tmp)); 4587 goto got_name; 4588 } 4589 4590 name = strncpy(tmp, "//anon", sizeof(tmp)); 4591 goto got_name; 4592 } 4593 4594 got_name: 4595 size = ALIGN(strlen(name)+1, sizeof(u64)); 4596 4597 mmap_event->file_name = name; 4598 mmap_event->file_size = size; 4599 4600 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 4601 4602 rcu_read_lock(); 4603 list_for_each_entry_rcu(pmu, &pmus, entry) { 4604 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4605 if (cpuctx->active_pmu != pmu) 4606 goto next; 4607 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, 4608 vma->vm_flags & VM_EXEC); 4609 4610 ctxn = pmu->task_ctx_nr; 4611 if (ctxn < 0) 4612 goto next; 4613 4614 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 4615 if (ctx) { 4616 perf_event_mmap_ctx(ctx, mmap_event, 4617 vma->vm_flags & VM_EXEC); 4618 } 4619 next: 4620 put_cpu_ptr(pmu->pmu_cpu_context); 4621 } 4622 rcu_read_unlock(); 4623 4624 kfree(buf); 4625 } 4626 4627 void perf_event_mmap(struct vm_area_struct *vma) 4628 { 4629 struct perf_mmap_event mmap_event; 4630 4631 if (!atomic_read(&nr_mmap_events)) 4632 return; 4633 4634 mmap_event = (struct perf_mmap_event){ 4635 .vma = vma, 4636 /* .file_name */ 4637 /* .file_size */ 4638 .event_id = { 4639 .header = { 4640 .type = PERF_RECORD_MMAP, 4641 .misc = PERF_RECORD_MISC_USER, 4642 /* .size */ 4643 }, 4644 /* .pid */ 4645 /* .tid */ 4646 .start = vma->vm_start, 4647 .len = vma->vm_end - vma->vm_start, 4648 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, 4649 }, 4650 }; 4651 4652 perf_event_mmap_event(&mmap_event); 4653 } 4654 4655 /* 4656 * IRQ throttle logging 4657 */ 4658 4659 static void perf_log_throttle(struct perf_event *event, int enable) 4660 { 4661 struct perf_output_handle handle; 4662 struct perf_sample_data sample; 4663 int ret; 4664 4665 struct { 4666 struct perf_event_header header; 4667 u64 time; 4668 u64 id; 4669 u64 stream_id; 4670 } throttle_event = { 4671 .header = { 4672 .type = PERF_RECORD_THROTTLE, 4673 .misc = 0, 4674 .size = sizeof(throttle_event), 4675 }, 4676 .time = perf_clock(), 4677 .id = primary_event_id(event), 4678 .stream_id = event->id, 4679 }; 4680 4681 if (enable) 4682 throttle_event.header.type = PERF_RECORD_UNTHROTTLE; 4683 4684 perf_event_header__init_id(&throttle_event.header, &sample, event); 4685 4686 ret = perf_output_begin(&handle, event, 4687 throttle_event.header.size); 4688 if (ret) 4689 return; 4690 4691 perf_output_put(&handle, throttle_event); 4692 perf_event__output_id_sample(event, &handle, &sample); 4693 perf_output_end(&handle); 4694 } 4695 4696 /* 4697 * Generic event overflow handling, sampling. 
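 *
 * __perf_event_overflow() counts the interrupt against
 * max_samples_per_tick and throttles the event (logging it via
 * perf_log_throttle()) once that limit is hit, re-adjusts the period
 * for freq based events, honours event->event_limit, and finally hands
 * the sample to the event's overflow_handler or to perf_event_output().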
4698 */ 4699 4700 static int __perf_event_overflow(struct perf_event *event, 4701 int throttle, struct perf_sample_data *data, 4702 struct pt_regs *regs) 4703 { 4704 int events = atomic_read(&event->event_limit); 4705 struct hw_perf_event *hwc = &event->hw; 4706 int ret = 0; 4707 4708 /* 4709 * Non-sampling counters might still use the PMI to fold short 4710 * hardware counters, ignore those. 4711 */ 4712 if (unlikely(!is_sampling_event(event))) 4713 return 0; 4714 4715 if (unlikely(hwc->interrupts >= max_samples_per_tick)) { 4716 if (throttle) { 4717 hwc->interrupts = MAX_INTERRUPTS; 4718 perf_log_throttle(event, 0); 4719 ret = 1; 4720 } 4721 } else 4722 hwc->interrupts++; 4723 4724 if (event->attr.freq) { 4725 u64 now = perf_clock(); 4726 s64 delta = now - hwc->freq_time_stamp; 4727 4728 hwc->freq_time_stamp = now; 4729 4730 if (delta > 0 && delta < 2*TICK_NSEC) 4731 perf_adjust_period(event, delta, hwc->last_period); 4732 } 4733 4734 /* 4735 * XXX event_limit might not quite work as expected on inherited 4736 * events 4737 */ 4738 4739 event->pending_kill = POLL_IN; 4740 if (events && atomic_dec_and_test(&event->event_limit)) { 4741 ret = 1; 4742 event->pending_kill = POLL_HUP; 4743 event->pending_disable = 1; 4744 irq_work_queue(&event->pending); 4745 } 4746 4747 if (event->overflow_handler) 4748 event->overflow_handler(event, data, regs); 4749 else 4750 perf_event_output(event, data, regs); 4751 4752 if (event->fasync && event->pending_kill) { 4753 event->pending_wakeup = 1; 4754 irq_work_queue(&event->pending); 4755 } 4756 4757 return ret; 4758 } 4759 4760 int perf_event_overflow(struct perf_event *event, 4761 struct perf_sample_data *data, 4762 struct pt_regs *regs) 4763 { 4764 return __perf_event_overflow(event, 1, data, regs); 4765 } 4766 4767 /* 4768 * Generic software event infrastructure 4769 */ 4770 4771 struct swevent_htable { 4772 struct swevent_hlist *swevent_hlist; 4773 struct mutex hlist_mutex; 4774 int hlist_refcount; 4775 4776 /* Recursion avoidance in each contexts */ 4777 int recursion[PERF_NR_CONTEXTS]; 4778 }; 4779 4780 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); 4781 4782 /* 4783 * We directly increment event->count and keep a second value in 4784 * event->hw.period_left to count intervals. This period event 4785 * is kept in the range [-sample_period, 0] so that we can use the 4786 * sign as trigger. 4787 */ 4788 4789 static u64 perf_swevent_set_period(struct perf_event *event) 4790 { 4791 struct hw_perf_event *hwc = &event->hw; 4792 u64 period = hwc->last_period; 4793 u64 nr, offset; 4794 s64 old, val; 4795 4796 hwc->last_period = hwc->sample_period; 4797 4798 again: 4799 old = val = local64_read(&hwc->period_left); 4800 if (val < 0) 4801 return 0; 4802 4803 nr = div64_u64(period + val, period); 4804 offset = nr * period; 4805 val -= offset; 4806 if (local64_cmpxchg(&hwc->period_left, old, val) != old) 4807 goto again; 4808 4809 return nr; 4810 } 4811 4812 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, 4813 struct perf_sample_data *data, 4814 struct pt_regs *regs) 4815 { 4816 struct hw_perf_event *hwc = &event->hw; 4817 int throttle = 0; 4818 4819 data->period = event->hw.last_period; 4820 if (!overflow) 4821 overflow = perf_swevent_set_period(event); 4822 4823 if (hwc->interrupts == MAX_INTERRUPTS) 4824 return; 4825 4826 for (; overflow; overflow--) { 4827 if (__perf_event_overflow(event, throttle, 4828 data, regs)) { 4829 /* 4830 * We inhibit the overflow from happening when 4831 * hwc->interrupts == MAX_INTERRUPTS. 
4832 */ 4833 break; 4834 } 4835 throttle = 1; 4836 } 4837 } 4838 4839 static void perf_swevent_event(struct perf_event *event, u64 nr, 4840 struct perf_sample_data *data, 4841 struct pt_regs *regs) 4842 { 4843 struct hw_perf_event *hwc = &event->hw; 4844 4845 local64_add(nr, &event->count); 4846 4847 if (!regs) 4848 return; 4849 4850 if (!is_sampling_event(event)) 4851 return; 4852 4853 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) 4854 return perf_swevent_overflow(event, 1, data, regs); 4855 4856 if (local64_add_negative(nr, &hwc->period_left)) 4857 return; 4858 4859 perf_swevent_overflow(event, 0, data, regs); 4860 } 4861 4862 static int perf_exclude_event(struct perf_event *event, 4863 struct pt_regs *regs) 4864 { 4865 if (event->hw.state & PERF_HES_STOPPED) 4866 return 1; 4867 4868 if (regs) { 4869 if (event->attr.exclude_user && user_mode(regs)) 4870 return 1; 4871 4872 if (event->attr.exclude_kernel && !user_mode(regs)) 4873 return 1; 4874 } 4875 4876 return 0; 4877 } 4878 4879 static int perf_swevent_match(struct perf_event *event, 4880 enum perf_type_id type, 4881 u32 event_id, 4882 struct perf_sample_data *data, 4883 struct pt_regs *regs) 4884 { 4885 if (event->attr.type != type) 4886 return 0; 4887 4888 if (event->attr.config != event_id) 4889 return 0; 4890 4891 if (perf_exclude_event(event, regs)) 4892 return 0; 4893 4894 return 1; 4895 } 4896 4897 static inline u64 swevent_hash(u64 type, u32 event_id) 4898 { 4899 u64 val = event_id | (type << 32); 4900 4901 return hash_64(val, SWEVENT_HLIST_BITS); 4902 } 4903 4904 static inline struct hlist_head * 4905 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) 4906 { 4907 u64 hash = swevent_hash(type, event_id); 4908 4909 return &hlist->heads[hash]; 4910 } 4911 4912 /* For the read side: events when they trigger */ 4913 static inline struct hlist_head * 4914 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) 4915 { 4916 struct swevent_hlist *hlist; 4917 4918 hlist = rcu_dereference(swhash->swevent_hlist); 4919 if (!hlist) 4920 return NULL; 4921 4922 return __find_swevent_head(hlist, type, event_id); 4923 } 4924 4925 /* For the event head insertion and removal in the hlist */ 4926 static inline struct hlist_head * 4927 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) 4928 { 4929 struct swevent_hlist *hlist; 4930 u32 event_id = event->attr.config; 4931 u64 type = event->attr.type; 4932 4933 /* 4934 * Event scheduling is always serialized against hlist allocation 4935 * and release. Which makes the protected version suitable here. 4936 * The context lock guarantees that. 
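 * (The rcu_dereference_protected() below, with
 * lockdep_is_held(&event->ctx->lock), documents exactly that
 * dependency.)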
4937 */ 4938 hlist = rcu_dereference_protected(swhash->swevent_hlist, 4939 lockdep_is_held(&event->ctx->lock)); 4940 if (!hlist) 4941 return NULL; 4942 4943 return __find_swevent_head(hlist, type, event_id); 4944 } 4945 4946 static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 4947 u64 nr, 4948 struct perf_sample_data *data, 4949 struct pt_regs *regs) 4950 { 4951 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 4952 struct perf_event *event; 4953 struct hlist_node *node; 4954 struct hlist_head *head; 4955 4956 rcu_read_lock(); 4957 head = find_swevent_head_rcu(swhash, type, event_id); 4958 if (!head) 4959 goto end; 4960 4961 hlist_for_each_entry_rcu(event, node, head, hlist_entry) { 4962 if (perf_swevent_match(event, type, event_id, data, regs)) 4963 perf_swevent_event(event, nr, data, regs); 4964 } 4965 end: 4966 rcu_read_unlock(); 4967 } 4968 4969 int perf_swevent_get_recursion_context(void) 4970 { 4971 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 4972 4973 return get_recursion_context(swhash->recursion); 4974 } 4975 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); 4976 4977 inline void perf_swevent_put_recursion_context(int rctx) 4978 { 4979 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 4980 4981 put_recursion_context(swhash->recursion, rctx); 4982 } 4983 4984 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 4985 { 4986 struct perf_sample_data data; 4987 int rctx; 4988 4989 preempt_disable_notrace(); 4990 rctx = perf_swevent_get_recursion_context(); 4991 if (rctx < 0) 4992 return; 4993 4994 perf_sample_data_init(&data, addr); 4995 4996 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 4997 4998 perf_swevent_put_recursion_context(rctx); 4999 preempt_enable_notrace(); 5000 } 5001 5002 static void perf_swevent_read(struct perf_event *event) 5003 { 5004 } 5005 5006 static int perf_swevent_add(struct perf_event *event, int flags) 5007 { 5008 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5009 struct hw_perf_event *hwc = &event->hw; 5010 struct hlist_head *head; 5011 5012 if (is_sampling_event(event)) { 5013 hwc->last_period = hwc->sample_period; 5014 perf_swevent_set_period(event); 5015 } 5016 5017 hwc->state = !(flags & PERF_EF_START); 5018 5019 head = find_swevent_head(swhash, event); 5020 if (WARN_ON_ONCE(!head)) 5021 return -EINVAL; 5022 5023 hlist_add_head_rcu(&event->hlist_entry, head); 5024 5025 return 0; 5026 } 5027 5028 static void perf_swevent_del(struct perf_event *event, int flags) 5029 { 5030 hlist_del_rcu(&event->hlist_entry); 5031 } 5032 5033 static void perf_swevent_start(struct perf_event *event, int flags) 5034 { 5035 event->hw.state = 0; 5036 } 5037 5038 static void perf_swevent_stop(struct perf_event *event, int flags) 5039 { 5040 event->hw.state = PERF_HES_STOPPED; 5041 } 5042 5043 /* Deref the hlist from the update side */ 5044 static inline struct swevent_hlist * 5045 swevent_hlist_deref(struct swevent_htable *swhash) 5046 { 5047 return rcu_dereference_protected(swhash->swevent_hlist, 5048 lockdep_is_held(&swhash->hlist_mutex)); 5049 } 5050 5051 static void swevent_hlist_release(struct swevent_htable *swhash) 5052 { 5053 struct swevent_hlist *hlist = swevent_hlist_deref(swhash); 5054 5055 if (!hlist) 5056 return; 5057 5058 rcu_assign_pointer(swhash->swevent_hlist, NULL); 5059 kfree_rcu(hlist, rcu_head); 5060 } 5061 5062 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) 5063 { 5064 struct swevent_htable *swhash = 
&per_cpu(swevent_htable, cpu); 5065 5066 mutex_lock(&swhash->hlist_mutex); 5067 5068 if (!--swhash->hlist_refcount) 5069 swevent_hlist_release(swhash); 5070 5071 mutex_unlock(&swhash->hlist_mutex); 5072 } 5073 5074 static void swevent_hlist_put(struct perf_event *event) 5075 { 5076 int cpu; 5077 5078 if (event->cpu != -1) { 5079 swevent_hlist_put_cpu(event, event->cpu); 5080 return; 5081 } 5082 5083 for_each_possible_cpu(cpu) 5084 swevent_hlist_put_cpu(event, cpu); 5085 } 5086 5087 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) 5088 { 5089 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 5090 int err = 0; 5091 5092 mutex_lock(&swhash->hlist_mutex); 5093 5094 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { 5095 struct swevent_hlist *hlist; 5096 5097 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); 5098 if (!hlist) { 5099 err = -ENOMEM; 5100 goto exit; 5101 } 5102 rcu_assign_pointer(swhash->swevent_hlist, hlist); 5103 } 5104 swhash->hlist_refcount++; 5105 exit: 5106 mutex_unlock(&swhash->hlist_mutex); 5107 5108 return err; 5109 } 5110 5111 static int swevent_hlist_get(struct perf_event *event) 5112 { 5113 int err; 5114 int cpu, failed_cpu; 5115 5116 if (event->cpu != -1) 5117 return swevent_hlist_get_cpu(event, event->cpu); 5118 5119 get_online_cpus(); 5120 for_each_possible_cpu(cpu) { 5121 err = swevent_hlist_get_cpu(event, cpu); 5122 if (err) { 5123 failed_cpu = cpu; 5124 goto fail; 5125 } 5126 } 5127 put_online_cpus(); 5128 5129 return 0; 5130 fail: 5131 for_each_possible_cpu(cpu) { 5132 if (cpu == failed_cpu) 5133 break; 5134 swevent_hlist_put_cpu(event, cpu); 5135 } 5136 5137 put_online_cpus(); 5138 return err; 5139 } 5140 5141 struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 5142 5143 static void sw_perf_event_destroy(struct perf_event *event) 5144 { 5145 u64 event_id = event->attr.config; 5146 5147 WARN_ON(event->parent); 5148 5149 jump_label_dec(&perf_swevent_enabled[event_id]); 5150 swevent_hlist_put(event); 5151 } 5152 5153 static int perf_swevent_init(struct perf_event *event) 5154 { 5155 int event_id = event->attr.config; 5156 5157 if (event->attr.type != PERF_TYPE_SOFTWARE) 5158 return -ENOENT; 5159 5160 switch (event_id) { 5161 case PERF_COUNT_SW_CPU_CLOCK: 5162 case PERF_COUNT_SW_TASK_CLOCK: 5163 return -ENOENT; 5164 5165 default: 5166 break; 5167 } 5168 5169 if (event_id >= PERF_COUNT_SW_MAX) 5170 return -ENOENT; 5171 5172 if (!event->parent) { 5173 int err; 5174 5175 err = swevent_hlist_get(event); 5176 if (err) 5177 return err; 5178 5179 jump_label_inc(&perf_swevent_enabled[event_id]); 5180 event->destroy = sw_perf_event_destroy; 5181 } 5182 5183 return 0; 5184 } 5185 5186 static struct pmu perf_swevent = { 5187 .task_ctx_nr = perf_sw_context, 5188 5189 .event_init = perf_swevent_init, 5190 .add = perf_swevent_add, 5191 .del = perf_swevent_del, 5192 .start = perf_swevent_start, 5193 .stop = perf_swevent_stop, 5194 .read = perf_swevent_read, 5195 }; 5196 5197 #ifdef CONFIG_EVENT_TRACING 5198 5199 static int perf_tp_filter_match(struct perf_event *event, 5200 struct perf_sample_data *data) 5201 { 5202 void *record = data->raw->data; 5203 5204 if (likely(!event->filter) || filter_match_preds(event->filter, record)) 5205 return 1; 5206 return 0; 5207 } 5208 5209 static int perf_tp_event_match(struct perf_event *event, 5210 struct perf_sample_data *data, 5211 struct pt_regs *regs) 5212 { 5213 if (event->hw.state & PERF_HES_STOPPED) 5214 return 0; 5215 /* 5216 * All tracepoints are from kernel-space. 
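 * An event that excludes kernel samples must therefore never see a
 * tracepoint hit, which is what the exclude_kernel check below
 * enforces.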
5217 */ 5218 if (event->attr.exclude_kernel) 5219 return 0; 5220 5221 if (!perf_tp_filter_match(event, data)) 5222 return 0; 5223 5224 return 1; 5225 } 5226 5227 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, 5228 struct pt_regs *regs, struct hlist_head *head, int rctx) 5229 { 5230 struct perf_sample_data data; 5231 struct perf_event *event; 5232 struct hlist_node *node; 5233 5234 struct perf_raw_record raw = { 5235 .size = entry_size, 5236 .data = record, 5237 }; 5238 5239 perf_sample_data_init(&data, addr); 5240 data.raw = &raw; 5241 5242 hlist_for_each_entry_rcu(event, node, head, hlist_entry) { 5243 if (perf_tp_event_match(event, &data, regs)) 5244 perf_swevent_event(event, count, &data, regs); 5245 } 5246 5247 perf_swevent_put_recursion_context(rctx); 5248 } 5249 EXPORT_SYMBOL_GPL(perf_tp_event); 5250 5251 static void tp_perf_event_destroy(struct perf_event *event) 5252 { 5253 perf_trace_destroy(event); 5254 } 5255 5256 static int perf_tp_event_init(struct perf_event *event) 5257 { 5258 int err; 5259 5260 if (event->attr.type != PERF_TYPE_TRACEPOINT) 5261 return -ENOENT; 5262 5263 err = perf_trace_init(event); 5264 if (err) 5265 return err; 5266 5267 event->destroy = tp_perf_event_destroy; 5268 5269 return 0; 5270 } 5271 5272 static struct pmu perf_tracepoint = { 5273 .task_ctx_nr = perf_sw_context, 5274 5275 .event_init = perf_tp_event_init, 5276 .add = perf_trace_add, 5277 .del = perf_trace_del, 5278 .start = perf_swevent_start, 5279 .stop = perf_swevent_stop, 5280 .read = perf_swevent_read, 5281 }; 5282 5283 static inline void perf_tp_register(void) 5284 { 5285 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); 5286 } 5287 5288 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 5289 { 5290 char *filter_str; 5291 int ret; 5292 5293 if (event->attr.type != PERF_TYPE_TRACEPOINT) 5294 return -EINVAL; 5295 5296 filter_str = strndup_user(arg, PAGE_SIZE); 5297 if (IS_ERR(filter_str)) 5298 return PTR_ERR(filter_str); 5299 5300 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); 5301 5302 kfree(filter_str); 5303 return ret; 5304 } 5305 5306 static void perf_event_free_filter(struct perf_event *event) 5307 { 5308 ftrace_profile_free_filter(event); 5309 } 5310 5311 #else 5312 5313 static inline void perf_tp_register(void) 5314 { 5315 } 5316 5317 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 5318 { 5319 return -ENOENT; 5320 } 5321 5322 static void perf_event_free_filter(struct perf_event *event) 5323 { 5324 } 5325 5326 #endif /* CONFIG_EVENT_TRACING */ 5327 5328 #ifdef CONFIG_HAVE_HW_BREAKPOINT 5329 void perf_bp_event(struct perf_event *bp, void *data) 5330 { 5331 struct perf_sample_data sample; 5332 struct pt_regs *regs = data; 5333 5334 perf_sample_data_init(&sample, bp->attr.bp_addr); 5335 5336 if (!bp->hw.state && !perf_exclude_event(bp, regs)) 5337 perf_swevent_event(bp, 1, &sample, regs); 5338 } 5339 #endif 5340 5341 /* 5342 * hrtimer based swevent callback 5343 */ 5344 5345 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) 5346 { 5347 enum hrtimer_restart ret = HRTIMER_RESTART; 5348 struct perf_sample_data data; 5349 struct pt_regs *regs; 5350 struct perf_event *event; 5351 u64 period; 5352 5353 event = container_of(hrtimer, struct perf_event, hw.hrtimer); 5354 5355 if (event->state != PERF_EVENT_STATE_ACTIVE) 5356 return HRTIMER_NORESTART; 5357 5358 event->pmu->read(event); 5359 5360 perf_sample_data_init(&data, 0); 5361 data.period = 
event->hw.last_period; 5362 regs = get_irq_regs(); 5363 5364 if (regs && !perf_exclude_event(event, regs)) { 5365 if (!(event->attr.exclude_idle && current->pid == 0)) 5366 if (perf_event_overflow(event, &data, regs)) 5367 ret = HRTIMER_NORESTART; 5368 } 5369 5370 period = max_t(u64, 10000, event->hw.sample_period); 5371 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 5372 5373 return ret; 5374 } 5375 5376 static void perf_swevent_start_hrtimer(struct perf_event *event) 5377 { 5378 struct hw_perf_event *hwc = &event->hw; 5379 s64 period; 5380 5381 if (!is_sampling_event(event)) 5382 return; 5383 5384 period = local64_read(&hwc->period_left); 5385 if (period) { 5386 if (period < 0) 5387 period = 10000; 5388 5389 local64_set(&hwc->period_left, 0); 5390 } else { 5391 period = max_t(u64, 10000, hwc->sample_period); 5392 } 5393 __hrtimer_start_range_ns(&hwc->hrtimer, 5394 ns_to_ktime(period), 0, 5395 HRTIMER_MODE_REL_PINNED, 0); 5396 } 5397 5398 static void perf_swevent_cancel_hrtimer(struct perf_event *event) 5399 { 5400 struct hw_perf_event *hwc = &event->hw; 5401 5402 if (is_sampling_event(event)) { 5403 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); 5404 local64_set(&hwc->period_left, ktime_to_ns(remaining)); 5405 5406 hrtimer_cancel(&hwc->hrtimer); 5407 } 5408 } 5409 5410 static void perf_swevent_init_hrtimer(struct perf_event *event) 5411 { 5412 struct hw_perf_event *hwc = &event->hw; 5413 5414 if (!is_sampling_event(event)) 5415 return; 5416 5417 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 5418 hwc->hrtimer.function = perf_swevent_hrtimer; 5419 5420 /* 5421 * Since hrtimers have a fixed rate, we can do a static freq->period 5422 * mapping and avoid the whole period adjust feedback stuff. 5423 */ 5424 if (event->attr.freq) { 5425 long freq = event->attr.sample_freq; 5426 5427 event->attr.sample_period = NSEC_PER_SEC / freq; 5428 hwc->sample_period = event->attr.sample_period; 5429 local64_set(&hwc->period_left, hwc->sample_period); 5430 event->attr.freq = 0; 5431 } 5432 } 5433 5434 /* 5435 * Software event: cpu wall time clock 5436 */ 5437 5438 static void cpu_clock_event_update(struct perf_event *event) 5439 { 5440 s64 prev; 5441 u64 now; 5442 5443 now = local_clock(); 5444 prev = local64_xchg(&event->hw.prev_count, now); 5445 local64_add(now - prev, &event->count); 5446 } 5447 5448 static void cpu_clock_event_start(struct perf_event *event, int flags) 5449 { 5450 local64_set(&event->hw.prev_count, local_clock()); 5451 perf_swevent_start_hrtimer(event); 5452 } 5453 5454 static void cpu_clock_event_stop(struct perf_event *event, int flags) 5455 { 5456 perf_swevent_cancel_hrtimer(event); 5457 cpu_clock_event_update(event); 5458 } 5459 5460 static int cpu_clock_event_add(struct perf_event *event, int flags) 5461 { 5462 if (flags & PERF_EF_START) 5463 cpu_clock_event_start(event, flags); 5464 5465 return 0; 5466 } 5467 5468 static void cpu_clock_event_del(struct perf_event *event, int flags) 5469 { 5470 cpu_clock_event_stop(event, flags); 5471 } 5472 5473 static void cpu_clock_event_read(struct perf_event *event) 5474 { 5475 cpu_clock_event_update(event); 5476 } 5477 5478 static int cpu_clock_event_init(struct perf_event *event) 5479 { 5480 if (event->attr.type != PERF_TYPE_SOFTWARE) 5481 return -ENOENT; 5482 5483 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) 5484 return -ENOENT; 5485 5486 perf_swevent_init_hrtimer(event); 5487 5488 return 0; 5489 } 5490 5491 static struct pmu perf_cpu_clock = { 5492 .task_ctx_nr = perf_sw_context, 5493 5494 .event_init 
= cpu_clock_event_init, 5495 .add = cpu_clock_event_add, 5496 .del = cpu_clock_event_del, 5497 .start = cpu_clock_event_start, 5498 .stop = cpu_clock_event_stop, 5499 .read = cpu_clock_event_read, 5500 }; 5501 5502 /* 5503 * Software event: task time clock 5504 */ 5505 5506 static void task_clock_event_update(struct perf_event *event, u64 now) 5507 { 5508 u64 prev; 5509 s64 delta; 5510 5511 prev = local64_xchg(&event->hw.prev_count, now); 5512 delta = now - prev; 5513 local64_add(delta, &event->count); 5514 } 5515 5516 static void task_clock_event_start(struct perf_event *event, int flags) 5517 { 5518 local64_set(&event->hw.prev_count, event->ctx->time); 5519 perf_swevent_start_hrtimer(event); 5520 } 5521 5522 static void task_clock_event_stop(struct perf_event *event, int flags) 5523 { 5524 perf_swevent_cancel_hrtimer(event); 5525 task_clock_event_update(event, event->ctx->time); 5526 } 5527 5528 static int task_clock_event_add(struct perf_event *event, int flags) 5529 { 5530 if (flags & PERF_EF_START) 5531 task_clock_event_start(event, flags); 5532 5533 return 0; 5534 } 5535 5536 static void task_clock_event_del(struct perf_event *event, int flags) 5537 { 5538 task_clock_event_stop(event, PERF_EF_UPDATE); 5539 } 5540 5541 static void task_clock_event_read(struct perf_event *event) 5542 { 5543 u64 now = perf_clock(); 5544 u64 delta = now - event->ctx->timestamp; 5545 u64 time = event->ctx->time + delta; 5546 5547 task_clock_event_update(event, time); 5548 } 5549 5550 static int task_clock_event_init(struct perf_event *event) 5551 { 5552 if (event->attr.type != PERF_TYPE_SOFTWARE) 5553 return -ENOENT; 5554 5555 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) 5556 return -ENOENT; 5557 5558 perf_swevent_init_hrtimer(event); 5559 5560 return 0; 5561 } 5562 5563 static struct pmu perf_task_clock = { 5564 .task_ctx_nr = perf_sw_context, 5565 5566 .event_init = task_clock_event_init, 5567 .add = task_clock_event_add, 5568 .del = task_clock_event_del, 5569 .start = task_clock_event_start, 5570 .stop = task_clock_event_stop, 5571 .read = task_clock_event_read, 5572 }; 5573 5574 static void perf_pmu_nop_void(struct pmu *pmu) 5575 { 5576 } 5577 5578 static int perf_pmu_nop_int(struct pmu *pmu) 5579 { 5580 return 0; 5581 } 5582 5583 static void perf_pmu_start_txn(struct pmu *pmu) 5584 { 5585 perf_pmu_disable(pmu); 5586 } 5587 5588 static int perf_pmu_commit_txn(struct pmu *pmu) 5589 { 5590 perf_pmu_enable(pmu); 5591 return 0; 5592 } 5593 5594 static void perf_pmu_cancel_txn(struct pmu *pmu) 5595 { 5596 perf_pmu_enable(pmu); 5597 } 5598 5599 /* 5600 * Ensures all contexts with the same task_ctx_nr have the same 5601 * pmu_cpu_context too. 5602 */ 5603 static void *find_pmu_context(int ctxn) 5604 { 5605 struct pmu *pmu; 5606 5607 if (ctxn < 0) 5608 return NULL; 5609 5610 list_for_each_entry(pmu, &pmus, entry) { 5611 if (pmu->task_ctx_nr == ctxn) 5612 return pmu->pmu_cpu_context; 5613 } 5614 5615 return NULL; 5616 } 5617 5618 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) 5619 { 5620 int cpu; 5621 5622 for_each_possible_cpu(cpu) { 5623 struct perf_cpu_context *cpuctx; 5624 5625 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 5626 5627 if (cpuctx->active_pmu == old_pmu) 5628 cpuctx->active_pmu = pmu; 5629 } 5630 } 5631 5632 static void free_pmu_context(struct pmu *pmu) 5633 { 5634 struct pmu *i; 5635 5636 mutex_lock(&pmus_lock); 5637 /* 5638 * Like a real lame refcount. 
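 * The pmu_cpu_context can be shared between pmus with the same
 * task_ctx_nr (see find_pmu_context()), so only free it when no other
 * registered pmu still uses it; otherwise just hand active_pmu
 * ownership over with update_pmu_context() and keep the allocation.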
5639 */ 5640 list_for_each_entry(i, &pmus, entry) { 5641 if (i->pmu_cpu_context == pmu->pmu_cpu_context) { 5642 update_pmu_context(i, pmu); 5643 goto out; 5644 } 5645 } 5646 5647 free_percpu(pmu->pmu_cpu_context); 5648 out: 5649 mutex_unlock(&pmus_lock); 5650 } 5651 static struct idr pmu_idr; 5652 5653 static ssize_t 5654 type_show(struct device *dev, struct device_attribute *attr, char *page) 5655 { 5656 struct pmu *pmu = dev_get_drvdata(dev); 5657 5658 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); 5659 } 5660 5661 static struct device_attribute pmu_dev_attrs[] = { 5662 __ATTR_RO(type), 5663 __ATTR_NULL, 5664 }; 5665 5666 static int pmu_bus_running; 5667 static struct bus_type pmu_bus = { 5668 .name = "event_source", 5669 .dev_attrs = pmu_dev_attrs, 5670 }; 5671 5672 static void pmu_dev_release(struct device *dev) 5673 { 5674 kfree(dev); 5675 } 5676 5677 static int pmu_dev_alloc(struct pmu *pmu) 5678 { 5679 int ret = -ENOMEM; 5680 5681 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); 5682 if (!pmu->dev) 5683 goto out; 5684 5685 device_initialize(pmu->dev); 5686 ret = dev_set_name(pmu->dev, "%s", pmu->name); 5687 if (ret) 5688 goto free_dev; 5689 5690 dev_set_drvdata(pmu->dev, pmu); 5691 pmu->dev->bus = &pmu_bus; 5692 pmu->dev->release = pmu_dev_release; 5693 ret = device_add(pmu->dev); 5694 if (ret) 5695 goto free_dev; 5696 5697 out: 5698 return ret; 5699 5700 free_dev: 5701 put_device(pmu->dev); 5702 goto out; 5703 } 5704 5705 static struct lock_class_key cpuctx_mutex; 5706 static struct lock_class_key cpuctx_lock; 5707 5708 int perf_pmu_register(struct pmu *pmu, char *name, int type) 5709 { 5710 int cpu, ret; 5711 5712 mutex_lock(&pmus_lock); 5713 ret = -ENOMEM; 5714 pmu->pmu_disable_count = alloc_percpu(int); 5715 if (!pmu->pmu_disable_count) 5716 goto unlock; 5717 5718 pmu->type = -1; 5719 if (!name) 5720 goto skip_type; 5721 pmu->name = name; 5722 5723 if (type < 0) { 5724 int err = idr_pre_get(&pmu_idr, GFP_KERNEL); 5725 if (!err) 5726 goto free_pdc; 5727 5728 err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type); 5729 if (err) { 5730 ret = err; 5731 goto free_pdc; 5732 } 5733 } 5734 pmu->type = type; 5735 5736 if (pmu_bus_running) { 5737 ret = pmu_dev_alloc(pmu); 5738 if (ret) 5739 goto free_idr; 5740 } 5741 5742 skip_type: 5743 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); 5744 if (pmu->pmu_cpu_context) 5745 goto got_cpu_context; 5746 5747 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); 5748 if (!pmu->pmu_cpu_context) 5749 goto free_dev; 5750 5751 for_each_possible_cpu(cpu) { 5752 struct perf_cpu_context *cpuctx; 5753 5754 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 5755 __perf_event_init_context(&cpuctx->ctx); 5756 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); 5757 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); 5758 cpuctx->ctx.type = cpu_context; 5759 cpuctx->ctx.pmu = pmu; 5760 cpuctx->jiffies_interval = 1; 5761 INIT_LIST_HEAD(&cpuctx->rotation_list); 5762 cpuctx->active_pmu = pmu; 5763 } 5764 5765 got_cpu_context: 5766 if (!pmu->start_txn) { 5767 if (pmu->pmu_enable) { 5768 /* 5769 * If we have pmu_enable/pmu_disable calls, install 5770 * transaction stubs that use that to try and batch 5771 * hardware accesses. 
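 * (start_txn disables the pmu, commit_txn/cancel_txn re-enable it, so
 * whatever is scheduled in between can reach the hardware in one go.)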
5772 */ 5773 pmu->start_txn = perf_pmu_start_txn; 5774 pmu->commit_txn = perf_pmu_commit_txn; 5775 pmu->cancel_txn = perf_pmu_cancel_txn; 5776 } else { 5777 pmu->start_txn = perf_pmu_nop_void; 5778 pmu->commit_txn = perf_pmu_nop_int; 5779 pmu->cancel_txn = perf_pmu_nop_void; 5780 } 5781 } 5782 5783 if (!pmu->pmu_enable) { 5784 pmu->pmu_enable = perf_pmu_nop_void; 5785 pmu->pmu_disable = perf_pmu_nop_void; 5786 } 5787 5788 list_add_rcu(&pmu->entry, &pmus); 5789 ret = 0; 5790 unlock: 5791 mutex_unlock(&pmus_lock); 5792 5793 return ret; 5794 5795 free_dev: 5796 device_del(pmu->dev); 5797 put_device(pmu->dev); 5798 5799 free_idr: 5800 if (pmu->type >= PERF_TYPE_MAX) 5801 idr_remove(&pmu_idr, pmu->type); 5802 5803 free_pdc: 5804 free_percpu(pmu->pmu_disable_count); 5805 goto unlock; 5806 } 5807 5808 void perf_pmu_unregister(struct pmu *pmu) 5809 { 5810 mutex_lock(&pmus_lock); 5811 list_del_rcu(&pmu->entry); 5812 mutex_unlock(&pmus_lock); 5813 5814 /* 5815 * We dereference the pmu list under both SRCU and regular RCU, so 5816 * synchronize against both of those. 5817 */ 5818 synchronize_srcu(&pmus_srcu); 5819 synchronize_rcu(); 5820 5821 free_percpu(pmu->pmu_disable_count); 5822 if (pmu->type >= PERF_TYPE_MAX) 5823 idr_remove(&pmu_idr, pmu->type); 5824 device_del(pmu->dev); 5825 put_device(pmu->dev); 5826 free_pmu_context(pmu); 5827 } 5828 5829 struct pmu *perf_init_event(struct perf_event *event) 5830 { 5831 struct pmu *pmu = NULL; 5832 int idx; 5833 int ret; 5834 5835 idx = srcu_read_lock(&pmus_srcu); 5836 5837 rcu_read_lock(); 5838 pmu = idr_find(&pmu_idr, event->attr.type); 5839 rcu_read_unlock(); 5840 if (pmu) { 5841 event->pmu = pmu; 5842 ret = pmu->event_init(event); 5843 if (ret) 5844 pmu = ERR_PTR(ret); 5845 goto unlock; 5846 } 5847 5848 list_for_each_entry_rcu(pmu, &pmus, entry) { 5849 event->pmu = pmu; 5850 ret = pmu->event_init(event); 5851 if (!ret) 5852 goto unlock; 5853 5854 if (ret != -ENOENT) { 5855 pmu = ERR_PTR(ret); 5856 goto unlock; 5857 } 5858 } 5859 pmu = ERR_PTR(-ENOENT); 5860 unlock: 5861 srcu_read_unlock(&pmus_srcu, idx); 5862 5863 return pmu; 5864 } 5865 5866 /* 5867 * Allocate and initialize a event structure 5868 */ 5869 static struct perf_event * 5870 perf_event_alloc(struct perf_event_attr *attr, int cpu, 5871 struct task_struct *task, 5872 struct perf_event *group_leader, 5873 struct perf_event *parent_event, 5874 perf_overflow_handler_t overflow_handler, 5875 void *context) 5876 { 5877 struct pmu *pmu; 5878 struct perf_event *event; 5879 struct hw_perf_event *hwc; 5880 long err; 5881 5882 if ((unsigned)cpu >= nr_cpu_ids) { 5883 if (!task || cpu != -1) 5884 return ERR_PTR(-EINVAL); 5885 } 5886 5887 event = kzalloc(sizeof(*event), GFP_KERNEL); 5888 if (!event) 5889 return ERR_PTR(-ENOMEM); 5890 5891 /* 5892 * Single events are their own group leaders, with an 5893 * empty sibling list: 5894 */ 5895 if (!group_leader) 5896 group_leader = event; 5897 5898 mutex_init(&event->child_mutex); 5899 INIT_LIST_HEAD(&event->child_list); 5900 5901 INIT_LIST_HEAD(&event->group_entry); 5902 INIT_LIST_HEAD(&event->event_entry); 5903 INIT_LIST_HEAD(&event->sibling_list); 5904 INIT_LIST_HEAD(&event->rb_entry); 5905 5906 init_waitqueue_head(&event->waitq); 5907 init_irq_work(&event->pending, perf_pending_event); 5908 5909 mutex_init(&event->mmap_mutex); 5910 5911 event->cpu = cpu; 5912 event->attr = *attr; 5913 event->group_leader = group_leader; 5914 event->pmu = NULL; 5915 event->oncpu = -1; 5916 5917 event->parent = parent_event; 5918 5919 event->ns = 
get_pid_ns(current->nsproxy->pid_ns); 5920 event->id = atomic64_inc_return(&perf_event_id); 5921 5922 event->state = PERF_EVENT_STATE_INACTIVE; 5923 5924 if (task) { 5925 event->attach_state = PERF_ATTACH_TASK; 5926 #ifdef CONFIG_HAVE_HW_BREAKPOINT 5927 /* 5928 * hw_breakpoint is a bit difficult here.. 5929 */ 5930 if (attr->type == PERF_TYPE_BREAKPOINT) 5931 event->hw.bp_target = task; 5932 #endif 5933 } 5934 5935 if (!overflow_handler && parent_event) { 5936 overflow_handler = parent_event->overflow_handler; 5937 context = parent_event->overflow_handler_context; 5938 } 5939 5940 event->overflow_handler = overflow_handler; 5941 event->overflow_handler_context = context; 5942 5943 if (attr->disabled) 5944 event->state = PERF_EVENT_STATE_OFF; 5945 5946 pmu = NULL; 5947 5948 hwc = &event->hw; 5949 hwc->sample_period = attr->sample_period; 5950 if (attr->freq && attr->sample_freq) 5951 hwc->sample_period = 1; 5952 hwc->last_period = hwc->sample_period; 5953 5954 local64_set(&hwc->period_left, hwc->sample_period); 5955 5956 /* 5957 * we currently do not support PERF_FORMAT_GROUP on inherited events 5958 */ 5959 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 5960 goto done; 5961 5962 pmu = perf_init_event(event); 5963 5964 done: 5965 err = 0; 5966 if (!pmu) 5967 err = -EINVAL; 5968 else if (IS_ERR(pmu)) 5969 err = PTR_ERR(pmu); 5970 5971 if (err) { 5972 if (event->ns) 5973 put_pid_ns(event->ns); 5974 kfree(event); 5975 return ERR_PTR(err); 5976 } 5977 5978 if (!event->parent) { 5979 if (event->attach_state & PERF_ATTACH_TASK) 5980 jump_label_inc(&perf_sched_events); 5981 if (event->attr.mmap || event->attr.mmap_data) 5982 atomic_inc(&nr_mmap_events); 5983 if (event->attr.comm) 5984 atomic_inc(&nr_comm_events); 5985 if (event->attr.task) 5986 atomic_inc(&nr_task_events); 5987 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { 5988 err = get_callchain_buffers(); 5989 if (err) { 5990 free_event(event); 5991 return ERR_PTR(err); 5992 } 5993 } 5994 } 5995 5996 return event; 5997 } 5998 5999 static int perf_copy_attr(struct perf_event_attr __user *uattr, 6000 struct perf_event_attr *attr) 6001 { 6002 u32 size; 6003 int ret; 6004 6005 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) 6006 return -EFAULT; 6007 6008 /* 6009 * zero the full structure, so that a short copy will be nice. 6010 */ 6011 memset(attr, 0, sizeof(*attr)); 6012 6013 ret = get_user(size, &uattr->size); 6014 if (ret) 6015 return ret; 6016 6017 if (size > PAGE_SIZE) /* silly large */ 6018 goto err_size; 6019 6020 if (!size) /* abi compat */ 6021 size = PERF_ATTR_SIZE_VER0; 6022 6023 if (size < PERF_ATTR_SIZE_VER0) 6024 goto err_size; 6025 6026 /* 6027 * If we're handed a bigger struct than we know of, 6028 * ensure all the unknown bits are 0 - i.e. new 6029 * user-space does not rely on any kernel feature 6030 * extensions we dont know about yet. 
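 * The byte-wise loop below fails the call with -E2BIG (reporting the
 * size we do understand back through uattr->size) as soon as a
 * non-zero byte is found beyond sizeof(*attr).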
6031 */ 6032 if (size > sizeof(*attr)) { 6033 unsigned char __user *addr; 6034 unsigned char __user *end; 6035 unsigned char val; 6036 6037 addr = (void __user *)uattr + sizeof(*attr); 6038 end = (void __user *)uattr + size; 6039 6040 for (; addr < end; addr++) { 6041 ret = get_user(val, addr); 6042 if (ret) 6043 return ret; 6044 if (val) 6045 goto err_size; 6046 } 6047 size = sizeof(*attr); 6048 } 6049 6050 ret = copy_from_user(attr, uattr, size); 6051 if (ret) 6052 return -EFAULT; 6053 6054 if (attr->__reserved_1) 6055 return -EINVAL; 6056 6057 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) 6058 return -EINVAL; 6059 6060 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) 6061 return -EINVAL; 6062 6063 out: 6064 return ret; 6065 6066 err_size: 6067 put_user(sizeof(*attr), &uattr->size); 6068 ret = -E2BIG; 6069 goto out; 6070 } 6071 6072 static int 6073 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) 6074 { 6075 struct ring_buffer *rb = NULL, *old_rb = NULL; 6076 int ret = -EINVAL; 6077 6078 if (!output_event) 6079 goto set; 6080 6081 /* don't allow circular references */ 6082 if (event == output_event) 6083 goto out; 6084 6085 /* 6086 * Don't allow cross-cpu buffers 6087 */ 6088 if (output_event->cpu != event->cpu) 6089 goto out; 6090 6091 /* 6092 * If its not a per-cpu rb, it must be the same task. 6093 */ 6094 if (output_event->cpu == -1 && output_event->ctx != event->ctx) 6095 goto out; 6096 6097 set: 6098 mutex_lock(&event->mmap_mutex); 6099 /* Can't redirect output if we've got an active mmap() */ 6100 if (atomic_read(&event->mmap_count)) 6101 goto unlock; 6102 6103 if (output_event) { 6104 /* get the rb we want to redirect to */ 6105 rb = ring_buffer_get(output_event); 6106 if (!rb) 6107 goto unlock; 6108 } 6109 6110 old_rb = event->rb; 6111 rcu_assign_pointer(event->rb, rb); 6112 if (old_rb) 6113 ring_buffer_detach(event, old_rb); 6114 ret = 0; 6115 unlock: 6116 mutex_unlock(&event->mmap_mutex); 6117 6118 if (old_rb) 6119 ring_buffer_put(old_rb); 6120 out: 6121 return ret; 6122 } 6123 6124 /** 6125 * sys_perf_event_open - open a performance event, associate it to a task/cpu 6126 * 6127 * @attr_uptr: event_id type attributes for monitoring/sampling 6128 * @pid: target pid 6129 * @cpu: target cpu 6130 * @group_fd: group leader event fd 6131 */ 6132 SYSCALL_DEFINE5(perf_event_open, 6133 struct perf_event_attr __user *, attr_uptr, 6134 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 6135 { 6136 struct perf_event *group_leader = NULL, *output_event = NULL; 6137 struct perf_event *event, *sibling; 6138 struct perf_event_attr attr; 6139 struct perf_event_context *ctx; 6140 struct file *event_file = NULL; 6141 struct file *group_file = NULL; 6142 struct task_struct *task = NULL; 6143 struct pmu *pmu; 6144 int event_fd; 6145 int move_group = 0; 6146 int fput_needed = 0; 6147 int err; 6148 6149 /* for future expandability... */ 6150 if (flags & ~PERF_FLAG_ALL) 6151 return -EINVAL; 6152 6153 err = perf_copy_attr(attr_uptr, &attr); 6154 if (err) 6155 return err; 6156 6157 if (!attr.exclude_kernel) { 6158 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 6159 return -EACCES; 6160 } 6161 6162 if (attr.freq) { 6163 if (attr.sample_freq > sysctl_perf_event_sample_rate) 6164 return -EINVAL; 6165 } 6166 6167 /* 6168 * In cgroup mode, the pid argument is used to pass the fd 6169 * opened to the cgroup directory in cgroupfs. The cpu argument 6170 * designates the cpu on which to monitor threads from that 6171 * cgroup. 
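 * Both arguments therefore have to be valid in cgroup mode, which is
 * why pid == -1 or cpu == -1 is rejected below.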
6172 */ 6173 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) 6174 return -EINVAL; 6175 6176 event_fd = get_unused_fd_flags(O_RDWR); 6177 if (event_fd < 0) 6178 return event_fd; 6179 6180 if (group_fd != -1) { 6181 group_leader = perf_fget_light(group_fd, &fput_needed); 6182 if (IS_ERR(group_leader)) { 6183 err = PTR_ERR(group_leader); 6184 goto err_fd; 6185 } 6186 group_file = group_leader->filp; 6187 if (flags & PERF_FLAG_FD_OUTPUT) 6188 output_event = group_leader; 6189 if (flags & PERF_FLAG_FD_NO_GROUP) 6190 group_leader = NULL; 6191 } 6192 6193 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { 6194 task = find_lively_task_by_vpid(pid); 6195 if (IS_ERR(task)) { 6196 err = PTR_ERR(task); 6197 goto err_group_fd; 6198 } 6199 } 6200 6201 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, 6202 NULL, NULL); 6203 if (IS_ERR(event)) { 6204 err = PTR_ERR(event); 6205 goto err_task; 6206 } 6207 6208 if (flags & PERF_FLAG_PID_CGROUP) { 6209 err = perf_cgroup_connect(pid, event, &attr, group_leader); 6210 if (err) 6211 goto err_alloc; 6212 /* 6213 * one more event: 6214 * - that has cgroup constraint on event->cpu 6215 * - that may need work on context switch 6216 */ 6217 atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); 6218 jump_label_inc(&perf_sched_events); 6219 } 6220 6221 /* 6222 * Special case software events and allow them to be part of 6223 * any hardware group. 6224 */ 6225 pmu = event->pmu; 6226 6227 if (group_leader && 6228 (is_software_event(event) != is_software_event(group_leader))) { 6229 if (is_software_event(event)) { 6230 /* 6231 * If event and group_leader are not both a software 6232 * event, and event is, then group leader is not. 6233 * 6234 * Allow the addition of software events to !software 6235 * groups, this is safe because software events never 6236 * fail to schedule. 6237 */ 6238 pmu = group_leader->pmu; 6239 } else if (is_software_event(group_leader) && 6240 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { 6241 /* 6242 * In case the group is a pure software group, and we 6243 * try to add a hardware event, move the whole group to 6244 * the hardware context. 
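 * (Setting move_group makes the code further down detach the leader
 * and its siblings from their old context and re-install them,
 * together with the new event, in the context of the hardware pmu.)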
6245 */ 6246 move_group = 1; 6247 } 6248 } 6249 6250 /* 6251 * Get the target context (task or percpu): 6252 */ 6253 ctx = find_get_context(pmu, task, cpu); 6254 if (IS_ERR(ctx)) { 6255 err = PTR_ERR(ctx); 6256 goto err_alloc; 6257 } 6258 6259 if (task) { 6260 put_task_struct(task); 6261 task = NULL; 6262 } 6263 6264 /* 6265 * Look up the group leader (we will attach this event to it): 6266 */ 6267 if (group_leader) { 6268 err = -EINVAL; 6269 6270 /* 6271 * Do not allow a recursive hierarchy (this new sibling 6272 * becoming part of another group-sibling): 6273 */ 6274 if (group_leader->group_leader != group_leader) 6275 goto err_context; 6276 /* 6277 * Do not allow to attach to a group in a different 6278 * task or CPU context: 6279 */ 6280 if (move_group) { 6281 if (group_leader->ctx->type != ctx->type) 6282 goto err_context; 6283 } else { 6284 if (group_leader->ctx != ctx) 6285 goto err_context; 6286 } 6287 6288 /* 6289 * Only a group leader can be exclusive or pinned 6290 */ 6291 if (attr.exclusive || attr.pinned) 6292 goto err_context; 6293 } 6294 6295 if (output_event) { 6296 err = perf_event_set_output(event, output_event); 6297 if (err) 6298 goto err_context; 6299 } 6300 6301 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR); 6302 if (IS_ERR(event_file)) { 6303 err = PTR_ERR(event_file); 6304 goto err_context; 6305 } 6306 6307 if (move_group) { 6308 struct perf_event_context *gctx = group_leader->ctx; 6309 6310 mutex_lock(&gctx->mutex); 6311 perf_remove_from_context(group_leader); 6312 list_for_each_entry(sibling, &group_leader->sibling_list, 6313 group_entry) { 6314 perf_remove_from_context(sibling); 6315 put_ctx(gctx); 6316 } 6317 mutex_unlock(&gctx->mutex); 6318 put_ctx(gctx); 6319 } 6320 6321 event->filp = event_file; 6322 WARN_ON_ONCE(ctx->parent_ctx); 6323 mutex_lock(&ctx->mutex); 6324 6325 if (move_group) { 6326 perf_install_in_context(ctx, group_leader, cpu); 6327 get_ctx(ctx); 6328 list_for_each_entry(sibling, &group_leader->sibling_list, 6329 group_entry) { 6330 perf_install_in_context(ctx, sibling, cpu); 6331 get_ctx(ctx); 6332 } 6333 } 6334 6335 perf_install_in_context(ctx, event, cpu); 6336 ++ctx->generation; 6337 perf_unpin_context(ctx); 6338 mutex_unlock(&ctx->mutex); 6339 6340 event->owner = current; 6341 6342 mutex_lock(¤t->perf_event_mutex); 6343 list_add_tail(&event->owner_entry, ¤t->perf_event_list); 6344 mutex_unlock(¤t->perf_event_mutex); 6345 6346 /* 6347 * Precalculate sample_data sizes 6348 */ 6349 perf_event__header_size(event); 6350 perf_event__id_header_size(event); 6351 6352 /* 6353 * Drop the reference on the group_event after placing the 6354 * new event on the sibling_list. This ensures destruction 6355 * of the group leader will find the pointer to itself in 6356 * perf_group_detach(). 
6357 */ 6358 fput_light(group_file, fput_needed); 6359 fd_install(event_fd, event_file); 6360 return event_fd; 6361 6362 err_context: 6363 perf_unpin_context(ctx); 6364 put_ctx(ctx); 6365 err_alloc: 6366 free_event(event); 6367 err_task: 6368 if (task) 6369 put_task_struct(task); 6370 err_group_fd: 6371 fput_light(group_file, fput_needed); 6372 err_fd: 6373 put_unused_fd(event_fd); 6374 return err; 6375 } 6376 6377 /** 6378 * perf_event_create_kernel_counter 6379 * 6380 * @attr: attributes of the counter to create 6381 * @cpu: cpu in which the counter is bound 6382 * @task: task to profile (NULL for percpu) 6383 */ 6384 struct perf_event * 6385 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, 6386 struct task_struct *task, 6387 perf_overflow_handler_t overflow_handler, 6388 void *context) 6389 { 6390 struct perf_event_context *ctx; 6391 struct perf_event *event; 6392 int err; 6393 6394 /* 6395 * Get the target context (task or percpu): 6396 */ 6397 6398 event = perf_event_alloc(attr, cpu, task, NULL, NULL, 6399 overflow_handler, context); 6400 if (IS_ERR(event)) { 6401 err = PTR_ERR(event); 6402 goto err; 6403 } 6404 6405 ctx = find_get_context(event->pmu, task, cpu); 6406 if (IS_ERR(ctx)) { 6407 err = PTR_ERR(ctx); 6408 goto err_free; 6409 } 6410 6411 event->filp = NULL; 6412 WARN_ON_ONCE(ctx->parent_ctx); 6413 mutex_lock(&ctx->mutex); 6414 perf_install_in_context(ctx, event, cpu); 6415 ++ctx->generation; 6416 perf_unpin_context(ctx); 6417 mutex_unlock(&ctx->mutex); 6418 6419 return event; 6420 6421 err_free: 6422 free_event(event); 6423 err: 6424 return ERR_PTR(err); 6425 } 6426 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); 6427 6428 static void sync_child_event(struct perf_event *child_event, 6429 struct task_struct *child) 6430 { 6431 struct perf_event *parent_event = child_event->parent; 6432 u64 child_val; 6433 6434 if (child_event->attr.inherit_stat) 6435 perf_event_read_event(child_event, child); 6436 6437 child_val = perf_event_count(child_event); 6438 6439 /* 6440 * Add back the child's count to the parent's count: 6441 */ 6442 atomic64_add(child_val, &parent_event->child_count); 6443 atomic64_add(child_event->total_time_enabled, 6444 &parent_event->child_total_time_enabled); 6445 atomic64_add(child_event->total_time_running, 6446 &parent_event->child_total_time_running); 6447 6448 /* 6449 * Remove this event from the parent's list 6450 */ 6451 WARN_ON_ONCE(parent_event->ctx->parent_ctx); 6452 mutex_lock(&parent_event->child_mutex); 6453 list_del_init(&child_event->child_list); 6454 mutex_unlock(&parent_event->child_mutex); 6455 6456 /* 6457 * Release the parent event, if this was the last 6458 * reference to it. 6459 */ 6460 fput(parent_event->filp); 6461 } 6462 6463 static void 6464 __perf_event_exit_task(struct perf_event *child_event, 6465 struct perf_event_context *child_ctx, 6466 struct task_struct *child) 6467 { 6468 if (child_event->parent) { 6469 raw_spin_lock_irq(&child_ctx->lock); 6470 perf_group_detach(child_event); 6471 raw_spin_unlock_irq(&child_ctx->lock); 6472 } 6473 6474 perf_remove_from_context(child_event); 6475 6476 /* 6477 * It can happen that the parent exits first, and has events 6478 * that are still around due to the child reference. These 6479 * events need to be zapped. 
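 * Only inherited events (those with ->parent set) are synced back to
 * the parent and freed here.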
6480 */ 6481 if (child_event->parent) { 6482 sync_child_event(child_event, child); 6483 free_event(child_event); 6484 } 6485 } 6486 6487 static void perf_event_exit_task_context(struct task_struct *child, int ctxn) 6488 { 6489 struct perf_event *child_event, *tmp; 6490 struct perf_event_context *child_ctx; 6491 unsigned long flags; 6492 6493 if (likely(!child->perf_event_ctxp[ctxn])) { 6494 perf_event_task(child, NULL, 0); 6495 return; 6496 } 6497 6498 local_irq_save(flags); 6499 /* 6500 * We can't reschedule here because interrupts are disabled, 6501 * and either child is current or it is a task that can't be 6502 * scheduled, so we are now safe from rescheduling changing 6503 * our context. 6504 */ 6505 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); 6506 6507 /* 6508 * Take the context lock here so that if find_get_context is 6509 * reading child->perf_event_ctxp, we wait until it has 6510 * incremented the context's refcount before we do put_ctx below. 6511 */ 6512 raw_spin_lock(&child_ctx->lock); 6513 task_ctx_sched_out(child_ctx); 6514 child->perf_event_ctxp[ctxn] = NULL; 6515 /* 6516 * If this context is a clone; unclone it so it can't get 6517 * swapped to another process while we're removing all 6518 * the events from it. 6519 */ 6520 unclone_ctx(child_ctx); 6521 update_context_time(child_ctx); 6522 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 6523 6524 /* 6525 * Report the task dead after unscheduling the events so that we 6526 * won't get any samples after PERF_RECORD_EXIT. We can however still 6527 * get a few PERF_RECORD_READ events. 6528 */ 6529 perf_event_task(child, child_ctx, 0); 6530 6531 /* 6532 * We can recurse on the same lock type through: 6533 * 6534 * __perf_event_exit_task() 6535 * sync_child_event() 6536 * fput(parent_event->filp) 6537 * perf_release() 6538 * mutex_lock(&ctx->mutex) 6539 * 6540 * But since its the parent context it won't be the same instance. 6541 */ 6542 mutex_lock(&child_ctx->mutex); 6543 6544 again: 6545 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups, 6546 group_entry) 6547 __perf_event_exit_task(child_event, child_ctx, child); 6548 6549 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups, 6550 group_entry) 6551 __perf_event_exit_task(child_event, child_ctx, child); 6552 6553 /* 6554 * If the last event was a group event, it will have appended all 6555 * its siblings to the list, but we obtained 'tmp' before that which 6556 * will still point to the list head terminating the iteration. 6557 */ 6558 if (!list_empty(&child_ctx->pinned_groups) || 6559 !list_empty(&child_ctx->flexible_groups)) 6560 goto again; 6561 6562 mutex_unlock(&child_ctx->mutex); 6563 6564 put_ctx(child_ctx); 6565 } 6566 6567 /* 6568 * When a child task exits, feed back event values to parent events. 6569 */ 6570 void perf_event_exit_task(struct task_struct *child) 6571 { 6572 struct perf_event *event, *tmp; 6573 int ctxn; 6574 6575 mutex_lock(&child->perf_event_mutex); 6576 list_for_each_entry_safe(event, tmp, &child->perf_event_list, 6577 owner_entry) { 6578 list_del_init(&event->owner_entry); 6579 6580 /* 6581 * Ensure the list deletion is visible before we clear 6582 * the owner, closes a race against perf_release() where 6583 * we need to serialize on the owner->perf_event_mutex. 
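 * The smp_wmb() below orders the list_del_init() above against the
 * subsequent clearing of event->owner.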
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	fput(parent->filp);

	perf_group_detach(event);
	list_del_event(event, ctx);
	free_event(event);
}

/*
 * Free an unexposed, unused context, as created by inheritance in
 * perf_event_init_task() below; used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

/*
 * Inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       child,
				       group_leader, parent_event,
				       NULL, NULL);
	if (IS_ERR(child_event))
		return child_event;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child event exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_event->filp->f_count);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
			 struct task_struct *parent,
			 struct perf_event_context *parent_ctx,
			 struct task_struct *child,
			 struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(event->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_group list due
	 * to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret)
			return ret;
	}

	return 0;
}

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
	}
}

static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}

static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *tmp;

	perf_pmu_rotate_stop(ctx->pmu);

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_remove_from_context(event);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);

	perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *perf_cgroup_create(
	struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	struct perf_cgroup *jc;
	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
			  struct perf_cgroup, css);
	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

static void
perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
{
	task_function_call(task, __perf_cgroup_move, task);
}

static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
			     struct cgroup *old_cgrp, struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet, this avoids
	 * trying to poke a half freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	perf_cgroup_attach_task(cgrp, task);
}

struct cgroup_subsys perf_subsys = {
	.name		= "perf_event",
	.subsys_id	= perf_subsys_id,
	.create		= perf_cgroup_create,
	.destroy	= perf_cgroup_destroy,
	.exit		= perf_cgroup_exit,
	.attach_task	= perf_cgroup_attach_task,
};
#endif /* CONFIG_CGROUP_PERF */