/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>

#include "internal.h"

#include <asm/irq_regs.h>

static struct workqueue_struct *perf_wq;

typedef int (*remote_function_f)(void *);

struct remote_function_call {
	struct task_struct	*p;
	remote_function_f	func;
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}
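/*
 * Usage sketch for the helpers above (illustrative only; the real
 * callers appear further down in this file): -ESRCH means the task
 * wasn't running, -EAGAIN means it migrated between the task_curr()/
 * task_cpu() checks and the IPI running, so callers re-validate state
 * under ctx->lock and retry:
 *
 *	retry:
 *		if (!task_function_call(task, __do_remote_work, info))
 *			return;
 *		// re-check under the lock, goto retry if still needed
 *
 * __do_remote_work stands in for a real remote_function_f.
 */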
#define EVENT_OWNER_KERNEL ((void *) -1)

static bool is_kernel_event(struct perf_event *event)
{
	return event->owner == EVENT_OWNER_KERNEL;
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	do_div(tmp, 100);
	ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
}
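/*
 * Worked example with the defaults above: 100000 samples/sec gives
 * perf_sample_period_ns = NSEC_PER_SEC / 100000 = 10000ns, and with
 * sysctl_perf_cpu_time_max_percent = 25 the computed limit is
 * 10000 * 25 / 100 = 2500ns of sampling time allowed per period.
 */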
static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	update_perf_cpu_limits();

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static void perf_duration_warn(struct irq_work *w)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	local_samples_len = __this_cpu_read(running_sample_length);
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	printk_ratelimited(KERN_WARNING
			"perf interrupt took too long (%lld > %lld), lowering "
			"kernel.perf_event_max_sample_rate to %d\n",
			avg_local_sample_len, allowed_ns >> 1,
			sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	if (allowed_ns == 0)
		return;

	/* decay the counter by 1 average sample */
	local_samples_len = __this_cpu_read(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__this_cpu_write(running_sample_length, local_samples_len);

	/*
	 * note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	if (avg_local_sample_len <= allowed_ns)
		return;

	if (max_samples_per_tick <= 1)
		return;

	max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	update_perf_cpu_limits();

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     avg_local_sample_len, allowed_ns >> 1,
			     sysctl_perf_event_sample_rate);
	}
}
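/*
 * The update above is an exponential moving average:
 * new = old - old/128 + sample. In steady state the accumulator
 * converges to 128 * sample, so dividing by NR_ACCUMULATED_SAMPLES
 * yields the average sample length without an explicit sample count.
 */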
static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline u64 perf_event_clock(struct perf_event *event)
{
	return event->clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups.  If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}
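/*
 * Example: with a hierarchy A/B/C, an event attached to cgroup A
 * matches while the CPU runs a task in A, B or C; an event attached
 * to C matches only tasks in C itself.
 */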
static inline void perf_detach_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}
#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->unique_pmu != pmu)
			continue; /* ensure we process each cpuctx once */

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to pass
				 * task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out during ctxsw if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_online_from_dir(f.file->f_path.dentry,
					 &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_context *cpuctx;
	int rotations = 0;

	WARN_ON(!irqs_disabled());

	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
	rotations = perf_rotate_context(cpuctx);

	raw_spin_lock(&cpuctx->hrtimer_lock);
	if (rotations)
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
	else
		cpuctx->hrtimer_active = 0;
	raw_spin_unlock(&cpuctx->hrtimer_lock);

	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
}

static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	u64 interval;

	/* no multiplexing needed for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	interval = pmu->hrtimer_interval_ms;
	if (interval < 1)
		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);

	raw_spin_lock_init(&cpuctx->hrtimer_lock);
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	timer->function = perf_mux_hrtimer_handler;
}

static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	unsigned long flags;

	/* not for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return 0;

	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
	if (!cpuctx->hrtimer_active) {
		cpuctx->hrtimer_active = 1;
		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

	return 0;
}

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}
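/*
 * perf_pmu_disable()/perf_pmu_enable() nest via the per-cpu
 * pmu_disable_count; only the outermost pair reaches the driver:
 *
 *	perf_pmu_disable(pmu);	// count 0 -> 1, calls pmu->pmu_disable()
 *	perf_pmu_disable(pmu);	// count 1 -> 2, no callback
 *	perf_pmu_enable(pmu);	// count 2 -> 1, no callback
 *	perf_pmu_enable(pmu);	// count 1 -> 0, calls pmu->pmu_enable()
 */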
static DEFINE_PER_CPU(struct list_head, active_ctx_list);

/*
 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
 * perf_event_task_tick() are fully serialized because they're strictly cpu
 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
 * disabled, while perf_event_task_tick is called from IRQ context.
 */
static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
	struct list_head *head = this_cpu_ptr(&active_ctx_list);

	WARN_ON(!irqs_disabled());

	WARN_ON(!list_empty(&ctx->active_ctx_list));

	list_add(&ctx->active_ctx_list, head);
}

static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
	WARN_ON(!irqs_disabled());

	WARN_ON(list_empty(&ctx->active_ctx_list));

	list_del_init(&ctx->active_ctx_list);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx->task_ctx_data);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}
/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There are two other sites where
 * perf_event_context::mutex nests and those are:
 *
 *  - perf_event_exit_task_context()	[ child , 0 ]
 *      __perf_event_exit_task()
 *        sync_child_event()
 *          put_event()			[ parent, 1 ]
 *
 *  - perf_event_init_context()		[ parent, 0 ]
 *      inherit_task_group()
 *        inherit_group()
 *          inherit_event()
 *            perf_event_alloc()
 *              perf_init_event()
 *                perf_try_init_event()	[ child , 1 ]
 *
 * While it appears there is an obvious deadlock here -- the parent and child
 * nesting levels are inverted between the two. This is in fact safe because
 * life-time rules separate them. That is, an exiting task cannot fork, and a
 * spawning task cannot (yet) exit.
 *
 * But remember that these are parent<->child context relations, and
 * migration does not affect children, therefore these two orderings should not
 * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to effect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location. This
 * means that only external vectors (perf_fops, prctl) can perturb the event
 * while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However; because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
 *
 * Lock order:
 *    task_struct::perf_event_mutex
 *      perf_event_context::mutex
 *        perf_event_context::lock
 *          perf_event::child_mutex;
 *            perf_event::mmap_mutex
 *            mmap_sem
 */
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
	struct perf_event_context *ctx;

again:
	rcu_read_lock();
	ctx = ACCESS_ONCE(event->ctx);
	if (!atomic_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	mutex_lock_nested(&ctx->mutex, nesting);
	if (event->ctx != ctx) {
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}

	return ctx;
}

static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
	return perf_event_ctx_lock_nested(event, 0);
}

static void perf_event_ctx_unlock(struct perf_event *event,
				  struct perf_event_context *ctx)
{
	mutex_unlock(&ctx->mutex);
	put_ctx(ctx);
}
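/*
 * Usage sketch for the helpers above (this is the pattern
 * perf_event_disable() below follows):
 *
 *	ctx = perf_event_ctx_lock(event);
 *	... event->ctx is now stable and cannot change under us ...
 *	perf_event_ctx_unlock(event, ctx);
 */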
/*
 * This must be done under the ctx->lock, such as to serialize against
 * context_equiv(), therefore we cannot call put_ctx() since that might end up
 * calling scheduler related locks and ctx->lock nests inside those.
 */
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{
	struct perf_event_context *parent_ctx = ctx->parent_ctx;

	lockdep_assert_held(&ctx->lock);

	if (parent_ctx)
		ctx->parent_ctx = NULL;
	ctx->generation++;

	return parent_ctx;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was preemptible -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section is non-preemptible.
	 */
	preempt_disable();
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			rcu_read_unlock();
			preempt_enable();
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	preempt_enable();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}
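/*
 * Example: an event with tstamp_enabled = 100 in a context whose
 * ctx->time is now 180 reports total_time_enabled = 80; if the event
 * went INACTIVE at t = 150, run_end is clamped to tstamp_stopped so
 * total_time_running stops accruing from that point on.
 */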
/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;

	ctx->generation++;
}

/*
 * Initialize event state based on the perf_event_attr::disabled.
 */
static inline void perf_event__state_init(struct perf_event *event)
{
	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
					      PERF_EVENT_STATE_INACTIVE;
}
/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}
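/*
 * Worked example: read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_ID | PERF_FORMAT_GROUP on a leader with 2 siblings:
 * entry = 16 (value + id), nr = 3, size = 8 (time_enabled) + 8 (the
 * nr field added for GROUP) + 3 * 16 = 64 bytes per read().
 */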
static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		size += sizeof(data->weight);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		size += sizeof(data->data_src.val);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		size += sizeof(data->txn);

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	WARN_ON_ONCE(group_leader->ctx != event->ctx);

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
	    !is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	ctx->generation++;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;

		WARN_ON_ONCE(sibling->ctx != event->ctx);
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}
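/*
 * Example: detaching the leader L of a group {L, s1, s2} promotes s1
 * and s2 to singleton events: each becomes its own group_leader, and
 * both are moved onto whatever context list L was on.
 */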
/*
 * User event without the task.
 */
static bool is_orphaned_event(struct perf_event *event)
{
	return event && !is_kernel_event(event) && !event->owner;
}

/*
 * Event has a parent but parent's task finished and it's
 * alive only because of children holding reference.
 */
static bool is_orphaned_child(struct perf_event *event)
{
	return is_orphaned_event(event->parent);
}

static void orphans_remove_work(struct work_struct *work);

static void schedule_orphans_remove(struct perf_event_context *ctx)
{
	if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
		return;

	if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
		get_ctx(ctx);
		ctx->orphans_remove_sched = true;
	}
}

static int __init perf_workqueue_init(void)
{
	perf_wq = create_singlethread_workqueue("perf");
	WARN(!perf_wq, "failed to create perf workqueue\n");
	return perf_wq ? 0 : -1;
}

core_initcall(perf_workqueue_init);

static inline int pmu_filter_match(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;
	return pmu->filter_match ? pmu->filter_match(event) : 1;
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event) && pmu_filter_match(event);
}

static void
event_sched_out(struct perf_event *event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;

	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	perf_pmu_disable(event->pmu);

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	if (!--ctx->nr_active)
		perf_event_ctx_deactivate(ctx);
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;

	if (is_orphaned_child(event))
		schedule_orphans_remove(ctx);

	perf_pmu_enable(event->pmu);
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}
struct remove_event {
	struct perf_event *event;
	bool detach_group;
};

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct remove_event *re = info;
	struct perf_event *event = re->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	if (re->detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event, bool detach_group)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;
	struct remove_event re = {
		.event = event,
		.detach_group = detach_group,
	};

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call. The removal can
		 * fail if the CPU is currently offline, but in that case we
		 * already called __perf_remove_from_context from
		 * perf_event_exit_cpu.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, &re))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event; our
	 * holding of the ctx->lock ensures the task won't get scheduled in.
	 */
	if (detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Cross CPU call to disable a performance event
 */
int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
static void _perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Strictly speaking kernel users cannot create groups and therefore this
 * interface does not need the perf_event_ctx_lock() magic.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx;

	ctx = perf_event_ctx_lock(event);
	_perf_event_disable(event);
	perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_disable);
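/*
 * perf_event_disable() is the exported entry point, used e.g. by
 * kernel code that created a counter with
 * perf_event_create_kernel_counter() and wants to pause it;
 * perf_event_enable() further down is its counterpart.
 */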
static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *	tstamp - ctx->timestamp
	 * is equivalent to
	 *	tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - not scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the same time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);
static void perf_log_itrace_start(struct perf_event *event);

static int
event_sched_in(struct perf_event *event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	int ret = 0;

	lockdep_assert_held(&ctx->lock);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	perf_pmu_disable(event->pmu);

	perf_set_shadow_time(event, ctx, tstamp);

	perf_log_itrace_start(event);

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		ret = -EAGAIN;
		goto out;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	if (!ctx->nr_active++)
		perf_event_ctx_activate(ctx);
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	if (is_orphaned_child(event))
		schedule_orphans_remove(ctx);

out:
	perf_pmu_enable(event->pmu);

	return ret;
}
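/*
 * group_sched_in() below relies on the PMU transaction API:
 * start_txn() opens a transaction, each event_sched_in() adds one
 * event to it, and commit_txn() accepts or rejects the whole group
 * atomically; on failure every already-added event is scheduled back
 * out and cancel_txn() unwinds the transaction.
 */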
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = ctx->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		perf_mux_hrtimer_restart(cpuctx);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone through event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	perf_mux_hrtimer_restart(cpuctx);

	return -EAGAIN;
}

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			     struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task);

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}
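/*
 * Note the ordering in perf_event_sched_in() above: pinned groups
 * (cpu context first, then task context) are scheduled in before any
 * flexible group, so a pinned event cannot lose its counter to a
 * flexible one.
 */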
2059 */ 2060 if (ctx->task && task_ctx != ctx) { 2061 if (task_ctx) 2062 raw_spin_unlock(&task_ctx->lock); 2063 raw_spin_lock(&ctx->lock); 2064 task_ctx = ctx; 2065 } 2066 2067 if (task_ctx) { 2068 cpuctx->task_ctx = task_ctx; 2069 task = task_ctx->task; 2070 } 2071 2072 cpu_ctx_sched_out(cpuctx, EVENT_ALL); 2073 2074 update_context_time(ctx); 2075 /* 2076 * update cgrp time only if current cgrp 2077 * matches event->cgrp. Must be done before 2078 * calling add_event_to_ctx() 2079 */ 2080 update_cgrp_time_from_event(event); 2081 2082 add_event_to_ctx(event, ctx); 2083 2084 /* 2085 * Schedule everything back in 2086 */ 2087 perf_event_sched_in(cpuctx, task_ctx, task); 2088 2089 perf_pmu_enable(cpuctx->ctx.pmu); 2090 perf_ctx_unlock(cpuctx, task_ctx); 2091 2092 return 0; 2093 } 2094 2095 /* 2096 * Attach a performance event to a context 2097 * 2098 * First we add the event to the list with the hardware enable bit 2099 * in event->hw_config cleared. 2100 * 2101 * If the event is attached to a task which is on a CPU we use an smp 2102 * call to enable it in the task context. The task might have been 2103 * scheduled away, but we check this in the smp call again. 2104 */ 2105 static void 2106 perf_install_in_context(struct perf_event_context *ctx, 2107 struct perf_event *event, 2108 int cpu) 2109 { 2110 struct task_struct *task = ctx->task; 2111 2112 lockdep_assert_held(&ctx->mutex); 2113 2114 event->ctx = ctx; 2115 if (event->cpu != -1) 2116 event->cpu = cpu; 2117 2118 if (!task) { 2119 /* 2120 * Per cpu events are installed via an smp call and 2121 * the install is always successful. 2122 */ 2123 cpu_function_call(cpu, __perf_install_in_context, event); 2124 return; 2125 } 2126 2127 retry: 2128 if (!task_function_call(task, __perf_install_in_context, event)) 2129 return; 2130 2131 raw_spin_lock_irq(&ctx->lock); 2132 /* 2133 * If we failed to find a running task, but find the context active now 2134 * that we've acquired the ctx->lock, retry. 2135 */ 2136 if (ctx->is_active) { 2137 raw_spin_unlock_irq(&ctx->lock); 2138 /* 2139 * Reload the task pointer; it might have been changed by 2140 * a concurrent perf_event_context_sched_out(). 2141 */ 2142 task = ctx->task; 2143 goto retry; 2144 } 2145 2146 /* 2147 * Since the task isn't running, it's safe to add the event; our holding 2148 * the ctx->lock ensures the task won't get scheduled in. 2149 */ 2150 add_event_to_ctx(event, ctx); 2151 raw_spin_unlock_irq(&ctx->lock); 2152 } 2153 2154 /* 2155 * Put an event into inactive state and update time fields. 2156 * Enabling the leader of a group effectively enables all 2157 * the group members that aren't explicitly disabled, so we 2158 * have to update their ->tstamp_enabled also. 2159 * Note: this works for group members as well as group leaders 2160 * since the non-leader members' sibling_lists will be empty.
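 *
 * Worked example (illustrative numbers, not from the original source):
 * if ctx time is currently 10,000,000 ns and the event has accumulated
 * total_time_enabled = 4,000,000 ns, then tstamp_enabled is set to
 * 6,000,000 ns below, so a later update_event_times() computes
 * ctx_time - tstamp_enabled and the enabled time keeps growing
 * continuously from the 4,000,000 ns already banked.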
2161 */ 2162 static void __perf_event_mark_enabled(struct perf_event *event) 2163 { 2164 struct perf_event *sub; 2165 u64 tstamp = perf_event_time(event); 2166 2167 event->state = PERF_EVENT_STATE_INACTIVE; 2168 event->tstamp_enabled = tstamp - event->total_time_enabled; 2169 list_for_each_entry(sub, &event->sibling_list, group_entry) { 2170 if (sub->state >= PERF_EVENT_STATE_INACTIVE) 2171 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 2172 } 2173 } 2174 2175 /* 2176 * Cross CPU call to enable a performance event 2177 */ 2178 static int __perf_event_enable(void *info) 2179 { 2180 struct perf_event *event = info; 2181 struct perf_event_context *ctx = event->ctx; 2182 struct perf_event *leader = event->group_leader; 2183 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2184 int err; 2185 2186 /* 2187 * There's a time window between the 'ctx->is_active' check 2188 * in the perf_event_enable() function and this place having: 2189 * - IRQs on 2190 * - ctx->lock unlocked 2191 * 2192 * where the task could be killed and 'ctx' deactivated 2193 * by perf_event_exit_task. 2194 */ 2195 if (!ctx->is_active) 2196 return -EINVAL; 2197 2198 raw_spin_lock(&ctx->lock); 2199 update_context_time(ctx); 2200 2201 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2202 goto unlock; 2203 2204 /* 2205 * set current task's cgroup time reference point 2206 */ 2207 perf_cgroup_set_timestamp(current, ctx); 2208 2209 __perf_event_mark_enabled(event); 2210 2211 if (!event_filter_match(event)) { 2212 if (is_cgroup_event(event)) 2213 perf_cgroup_defer_enabled(event); 2214 goto unlock; 2215 } 2216 2217 /* 2218 * If the event is in a group and isn't the group leader, 2219 * then don't put it on unless the group is on. 2220 */ 2221 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 2222 goto unlock; 2223 2224 if (!group_can_go_on(event, cpuctx, 1)) { 2225 err = -EEXIST; 2226 } else { 2227 if (event == leader) 2228 err = group_sched_in(event, cpuctx, ctx); 2229 else 2230 err = event_sched_in(event, cpuctx, ctx); 2231 } 2232 2233 if (err) { 2234 /* 2235 * If this event can't go on and it's part of a 2236 * group, then the whole group has to come off. 2237 */ 2238 if (leader != event) { 2239 group_sched_out(leader, cpuctx, ctx); 2240 perf_mux_hrtimer_restart(cpuctx); 2241 } 2242 if (leader->attr.pinned) { 2243 update_group_times(leader); 2244 leader->state = PERF_EVENT_STATE_ERROR; 2245 } 2246 } 2247 2248 unlock: 2249 raw_spin_unlock(&ctx->lock); 2250 2251 return 0; 2252 } 2253 2254 /* 2255 * Enable an event. 2256 * 2257 * If event->ctx is a cloned context, callers must make sure that 2258 * every task struct that event->ctx->task could possibly point to 2259 * remains valid. This condition is satisfied when called through 2260 * perf_event_for_each_child or perf_event_for_each as described 2261 * for perf_event_disable. 2262 */ 2263 static void _perf_event_enable(struct perf_event *event) 2264 { 2265 struct perf_event_context *ctx = event->ctx; 2266 struct task_struct *task = ctx->task; 2267 2268 if (!task) { 2269 /* 2270 * Enable the event on the cpu that it's on 2271 */ 2272 cpu_function_call(event->cpu, __perf_event_enable, event); 2273 return; 2274 } 2275 2276 raw_spin_lock_irq(&ctx->lock); 2277 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2278 goto out; 2279 2280 /* 2281 * If the event is in error state, clear that first.
2282 * That way, if we see the event in error state below, we 2283 * know that it has gone back into error state, as distinct 2284 * from the task having been scheduled away before the 2285 * cross-call arrived. 2286 */ 2287 if (event->state == PERF_EVENT_STATE_ERROR) 2288 event->state = PERF_EVENT_STATE_OFF; 2289 2290 retry: 2291 if (!ctx->is_active) { 2292 __perf_event_mark_enabled(event); 2293 goto out; 2294 } 2295 2296 raw_spin_unlock_irq(&ctx->lock); 2297 2298 if (!task_function_call(task, __perf_event_enable, event)) 2299 return; 2300 2301 raw_spin_lock_irq(&ctx->lock); 2302 2303 /* 2304 * If the context is active and the event is still off, 2305 * we need to retry the cross-call. 2306 */ 2307 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { 2308 /* 2309 * task could have been flipped by a concurrent 2310 * perf_event_context_sched_out() 2311 */ 2312 task = ctx->task; 2313 goto retry; 2314 } 2315 2316 out: 2317 raw_spin_unlock_irq(&ctx->lock); 2318 } 2319 2320 /* 2321 * See perf_event_disable(); 2322 */ 2323 void perf_event_enable(struct perf_event *event) 2324 { 2325 struct perf_event_context *ctx; 2326 2327 ctx = perf_event_ctx_lock(event); 2328 _perf_event_enable(event); 2329 perf_event_ctx_unlock(event, ctx); 2330 } 2331 EXPORT_SYMBOL_GPL(perf_event_enable); 2332 2333 static int _perf_event_refresh(struct perf_event *event, int refresh) 2334 { 2335 /* 2336 * not supported on inherited events 2337 */ 2338 if (event->attr.inherit || !is_sampling_event(event)) 2339 return -EINVAL; 2340 2341 atomic_add(refresh, &event->event_limit); 2342 _perf_event_enable(event); 2343 2344 return 0; 2345 } 2346 2347 /* 2348 * See perf_event_disable() 2349 */ 2350 int perf_event_refresh(struct perf_event *event, int refresh) 2351 { 2352 struct perf_event_context *ctx; 2353 int ret; 2354 2355 ctx = perf_event_ctx_lock(event); 2356 ret = _perf_event_refresh(event, refresh); 2357 perf_event_ctx_unlock(event, ctx); 2358 2359 return ret; 2360 } 2361 EXPORT_SYMBOL_GPL(perf_event_refresh); 2362 2363 static void ctx_sched_out(struct perf_event_context *ctx, 2364 struct perf_cpu_context *cpuctx, 2365 enum event_type_t event_type) 2366 { 2367 struct perf_event *event; 2368 int is_active = ctx->is_active; 2369 2370 ctx->is_active &= ~event_type; 2371 if (likely(!ctx->nr_events)) 2372 return; 2373 2374 update_context_time(ctx); 2375 update_cgrp_time_from_cpuctx(cpuctx); 2376 if (!ctx->nr_active) 2377 return; 2378 2379 perf_pmu_disable(ctx->pmu); 2380 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { 2381 list_for_each_entry(event, &ctx->pinned_groups, group_entry) 2382 group_sched_out(event, cpuctx, ctx); 2383 } 2384 2385 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { 2386 list_for_each_entry(event, &ctx->flexible_groups, group_entry) 2387 group_sched_out(event, cpuctx, ctx); 2388 } 2389 perf_pmu_enable(ctx->pmu); 2390 } 2391 2392 /* 2393 * Test whether two contexts are equivalent, i.e. whether they have both been 2394 * cloned from the same version of the same context. 2395 * 2396 * Equivalence is measured using a generation number in the context that is 2397 * incremented on each modification to it; see unclone_ctx(), list_add_event() 2398 * and list_del_event(). 
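 *
 * For instance (illustrative): a clone records its parent's generation
 * in parent_gen at clone time; if an event is later added to the parent,
 * the parent's generation is bumped, the ctx1->generation ==
 * ctx2->parent_gen test below fails, and the context-switch swap
 * optimization is skipped for that pair.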
2399 */ 2400 static int context_equiv(struct perf_event_context *ctx1, 2401 struct perf_event_context *ctx2) 2402 { 2403 lockdep_assert_held(&ctx1->lock); 2404 lockdep_assert_held(&ctx2->lock); 2405 2406 /* Pinning disables the swap optimization */ 2407 if (ctx1->pin_count || ctx2->pin_count) 2408 return 0; 2409 2410 /* If ctx1 is the parent of ctx2 */ 2411 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) 2412 return 1; 2413 2414 /* If ctx2 is the parent of ctx1 */ 2415 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) 2416 return 1; 2417 2418 /* 2419 * If ctx1 and ctx2 have the same parent; we flatten the parent 2420 * hierarchy, see perf_event_init_context(). 2421 */ 2422 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && 2423 ctx1->parent_gen == ctx2->parent_gen) 2424 return 1; 2425 2426 /* Unmatched */ 2427 return 0; 2428 } 2429 2430 static void __perf_event_sync_stat(struct perf_event *event, 2431 struct perf_event *next_event) 2432 { 2433 u64 value; 2434 2435 if (!event->attr.inherit_stat) 2436 return; 2437 2438 /* 2439 * Update the event value, we cannot use perf_event_read() 2440 * because we're in the middle of a context switch and have IRQs 2441 * disabled, which upsets smp_call_function_single(), however 2442 * we know the event must be on the current CPU, therefore we 2443 * don't need to use it. 2444 */ 2445 switch (event->state) { 2446 case PERF_EVENT_STATE_ACTIVE: 2447 event->pmu->read(event); 2448 /* fall-through */ 2449 2450 case PERF_EVENT_STATE_INACTIVE: 2451 update_event_times(event); 2452 break; 2453 2454 default: 2455 break; 2456 } 2457 2458 /* 2459 * In order to keep per-task stats reliable we need to flip the event 2460 * values when we flip the contexts. 2461 */ 2462 value = local64_read(&next_event->count); 2463 value = local64_xchg(&event->count, value); 2464 local64_set(&next_event->count, value); 2465 2466 swap(event->total_time_enabled, next_event->total_time_enabled); 2467 swap(event->total_time_running, next_event->total_time_running); 2468 2469 /* 2470 * Since we swizzled the values, update the user visible data too. 
2471 */ 2472 perf_event_update_userpage(event); 2473 perf_event_update_userpage(next_event); 2474 } 2475 2476 static void perf_event_sync_stat(struct perf_event_context *ctx, 2477 struct perf_event_context *next_ctx) 2478 { 2479 struct perf_event *event, *next_event; 2480 2481 if (!ctx->nr_stat) 2482 return; 2483 2484 update_context_time(ctx); 2485 2486 event = list_first_entry(&ctx->event_list, 2487 struct perf_event, event_entry); 2488 2489 next_event = list_first_entry(&next_ctx->event_list, 2490 struct perf_event, event_entry); 2491 2492 while (&event->event_entry != &ctx->event_list && 2493 &next_event->event_entry != &next_ctx->event_list) { 2494 2495 __perf_event_sync_stat(event, next_event); 2496 2497 event = list_next_entry(event, event_entry); 2498 next_event = list_next_entry(next_event, event_entry); 2499 } 2500 } 2501 2502 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, 2503 struct task_struct *next) 2504 { 2505 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; 2506 struct perf_event_context *next_ctx; 2507 struct perf_event_context *parent, *next_parent; 2508 struct perf_cpu_context *cpuctx; 2509 int do_switch = 1; 2510 2511 if (likely(!ctx)) 2512 return; 2513 2514 cpuctx = __get_cpu_context(ctx); 2515 if (!cpuctx->task_ctx) 2516 return; 2517 2518 rcu_read_lock(); 2519 next_ctx = next->perf_event_ctxp[ctxn]; 2520 if (!next_ctx) 2521 goto unlock; 2522 2523 parent = rcu_dereference(ctx->parent_ctx); 2524 next_parent = rcu_dereference(next_ctx->parent_ctx); 2525 2526 /* If neither context has a parent context, they cannot be clones. */ 2527 if (!parent && !next_parent) 2528 goto unlock; 2529 2530 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 2531 /* 2532 * Looks like the two contexts are clones, so we might be 2533 * able to optimize the context switch. We lock both 2534 * contexts and check that they are clones under the 2535 * lock (including re-checking that neither has been 2536 * uncloned in the meantime). It doesn't matter which 2537 * order we take the locks because no other cpu could 2538 * be trying to lock both of these tasks. 2539 */ 2540 raw_spin_lock(&ctx->lock); 2541 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 2542 if (context_equiv(ctx, next_ctx)) { 2543 /* 2544 * XXX do we need a memory barrier of sorts 2545 * w.r.t. the rcu_dereference() of perf_event_ctxp 2546 */ 2547 task->perf_event_ctxp[ctxn] = next_ctx; 2548 next->perf_event_ctxp[ctxn] = ctx; 2549 ctx->task = next; 2550 next_ctx->task = task; 2551 2552 swap(ctx->task_ctx_data, next_ctx->task_ctx_data); 2553 2554 do_switch = 0; 2555 2556 perf_event_sync_stat(ctx, next_ctx); 2557 } 2558 raw_spin_unlock(&next_ctx->lock); 2559 raw_spin_unlock(&ctx->lock); 2560 } 2561 unlock: 2562 rcu_read_unlock(); 2563 2564 if (do_switch) { 2565 raw_spin_lock(&ctx->lock); 2566 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2567 cpuctx->task_ctx = NULL; 2568 raw_spin_unlock(&ctx->lock); 2569 } 2570 } 2571 2572 void perf_sched_cb_dec(struct pmu *pmu) 2573 { 2574 this_cpu_dec(perf_sched_cb_usages); 2575 } 2576 2577 void perf_sched_cb_inc(struct pmu *pmu) 2578 { 2579 this_cpu_inc(perf_sched_cb_usages); 2580 } 2581 2582 /* 2583 * This function provides the context switch callback to the lower code 2584 * layer. It is invoked ONLY when the context switch callback is enabled.
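 *
 * A minimal usage sketch for a PMU driver (the callback and helper names
 * below are hypothetical, not taken from this file):
 *
 *	static void my_pmu_sched_task(struct perf_event_context *ctx,
 *				      bool sched_in)
 *	{
 *		if (sched_in)
 *			my_pmu_restore_task_state(ctx);
 *		else
 *			my_pmu_flush_task_state(ctx);
 *	}
 *
 * with pmu->sched_task = my_pmu_sched_task, armed via perf_sched_cb_inc()
 * and disarmed via perf_sched_cb_dec() above when the last user goes away.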
2585 */ 2586 static void perf_pmu_sched_task(struct task_struct *prev, 2587 struct task_struct *next, 2588 bool sched_in) 2589 { 2590 struct perf_cpu_context *cpuctx; 2591 struct pmu *pmu; 2592 unsigned long flags; 2593 2594 if (prev == next) 2595 return; 2596 2597 local_irq_save(flags); 2598 2599 rcu_read_lock(); 2600 2601 list_for_each_entry_rcu(pmu, &pmus, entry) { 2602 if (pmu->sched_task) { 2603 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 2604 2605 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2606 2607 perf_pmu_disable(pmu); 2608 2609 pmu->sched_task(cpuctx->task_ctx, sched_in); 2610 2611 perf_pmu_enable(pmu); 2612 2613 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2614 } 2615 } 2616 2617 rcu_read_unlock(); 2618 2619 local_irq_restore(flags); 2620 } 2621 2622 #define for_each_task_context_nr(ctxn) \ 2623 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) 2624 2625 /* 2626 * Called from the scheduler to remove the events of the current task, 2627 * with interrupts disabled. 2628 * 2629 * We stop each event and update the event value in event->count. 2630 * 2631 * This does not protect us against NMI, but disable() 2632 * sets the disabled bit in the control field of event _before_ 2633 * accessing the event control register. If an NMI hits, then it will 2634 * not restart the event. 2635 */ 2636 void __perf_event_task_sched_out(struct task_struct *task, 2637 struct task_struct *next) 2638 { 2639 int ctxn; 2640 2641 if (__this_cpu_read(perf_sched_cb_usages)) 2642 perf_pmu_sched_task(task, next, false); 2643 2644 for_each_task_context_nr(ctxn) 2645 perf_event_context_sched_out(task, ctxn, next); 2646 2647 /* 2648 * if cgroup events exist on this CPU, then we need 2649 * to check if we have to switch out PMU state. 2650 * cgroup events are system-wide mode only 2651 */ 2652 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2653 perf_cgroup_sched_out(task, next); 2654 } 2655 2656 static void task_ctx_sched_out(struct perf_event_context *ctx) 2657 { 2658 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2659 2660 if (!cpuctx->task_ctx) 2661 return; 2662 2663 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 2664 return; 2665 2666 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2667 cpuctx->task_ctx = NULL; 2668 } 2669 2670 /* 2671 * Called with IRQs disabled 2672 */ 2673 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 2674 enum event_type_t event_type) 2675 { 2676 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); 2677 } 2678 2679 static void 2680 ctx_pinned_sched_in(struct perf_event_context *ctx, 2681 struct perf_cpu_context *cpuctx) 2682 { 2683 struct perf_event *event; 2684 2685 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { 2686 if (event->state <= PERF_EVENT_STATE_OFF) 2687 continue; 2688 if (!event_filter_match(event)) 2689 continue; 2690 2691 /* may need to reset tstamp_enabled */ 2692 if (is_cgroup_event(event)) 2693 perf_cgroup_mark_enabled(event, ctx); 2694 2695 if (group_can_go_on(event, cpuctx, 1)) 2696 group_sched_in(event, cpuctx, ctx); 2697 2698 /* 2699 * If this pinned group hasn't been scheduled, 2700 * put it in error state.
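 *
 * Unlike flexible groups, a pinned group is not retried on later
 * scheduling passes: once in ERROR state it stays off until it is
 * explicitly re-enabled (which clears ERROR back to OFF, see
 * _perf_event_enable() above), and a read() of it returns end-of-file.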
2701 */ 2702 if (event->state == PERF_EVENT_STATE_INACTIVE) { 2703 update_group_times(event); 2704 event->state = PERF_EVENT_STATE_ERROR; 2705 } 2706 } 2707 } 2708 2709 static void 2710 ctx_flexible_sched_in(struct perf_event_context *ctx, 2711 struct perf_cpu_context *cpuctx) 2712 { 2713 struct perf_event *event; 2714 int can_add_hw = 1; 2715 2716 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { 2717 /* Ignore events in OFF or ERROR state */ 2718 if (event->state <= PERF_EVENT_STATE_OFF) 2719 continue; 2720 /* 2721 * Listen to the 'cpu' scheduling filter constraint 2722 * of events: 2723 */ 2724 if (!event_filter_match(event)) 2725 continue; 2726 2727 /* may need to reset tstamp_enabled */ 2728 if (is_cgroup_event(event)) 2729 perf_cgroup_mark_enabled(event, ctx); 2730 2731 if (group_can_go_on(event, cpuctx, can_add_hw)) { 2732 if (group_sched_in(event, cpuctx, ctx)) 2733 can_add_hw = 0; 2734 } 2735 } 2736 } 2737 2738 static void 2739 ctx_sched_in(struct perf_event_context *ctx, 2740 struct perf_cpu_context *cpuctx, 2741 enum event_type_t event_type, 2742 struct task_struct *task) 2743 { 2744 u64 now; 2745 int is_active = ctx->is_active; 2746 2747 ctx->is_active |= event_type; 2748 if (likely(!ctx->nr_events)) 2749 return; 2750 2751 now = perf_clock(); 2752 ctx->timestamp = now; 2753 perf_cgroup_set_timestamp(task, ctx); 2754 /* 2755 * First go through the list and put on any pinned groups 2756 * in order to give them the best chance of going on. 2757 */ 2758 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) 2759 ctx_pinned_sched_in(ctx, cpuctx); 2760 2761 /* Then walk through the lower prio flexible groups */ 2762 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) 2763 ctx_flexible_sched_in(ctx, cpuctx); 2764 } 2765 2766 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 2767 enum event_type_t event_type, 2768 struct task_struct *task) 2769 { 2770 struct perf_event_context *ctx = &cpuctx->ctx; 2771 2772 ctx_sched_in(ctx, cpuctx, event_type, task); 2773 } 2774 2775 static void perf_event_context_sched_in(struct perf_event_context *ctx, 2776 struct task_struct *task) 2777 { 2778 struct perf_cpu_context *cpuctx; 2779 2780 cpuctx = __get_cpu_context(ctx); 2781 if (cpuctx->task_ctx == ctx) 2782 return; 2783 2784 perf_ctx_lock(cpuctx, ctx); 2785 perf_pmu_disable(ctx->pmu); 2786 /* 2787 * We want to keep the following priority order: 2788 * cpu pinned (that don't need to move), task pinned, 2789 * cpu flexible, task flexible. 2790 */ 2791 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2792 2793 if (ctx->nr_events) 2794 cpuctx->task_ctx = ctx; 2795 2796 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); 2797 2798 perf_pmu_enable(ctx->pmu); 2799 perf_ctx_unlock(cpuctx, ctx); 2800 } 2801 2802 /* 2803 * Called from the scheduler to add the events of the current task 2804 * with interrupts disabled. 2805 * 2806 * We restore the event value and then enable it. 2807 * 2808 * This does not protect us against NMI, but enable() 2809 * sets the enabled bit in the control field of event _before_ 2810 * accessing the event control register. If an NMI hits, then it will 2811 * keep the event running.
2812 */ 2813 void __perf_event_task_sched_in(struct task_struct *prev, 2814 struct task_struct *task) 2815 { 2816 struct perf_event_context *ctx; 2817 int ctxn; 2818 2819 for_each_task_context_nr(ctxn) { 2820 ctx = task->perf_event_ctxp[ctxn]; 2821 if (likely(!ctx)) 2822 continue; 2823 2824 perf_event_context_sched_in(ctx, task); 2825 } 2826 /* 2827 * if cgroup events exist on this CPU, then we need 2828 * to check if we have to switch in PMU state. 2829 * cgroup events are system-wide mode only 2830 */ 2831 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2832 perf_cgroup_sched_in(prev, task); 2833 2834 if (__this_cpu_read(perf_sched_cb_usages)) 2835 perf_pmu_sched_task(prev, task, true); 2836 } 2837 2838 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2839 { 2840 u64 frequency = event->attr.sample_freq; 2841 u64 sec = NSEC_PER_SEC; 2842 u64 divisor, dividend; 2843 2844 int count_fls, nsec_fls, frequency_fls, sec_fls; 2845 2846 count_fls = fls64(count); 2847 nsec_fls = fls64(nsec); 2848 frequency_fls = fls64(frequency); 2849 sec_fls = 30; 2850 2851 /* 2852 * We got @count in @nsec, with a target of sample_freq HZ 2853 * the target period becomes: 2854 * 2855 * @count * 10^9 2856 * period = ------------------- 2857 * @nsec * sample_freq 2858 * 2859 */ 2860 2861 /* 2862 * Reduce accuracy by one bit such that @a and @b converge 2863 * to a similar magnitude. 2864 */ 2865 #define REDUCE_FLS(a, b) \ 2866 do { \ 2867 if (a##_fls > b##_fls) { \ 2868 a >>= 1; \ 2869 a##_fls--; \ 2870 } else { \ 2871 b >>= 1; \ 2872 b##_fls--; \ 2873 } \ 2874 } while (0) 2875 2876 /* 2877 * Reduce accuracy until either term fits in a u64, then proceed with 2878 * the other, so that finally we can do a u64/u64 division. 2879 */ 2880 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { 2881 REDUCE_FLS(nsec, frequency); 2882 REDUCE_FLS(sec, count); 2883 } 2884 2885 if (count_fls + sec_fls > 64) { 2886 divisor = nsec * frequency; 2887 2888 while (count_fls + sec_fls > 64) { 2889 REDUCE_FLS(count, sec); 2890 divisor >>= 1; 2891 } 2892 2893 dividend = count * sec; 2894 } else { 2895 dividend = count * sec; 2896 2897 while (nsec_fls + frequency_fls > 64) { 2898 REDUCE_FLS(nsec, frequency); 2899 dividend >>= 1; 2900 } 2901 2902 divisor = nsec * frequency; 2903 } 2904 2905 if (!divisor) 2906 return dividend; 2907 2908 return div64_u64(dividend, divisor); 2909 } 2910 2911 static DEFINE_PER_CPU(int, perf_throttled_count); 2912 static DEFINE_PER_CPU(u64, perf_throttled_seq); 2913 2914 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) 2915 { 2916 struct hw_perf_event *hwc = &event->hw; 2917 s64 period, sample_period; 2918 s64 delta; 2919 2920 period = perf_calculate_period(event, nsec, count); 2921 2922 delta = (s64)(period - hwc->sample_period); 2923 delta = (delta + 7) / 8; /* low pass filter */ 2924 2925 sample_period = hwc->sample_period + delta; 2926 2927 if (!sample_period) 2928 sample_period = 1; 2929 2930 hwc->sample_period = sample_period; 2931 2932 if (local64_read(&hwc->period_left) > 8*sample_period) { 2933 if (disable) 2934 event->pmu->stop(event, PERF_EF_UPDATE); 2935 2936 local64_set(&hwc->period_left, 0); 2937 2938 if (disable) 2939 event->pmu->start(event, PERF_EF_RELOAD); 2940 } 2941 } 2942 2943 /* 2944 * combine freq adjustment with unthrottling to avoid two passes over the 2945 * events. At the same time, make sure that having freq events does not change 2946 * the rate of unthrottling as that would introduce bias.
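 *
 * Worked example for the period formula above (illustrative numbers
 * only): with attr.sample_freq = 1000 and a measured delta of
 * 2,000,000 events over a 10,000,000 ns tick, perf_calculate_period()
 * yields 2e6 * 1e9 / (1e7 * 1000) = 200,000, i.e. one sample every
 * 200k events, which is 1000 samples/sec at the observed event rate.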
2947 */ 2948 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, 2949 int needs_unthr) 2950 { 2951 struct perf_event *event; 2952 struct hw_perf_event *hwc; 2953 u64 now, period = TICK_NSEC; 2954 s64 delta; 2955 2956 /* 2957 * only need to iterate over all events iff: 2958 * - the context has events in frequency mode (needs freq adjust) 2959 * - there are events to unthrottle on this cpu 2960 */ 2961 if (!(ctx->nr_freq || needs_unthr)) 2962 return; 2963 2964 raw_spin_lock(&ctx->lock); 2965 perf_pmu_disable(ctx->pmu); 2966 2967 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 2968 if (event->state != PERF_EVENT_STATE_ACTIVE) 2969 continue; 2970 2971 if (!event_filter_match(event)) 2972 continue; 2973 2974 perf_pmu_disable(event->pmu); 2975 2976 hwc = &event->hw; 2977 2978 if (hwc->interrupts == MAX_INTERRUPTS) { 2979 hwc->interrupts = 0; 2980 perf_log_throttle(event, 1); 2981 event->pmu->start(event, 0); 2982 } 2983 2984 if (!event->attr.freq || !event->attr.sample_freq) 2985 goto next; 2986 2987 /* 2988 * stop the event and update event->count 2989 */ 2990 event->pmu->stop(event, PERF_EF_UPDATE); 2991 2992 now = local64_read(&event->count); 2993 delta = now - hwc->freq_count_stamp; 2994 hwc->freq_count_stamp = now; 2995 2996 /* 2997 * restart the event; 2998 * reload only if the value has changed. 2999 * We have stopped the event, so tell that 3000 * to perf_adjust_period() to avoid stopping it 3001 * twice. 3002 */ 3003 if (delta > 0) 3004 perf_adjust_period(event, period, delta, false); 3005 3006 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 3007 next: 3008 perf_pmu_enable(event->pmu); 3009 } 3010 3011 perf_pmu_enable(ctx->pmu); 3012 raw_spin_unlock(&ctx->lock); 3013 } 3014 3015 /* 3016 * Round-robin a context's events: 3017 */ 3018 static void rotate_ctx(struct perf_event_context *ctx) 3019 { 3020 /* 3021 * Rotate the first entry of the non-pinned groups to the end. Rotation 3022 * might be disabled by the inheritance code.
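 *
 * E.g. a flexible list ordered { A, B, C } becomes { B, C, A }, so each
 * group gets a turn at the hardware across successive rotations when
 * they cannot all be scheduled at once.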
3023 */ 3024 if (!ctx->rotate_disable) 3025 list_rotate_left(&ctx->flexible_groups); 3026 } 3027 3028 static int perf_rotate_context(struct perf_cpu_context *cpuctx) 3029 { 3030 struct perf_event_context *ctx = NULL; 3031 int rotate = 0; 3032 3033 if (cpuctx->ctx.nr_events) { 3034 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 3035 rotate = 1; 3036 } 3037 3038 ctx = cpuctx->task_ctx; 3039 if (ctx && ctx->nr_events) { 3040 if (ctx->nr_events != ctx->nr_active) 3041 rotate = 1; 3042 } 3043 3044 if (!rotate) 3045 goto done; 3046 3047 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 3048 perf_pmu_disable(cpuctx->ctx.pmu); 3049 3050 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 3051 if (ctx) 3052 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 3053 3054 rotate_ctx(&cpuctx->ctx); 3055 if (ctx) 3056 rotate_ctx(ctx); 3057 3058 perf_event_sched_in(cpuctx, ctx, current); 3059 3060 perf_pmu_enable(cpuctx->ctx.pmu); 3061 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 3062 done: 3063 3064 return rotate; 3065 } 3066 3067 #ifdef CONFIG_NO_HZ_FULL 3068 bool perf_event_can_stop_tick(void) 3069 { 3070 if (atomic_read(&nr_freq_events) || 3071 __this_cpu_read(perf_throttled_count)) 3072 return false; 3073 else 3074 return true; 3075 } 3076 #endif 3077 3078 void perf_event_task_tick(void) 3079 { 3080 struct list_head *head = this_cpu_ptr(&active_ctx_list); 3081 struct perf_event_context *ctx, *tmp; 3082 int throttled; 3083 3084 WARN_ON(!irqs_disabled()); 3085 3086 __this_cpu_inc(perf_throttled_seq); 3087 throttled = __this_cpu_xchg(perf_throttled_count, 0); 3088 3089 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) 3090 perf_adjust_freq_unthr_context(ctx, throttled); 3091 } 3092 3093 static int event_enable_on_exec(struct perf_event *event, 3094 struct perf_event_context *ctx) 3095 { 3096 if (!event->attr.enable_on_exec) 3097 return 0; 3098 3099 event->attr.enable_on_exec = 0; 3100 if (event->state >= PERF_EVENT_STATE_INACTIVE) 3101 return 0; 3102 3103 __perf_event_mark_enabled(event); 3104 3105 return 1; 3106 } 3107 3108 /* 3109 * Enable all of a task's events that have been marked enable-on-exec. 3110 * This expects task == current. 3111 */ 3112 static void perf_event_enable_on_exec(struct perf_event_context *ctx) 3113 { 3114 struct perf_event_context *clone_ctx = NULL; 3115 struct perf_event *event; 3116 unsigned long flags; 3117 int enabled = 0; 3118 int ret; 3119 3120 local_irq_save(flags); 3121 if (!ctx || !ctx->nr_events) 3122 goto out; 3123 3124 /* 3125 * We must ctxsw out cgroup events to avoid conflict 3126 * when invoking perf_task_event_sched_in() later on 3127 * in this function. Otherwise we end up trying to 3128 * ctxswin cgroup events which are already scheduled 3129 * in. 3130 */ 3131 perf_cgroup_sched_out(current, NULL); 3132 3133 raw_spin_lock(&ctx->lock); 3134 task_ctx_sched_out(ctx); 3135 3136 list_for_each_entry(event, &ctx->event_list, event_entry) { 3137 ret = event_enable_on_exec(event, ctx); 3138 if (ret) 3139 enabled = 1; 3140 } 3141 3142 /* 3143 * Unclone this context if we enabled any event. 
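 *
 * Uncloning clears parent_ctx and bumps the generation count, so the
 * modified context is no longer context_equiv() to its relatives and
 * will not be swapped with them at the next context switch.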
3144 */ 3145 if (enabled) 3146 clone_ctx = unclone_ctx(ctx); 3147 3148 raw_spin_unlock(&ctx->lock); 3149 3150 /* 3151 * Also calls ctxswin for cgroup events, if any: 3152 */ 3153 perf_event_context_sched_in(ctx, ctx->task); 3154 out: 3155 local_irq_restore(flags); 3156 3157 if (clone_ctx) 3158 put_ctx(clone_ctx); 3159 } 3160 3161 void perf_event_exec(void) 3162 { 3163 struct perf_event_context *ctx; 3164 int ctxn; 3165 3166 rcu_read_lock(); 3167 for_each_task_context_nr(ctxn) { 3168 ctx = current->perf_event_ctxp[ctxn]; 3169 if (!ctx) 3170 continue; 3171 3172 perf_event_enable_on_exec(ctx); 3173 } 3174 rcu_read_unlock(); 3175 } 3176 3177 /* 3178 * Cross CPU call to read the hardware event 3179 */ 3180 static void __perf_event_read(void *info) 3181 { 3182 struct perf_event *event = info; 3183 struct perf_event_context *ctx = event->ctx; 3184 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 3185 3186 /* 3187 * If this is a task context, we need to check whether it is 3188 * the current task context of this cpu. If not it has been 3189 * scheduled out before the smp call arrived. In that case 3190 * event->count would have been updated to a recent sample 3191 * when the event was scheduled out. 3192 */ 3193 if (ctx->task && cpuctx->task_ctx != ctx) 3194 return; 3195 3196 raw_spin_lock(&ctx->lock); 3197 if (ctx->is_active) { 3198 update_context_time(ctx); 3199 update_cgrp_time_from_event(event); 3200 } 3201 update_event_times(event); 3202 if (event->state == PERF_EVENT_STATE_ACTIVE) 3203 event->pmu->read(event); 3204 raw_spin_unlock(&ctx->lock); 3205 } 3206 3207 static inline u64 perf_event_count(struct perf_event *event) 3208 { 3209 if (event->pmu->count) 3210 return event->pmu->count(event); 3211 3212 return __perf_event_count(event); 3213 } 3214 3215 static u64 perf_event_read(struct perf_event *event) 3216 { 3217 /* 3218 * If event is enabled and currently active on a CPU, update the 3219 * value in the event structure: 3220 */ 3221 if (event->state == PERF_EVENT_STATE_ACTIVE) { 3222 smp_call_function_single(event->oncpu, 3223 __perf_event_read, event, 1); 3224 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 3225 struct perf_event_context *ctx = event->ctx; 3226 unsigned long flags; 3227 3228 raw_spin_lock_irqsave(&ctx->lock, flags); 3229 /* 3230 * may read while context is not active 3231 * (e.g., thread is blocked), in that case 3232 * we cannot update context time 3233 */ 3234 if (ctx->is_active) { 3235 update_context_time(ctx); 3236 update_cgrp_time_from_event(event); 3237 } 3238 update_event_times(event); 3239 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3240 } 3241 3242 return perf_event_count(event); 3243 } 3244 3245 /* 3246 * Initialize the perf_event context in a task_struct: 3247 */ 3248 static void __perf_event_init_context(struct perf_event_context *ctx) 3249 { 3250 raw_spin_lock_init(&ctx->lock); 3251 mutex_init(&ctx->mutex); 3252 INIT_LIST_HEAD(&ctx->active_ctx_list); 3253 INIT_LIST_HEAD(&ctx->pinned_groups); 3254 INIT_LIST_HEAD(&ctx->flexible_groups); 3255 INIT_LIST_HEAD(&ctx->event_list); 3256 atomic_set(&ctx->refcount, 1); 3257 INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work); 3258 } 3259 3260 static struct perf_event_context * 3261 alloc_perf_context(struct pmu *pmu, struct task_struct *task) 3262 { 3263 struct perf_event_context *ctx; 3264 3265 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); 3266 if (!ctx) 3267 return NULL; 3268 3269 __perf_event_init_context(ctx); 3270 if (task) { 3271 ctx->task = task; 3272 
get_task_struct(task); 3273 } 3274 ctx->pmu = pmu; 3275 3276 return ctx; 3277 } 3278 3279 static struct task_struct * 3280 find_lively_task_by_vpid(pid_t vpid) 3281 { 3282 struct task_struct *task; 3283 int err; 3284 3285 rcu_read_lock(); 3286 if (!vpid) 3287 task = current; 3288 else 3289 task = find_task_by_vpid(vpid); 3290 if (task) 3291 get_task_struct(task); 3292 rcu_read_unlock(); 3293 3294 if (!task) 3295 return ERR_PTR(-ESRCH); 3296 3297 /* Reuse ptrace permission checks for now. */ 3298 err = -EACCES; 3299 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 3300 goto errout; 3301 3302 return task; 3303 errout: 3304 put_task_struct(task); 3305 return ERR_PTR(err); 3306 3307 } 3308 3309 /* 3310 * Returns a matching context, with its refcount and pincount incremented. 3311 */ 3312 static struct perf_event_context * 3313 find_get_context(struct pmu *pmu, struct task_struct *task, 3314 struct perf_event *event) 3315 { 3316 struct perf_event_context *ctx, *clone_ctx = NULL; 3317 struct perf_cpu_context *cpuctx; 3318 void *task_ctx_data = NULL; 3319 unsigned long flags; 3320 int ctxn, err; 3321 int cpu = event->cpu; 3322 3323 if (!task) { 3324 /* Must be root to operate on a CPU event: */ 3325 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 3326 return ERR_PTR(-EACCES); 3327 3328 /* 3329 * We could be clever and allow attaching an event to an 3330 * offline CPU and activate it when the CPU comes up, but 3331 * that's for later. 3332 */ 3333 if (!cpu_online(cpu)) 3334 return ERR_PTR(-ENODEV); 3335 3336 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 3337 ctx = &cpuctx->ctx; 3338 get_ctx(ctx); 3339 ++ctx->pin_count; 3340 3341 return ctx; 3342 } 3343 3344 err = -EINVAL; 3345 ctxn = pmu->task_ctx_nr; 3346 if (ctxn < 0) 3347 goto errout; 3348 3349 if (event->attach_state & PERF_ATTACH_TASK_DATA) { 3350 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL); 3351 if (!task_ctx_data) { 3352 err = -ENOMEM; 3353 goto errout; 3354 } 3355 } 3356 3357 retry: 3358 ctx = perf_lock_task_context(task, ctxn, &flags); 3359 if (ctx) { 3360 clone_ctx = unclone_ctx(ctx); 3361 ++ctx->pin_count; 3362 3363 if (task_ctx_data && !ctx->task_ctx_data) { 3364 ctx->task_ctx_data = task_ctx_data; 3365 task_ctx_data = NULL; 3366 } 3367 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3368 3369 if (clone_ctx) 3370 put_ctx(clone_ctx); 3371 } else { 3372 ctx = alloc_perf_context(pmu, task); 3373 err = -ENOMEM; 3374 if (!ctx) 3375 goto errout; 3376 3377 if (task_ctx_data) { 3378 ctx->task_ctx_data = task_ctx_data; 3379 task_ctx_data = NULL; 3380 } 3381 3382 err = 0; 3383 mutex_lock(&task->perf_event_mutex); 3384 /* 3385 * If it has already passed perf_event_exit_task(), 3386 * we must see PF_EXITING; it takes this mutex too.
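 *
 * The -EAGAIN case below covers two threads racing to allocate the first
 * context for the same task: the loser finds task->perf_event_ctxp[ctxn]
 * already set, drops its own allocation, and retries so that
 * perf_lock_task_context() picks up the winner's context.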
3387 */ 3388 if (task->flags & PF_EXITING) 3389 err = -ESRCH; 3390 else if (task->perf_event_ctxp[ctxn]) 3391 err = -EAGAIN; 3392 else { 3393 get_ctx(ctx); 3394 ++ctx->pin_count; 3395 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); 3396 } 3397 mutex_unlock(&task->perf_event_mutex); 3398 3399 if (unlikely(err)) { 3400 put_ctx(ctx); 3401 3402 if (err == -EAGAIN) 3403 goto retry; 3404 goto errout; 3405 } 3406 } 3407 3408 kfree(task_ctx_data); 3409 return ctx; 3410 3411 errout: 3412 kfree(task_ctx_data); 3413 return ERR_PTR(err); 3414 } 3415 3416 static void perf_event_free_filter(struct perf_event *event); 3417 static void perf_event_free_bpf_prog(struct perf_event *event); 3418 3419 static void free_event_rcu(struct rcu_head *head) 3420 { 3421 struct perf_event *event; 3422 3423 event = container_of(head, struct perf_event, rcu_head); 3424 if (event->ns) 3425 put_pid_ns(event->ns); 3426 perf_event_free_filter(event); 3427 kfree(event); 3428 } 3429 3430 static void ring_buffer_attach(struct perf_event *event, 3431 struct ring_buffer *rb); 3432 3433 static void unaccount_event_cpu(struct perf_event *event, int cpu) 3434 { 3435 if (event->parent) 3436 return; 3437 3438 if (is_cgroup_event(event)) 3439 atomic_dec(&per_cpu(perf_cgroup_events, cpu)); 3440 } 3441 3442 static void unaccount_event(struct perf_event *event) 3443 { 3444 if (event->parent) 3445 return; 3446 3447 if (event->attach_state & PERF_ATTACH_TASK) 3448 static_key_slow_dec_deferred(&perf_sched_events); 3449 if (event->attr.mmap || event->attr.mmap_data) 3450 atomic_dec(&nr_mmap_events); 3451 if (event->attr.comm) 3452 atomic_dec(&nr_comm_events); 3453 if (event->attr.task) 3454 atomic_dec(&nr_task_events); 3455 if (event->attr.freq) 3456 atomic_dec(&nr_freq_events); 3457 if (is_cgroup_event(event)) 3458 static_key_slow_dec_deferred(&perf_sched_events); 3459 if (has_branch_stack(event)) 3460 static_key_slow_dec_deferred(&perf_sched_events); 3461 3462 unaccount_event_cpu(event, event->cpu); 3463 } 3464 3465 /* 3466 * The following implement mutual exclusion of events on "exclusive" pmus 3467 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled 3468 * at a time, so we disallow creating events that might conflict, namely: 3469 * 3470 * 1) cpu-wide events in the presence of per-task events, 3471 * 2) per-task events in the presence of cpu-wide events, 3472 * 3) two matching events on the same context. 3473 * 3474 * The former two cases are handled in the allocation path (perf_event_alloc(), 3475 * __free_event()), the latter -- before the first perf_install_in_context(). 3476 */ 3477 static int exclusive_event_init(struct perf_event *event) 3478 { 3479 struct pmu *pmu = event->pmu; 3480 3481 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3482 return 0; 3483 3484 /* 3485 * Prevent co-existence of per-task and cpu-wide events on the 3486 * same exclusive pmu. 3487 * 3488 * Negative pmu::exclusive_cnt means there are cpu-wide 3489 * events on this "exclusive" pmu, positive means there are 3490 * per-task events. 3491 * 3492 * Since this is called in perf_event_alloc() path, event::ctx 3493 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK 3494 * to mean "per-task event", because unlike other attach states it 3495 * never gets cleared. 
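 *
 * Worked example (illustrative numbers): two existing cpu-wide events
 * leave pmu->exclusive_cnt at -2, so a new per-task event fails the
 * atomic_inc_unless_negative() below and gets -EBUSY; conversely, a
 * count of +2 from two per-task events blocks a new cpu-wide event.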
3496 */ 3497 if (event->attach_state & PERF_ATTACH_TASK) { 3498 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) 3499 return -EBUSY; 3500 } else { 3501 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) 3502 return -EBUSY; 3503 } 3504 3505 return 0; 3506 } 3507 3508 static void exclusive_event_destroy(struct perf_event *event) 3509 { 3510 struct pmu *pmu = event->pmu; 3511 3512 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3513 return; 3514 3515 /* see comment in exclusive_event_init() */ 3516 if (event->attach_state & PERF_ATTACH_TASK) 3517 atomic_dec(&pmu->exclusive_cnt); 3518 else 3519 atomic_inc(&pmu->exclusive_cnt); 3520 } 3521 3522 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) 3523 { 3524 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && 3525 (e1->cpu == e2->cpu || 3526 e1->cpu == -1 || 3527 e2->cpu == -1)) 3528 return true; 3529 return false; 3530 } 3531 3532 /* Called under the same ctx::mutex as perf_install_in_context() */ 3533 static bool exclusive_event_installable(struct perf_event *event, 3534 struct perf_event_context *ctx) 3535 { 3536 struct perf_event *iter_event; 3537 struct pmu *pmu = event->pmu; 3538 3539 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3540 return true; 3541 3542 list_for_each_entry(iter_event, &ctx->event_list, event_entry) { 3543 if (exclusive_event_match(iter_event, event)) 3544 return false; 3545 } 3546 3547 return true; 3548 } 3549 3550 static void __free_event(struct perf_event *event) 3551 { 3552 if (!event->parent) { 3553 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 3554 put_callchain_buffers(); 3555 } 3556 3557 perf_event_free_bpf_prog(event); 3558 3559 if (event->destroy) 3560 event->destroy(event); 3561 3562 if (event->ctx) 3563 put_ctx(event->ctx); 3564 3565 if (event->pmu) { 3566 exclusive_event_destroy(event); 3567 module_put(event->pmu->module); 3568 } 3569 3570 call_rcu(&event->rcu_head, free_event_rcu); 3571 } 3572 3573 static void _free_event(struct perf_event *event) 3574 { 3575 irq_work_sync(&event->pending); 3576 3577 unaccount_event(event); 3578 3579 if (event->rb) { 3580 /* 3581 * Can happen when we close an event with re-directed output. 3582 * 3583 * Since we have a 0 refcount, perf_mmap_close() will skip 3584 * over us; possibly making our ring_buffer_put() the last. 3585 */ 3586 mutex_lock(&event->mmap_mutex); 3587 ring_buffer_attach(event, NULL); 3588 mutex_unlock(&event->mmap_mutex); 3589 } 3590 3591 if (is_cgroup_event(event)) 3592 perf_detach_cgroup(event); 3593 3594 __free_event(event); 3595 } 3596 3597 /* 3598 * Used to free events which have a known refcount of 1, such as in error paths 3599 * where the event isn't exposed yet and inherited events. 3600 */ 3601 static void free_event(struct perf_event *event) 3602 { 3603 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, 3604 "unexpected event refcount: %ld; ptr=%p\n", 3605 atomic_long_read(&event->refcount), event)) { 3606 /* leak to avoid use-after-free */ 3607 return; 3608 } 3609 3610 _free_event(event); 3611 } 3612 3613 /* 3614 * Remove user event from the owner task. 3615 */ 3616 static void perf_remove_from_owner(struct perf_event *event) 3617 { 3618 struct task_struct *owner; 3619 3620 rcu_read_lock(); 3621 owner = ACCESS_ONCE(event->owner); 3622 /* 3623 * Matches the smp_wmb() in perf_event_exit_task(). If we observe 3624 * !owner it means the list deletion is complete and we can indeed 3625 * free this event, otherwise we need to serialize on 3626 * owner->perf_event_mutex. 
3627 */ 3628 smp_read_barrier_depends(); 3629 if (owner) { 3630 /* 3631 * Since delayed_put_task_struct() also drops the last 3632 * task reference we can safely take a new reference 3633 * while holding the rcu_read_lock(). 3634 */ 3635 get_task_struct(owner); 3636 } 3637 rcu_read_unlock(); 3638 3639 if (owner) { 3640 /* 3641 * If we're here through perf_event_exit_task() we're already 3642 * holding ctx->mutex which would be an inversion wrt. the 3643 * normal lock order. 3644 * 3645 * However we can safely take this lock because it's the child 3646 * ctx->mutex. 3647 */ 3648 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); 3649 3650 /* 3651 * We have to re-check the event->owner field; if it is cleared 3652 * we raced with perf_event_exit_task(). Acquiring the mutex 3653 * ensured they're done, and we can proceed with freeing the 3654 * event. 3655 */ 3656 if (event->owner) 3657 list_del_init(&event->owner_entry); 3658 mutex_unlock(&owner->perf_event_mutex); 3659 put_task_struct(owner); 3660 } 3661 } 3662 3663 static void put_event(struct perf_event *event) 3664 { 3665 struct perf_event_context *ctx; 3666 3667 if (!atomic_long_dec_and_test(&event->refcount)) 3668 return; 3669 3670 if (!is_kernel_event(event)) 3671 perf_remove_from_owner(event); 3672 3673 /* 3674 * There are two ways this annotation is useful: 3675 * 3676 * 1) there is a lock recursion from perf_event_exit_task 3677 * see the comment there. 3678 * 3679 * 2) there is a lock-inversion with mmap_sem through 3680 * perf_event_read_group(), which takes faults while 3681 * holding ctx->mutex, however this is called after 3682 * the last filedesc died, so there is no possibility 3683 * to trigger the AB-BA case. 3684 */ 3685 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); 3686 WARN_ON_ONCE(ctx->parent_ctx); 3687 perf_remove_from_context(event, true); 3688 perf_event_ctx_unlock(event, ctx); 3689 3690 _free_event(event); 3691 } 3692 3693 int perf_event_release_kernel(struct perf_event *event) 3694 { 3695 put_event(event); 3696 return 0; 3697 } 3698 EXPORT_SYMBOL_GPL(perf_event_release_kernel); 3699 3700 /* 3701 * Called when the last reference to the file is gone. 3702 */ 3703 static int perf_release(struct inode *inode, struct file *file) 3704 { 3705 put_event(file->private_data); 3706 return 0; 3707 } 3708 3709 /* 3710 * Remove all orphaned events from the context.
3711 */ 3712 static void orphans_remove_work(struct work_struct *work) 3713 { 3714 struct perf_event_context *ctx; 3715 struct perf_event *event, *tmp; 3716 3717 ctx = container_of(work, struct perf_event_context, 3718 orphans_remove.work); 3719 3720 mutex_lock(&ctx->mutex); 3721 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { 3722 struct perf_event *parent_event = event->parent; 3723 3724 if (!is_orphaned_child(event)) 3725 continue; 3726 3727 perf_remove_from_context(event, true); 3728 3729 mutex_lock(&parent_event->child_mutex); 3730 list_del_init(&event->child_list); 3731 mutex_unlock(&parent_event->child_mutex); 3732 3733 free_event(event); 3734 put_event(parent_event); 3735 } 3736 3737 raw_spin_lock_irq(&ctx->lock); 3738 ctx->orphans_remove_sched = false; 3739 raw_spin_unlock_irq(&ctx->lock); 3740 mutex_unlock(&ctx->mutex); 3741 3742 put_ctx(ctx); 3743 } 3744 3745 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) 3746 { 3747 struct perf_event *child; 3748 u64 total = 0; 3749 3750 *enabled = 0; 3751 *running = 0; 3752 3753 mutex_lock(&event->child_mutex); 3754 total += perf_event_read(event); 3755 *enabled += event->total_time_enabled + 3756 atomic64_read(&event->child_total_time_enabled); 3757 *running += event->total_time_running + 3758 atomic64_read(&event->child_total_time_running); 3759 3760 list_for_each_entry(child, &event->child_list, child_list) { 3761 total += perf_event_read(child); 3762 *enabled += child->total_time_enabled; 3763 *running += child->total_time_running; 3764 } 3765 mutex_unlock(&event->child_mutex); 3766 3767 return total; 3768 } 3769 EXPORT_SYMBOL_GPL(perf_event_read_value); 3770 3771 static int perf_event_read_group(struct perf_event *event, 3772 u64 read_format, char __user *buf) 3773 { 3774 struct perf_event *leader = event->group_leader, *sub; 3775 struct perf_event_context *ctx = leader->ctx; 3776 int n = 0, size = 0, ret; 3777 u64 count, enabled, running; 3778 u64 values[5]; 3779 3780 lockdep_assert_held(&ctx->mutex); 3781 3782 count = perf_event_read_value(leader, &enabled, &running); 3783 3784 values[n++] = 1 + leader->nr_siblings; 3785 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3786 values[n++] = enabled; 3787 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3788 values[n++] = running; 3789 values[n++] = count; 3790 if (read_format & PERF_FORMAT_ID) 3791 values[n++] = primary_event_id(leader); 3792 3793 size = n * sizeof(u64); 3794 3795 if (copy_to_user(buf, values, size)) 3796 return -EFAULT; 3797 3798 ret = size; 3799 3800 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 3801 n = 0; 3802 3803 values[n++] = perf_event_read_value(sub, &enabled, &running); 3804 if (read_format & PERF_FORMAT_ID) 3805 values[n++] = primary_event_id(sub); 3806 3807 size = n * sizeof(u64); 3808 3809 if (copy_to_user(buf + ret, values, size)) { 3810 return -EFAULT; 3811 } 3812 3813 ret += size; 3814 } 3815 3816 return ret; 3817 } 3818 3819 static int perf_event_read_one(struct perf_event *event, 3820 u64 read_format, char __user *buf) 3821 { 3822 u64 enabled, running; 3823 u64 values[4]; 3824 int n = 0; 3825 3826 values[n++] = perf_event_read_value(event, &enabled, &running); 3827 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3828 values[n++] = enabled; 3829 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3830 values[n++] = running; 3831 if (read_format & PERF_FORMAT_ID) 3832 values[n++] = primary_event_id(event); 3833 3834 if (copy_to_user(buf, values, n * sizeof(u64))) 3835 return -EFAULT; 
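/*
 * Resulting read() buffer layout (sketch derived from
 * perf_event_read_group() and perf_event_read_one() above; bracketed
 * fields appear only when the matching read_format bit is set):
 *
 *	PERF_FORMAT_GROUP:	{ nr, [time_enabled], [time_running],
 *				  { value, [id] } * nr }
 *	otherwise:		{ value, [time_enabled], [time_running], [id] }
 */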
3836 3837 return n * sizeof(u64); 3838 } 3839 3840 static bool is_event_hup(struct perf_event *event) 3841 { 3842 bool no_children; 3843 3844 if (event->state != PERF_EVENT_STATE_EXIT) 3845 return false; 3846 3847 mutex_lock(&event->child_mutex); 3848 no_children = list_empty(&event->child_list); 3849 mutex_unlock(&event->child_mutex); 3850 return no_children; 3851 } 3852 3853 /* 3854 * Read the performance event - simple non-blocking version for now 3855 */ 3856 static ssize_t 3857 perf_read_hw(struct perf_event *event, char __user *buf, size_t count) 3858 { 3859 u64 read_format = event->attr.read_format; 3860 int ret; 3861 3862 /* 3863 * Return end-of-file for a read on an event that is in 3864 * error state (i.e. because it was pinned but it couldn't be 3865 * scheduled on to the CPU at some point). 3866 */ 3867 if (event->state == PERF_EVENT_STATE_ERROR) 3868 return 0; 3869 3870 if (count < event->read_size) 3871 return -ENOSPC; 3872 3873 WARN_ON_ONCE(event->ctx->parent_ctx); 3874 if (read_format & PERF_FORMAT_GROUP) 3875 ret = perf_event_read_group(event, read_format, buf); 3876 else 3877 ret = perf_event_read_one(event, read_format, buf); 3878 3879 return ret; 3880 } 3881 3882 static ssize_t 3883 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 3884 { 3885 struct perf_event *event = file->private_data; 3886 struct perf_event_context *ctx; 3887 int ret; 3888 3889 ctx = perf_event_ctx_lock(event); 3890 ret = perf_read_hw(event, buf, count); 3891 perf_event_ctx_unlock(event, ctx); 3892 3893 return ret; 3894 } 3895 3896 static unsigned int perf_poll(struct file *file, poll_table *wait) 3897 { 3898 struct perf_event *event = file->private_data; 3899 struct ring_buffer *rb; 3900 unsigned int events = POLLHUP; 3901 3902 poll_wait(file, &event->waitq, wait); 3903 3904 if (is_event_hup(event)) 3905 return events; 3906 3907 /* 3908 * Pin the event->rb by taking event->mmap_mutex; otherwise 3909 * perf_event_set_output() can swizzle our rb and make us miss wakeups. 3910 */ 3911 mutex_lock(&event->mmap_mutex); 3912 rb = event->rb; 3913 if (rb) 3914 events = atomic_xchg(&rb->poll, 0); 3915 mutex_unlock(&event->mmap_mutex); 3916 return events; 3917 } 3918 3919 static void _perf_event_reset(struct perf_event *event) 3920 { 3921 (void)perf_event_read(event); 3922 local64_set(&event->count, 0); 3923 perf_event_update_userpage(event); 3924 } 3925 3926 /* 3927 * Holding the top-level event's child_mutex means that any 3928 * descendant process that has inherited this event will block 3929 * in sync_child_event if it goes to exit, thus satisfying the 3930 * task existence requirements of perf_event_enable/disable.
3931 */ 3932 static void perf_event_for_each_child(struct perf_event *event, 3933 void (*func)(struct perf_event *)) 3934 { 3935 struct perf_event *child; 3936 3937 WARN_ON_ONCE(event->ctx->parent_ctx); 3938 3939 mutex_lock(&event->child_mutex); 3940 func(event); 3941 list_for_each_entry(child, &event->child_list, child_list) 3942 func(child); 3943 mutex_unlock(&event->child_mutex); 3944 } 3945 3946 static void perf_event_for_each(struct perf_event *event, 3947 void (*func)(struct perf_event *)) 3948 { 3949 struct perf_event_context *ctx = event->ctx; 3950 struct perf_event *sibling; 3951 3952 lockdep_assert_held(&ctx->mutex); 3953 3954 event = event->group_leader; 3955 3956 perf_event_for_each_child(event, func); 3957 list_for_each_entry(sibling, &event->sibling_list, group_entry) 3958 perf_event_for_each_child(sibling, func); 3959 } 3960 3961 struct period_event { 3962 struct perf_event *event; 3963 u64 value; 3964 }; 3965 3966 static int __perf_event_period(void *info) 3967 { 3968 struct period_event *pe = info; 3969 struct perf_event *event = pe->event; 3970 struct perf_event_context *ctx = event->ctx; 3971 u64 value = pe->value; 3972 bool active; 3973 3974 raw_spin_lock(&ctx->lock); 3975 if (event->attr.freq) { 3976 event->attr.sample_freq = value; 3977 } else { 3978 event->attr.sample_period = value; 3979 event->hw.sample_period = value; 3980 } 3981 3982 active = (event->state == PERF_EVENT_STATE_ACTIVE); 3983 if (active) { 3984 perf_pmu_disable(ctx->pmu); 3985 event->pmu->stop(event, PERF_EF_UPDATE); 3986 } 3987 3988 local64_set(&event->hw.period_left, 0); 3989 3990 if (active) { 3991 event->pmu->start(event, PERF_EF_RELOAD); 3992 perf_pmu_enable(ctx->pmu); 3993 } 3994 raw_spin_unlock(&ctx->lock); 3995 3996 return 0; 3997 } 3998 3999 static int perf_event_period(struct perf_event *event, u64 __user *arg) 4000 { 4001 struct period_event pe = { .event = event, }; 4002 struct perf_event_context *ctx = event->ctx; 4003 struct task_struct *task; 4004 u64 value; 4005 4006 if (!is_sampling_event(event)) 4007 return -EINVAL; 4008 4009 if (copy_from_user(&value, arg, sizeof(value))) 4010 return -EFAULT; 4011 4012 if (!value) 4013 return -EINVAL; 4014 4015 if (event->attr.freq && value > sysctl_perf_event_sample_rate) 4016 return -EINVAL; 4017 4018 task = ctx->task; 4019 pe.value = value; 4020 4021 if (!task) { 4022 cpu_function_call(event->cpu, __perf_event_period, &pe); 4023 return 0; 4024 } 4025 4026 retry: 4027 if (!task_function_call(task, __perf_event_period, &pe)) 4028 return 0; 4029 4030 raw_spin_lock_irq(&ctx->lock); 4031 if (ctx->is_active) { 4032 raw_spin_unlock_irq(&ctx->lock); 4033 task = ctx->task; 4034 goto retry; 4035 } 4036 4037 __perf_event_period(&pe); 4038 raw_spin_unlock_irq(&ctx->lock); 4039 4040 return 0; 4041 } 4042 4043 static const struct file_operations perf_fops; 4044 4045 static inline int perf_fget_light(int fd, struct fd *p) 4046 { 4047 struct fd f = fdget(fd); 4048 if (!f.file) 4049 return -EBADF; 4050 4051 if (f.file->f_op != &perf_fops) { 4052 fdput(f); 4053 return -EBADF; 4054 } 4055 *p = f; 4056 return 0; 4057 } 4058 4059 static int perf_event_set_output(struct perf_event *event, 4060 struct perf_event *output_event); 4061 static int perf_event_set_filter(struct perf_event *event, void __user *arg); 4062 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd); 4063 4064 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) 4065 { 4066 void (*func)(struct perf_event *); 4067 u32 flags = arg; 4068 4069 switch 
(cmd) { 4070 case PERF_EVENT_IOC_ENABLE: 4071 func = _perf_event_enable; 4072 break; 4073 case PERF_EVENT_IOC_DISABLE: 4074 func = _perf_event_disable; 4075 break; 4076 case PERF_EVENT_IOC_RESET: 4077 func = _perf_event_reset; 4078 break; 4079 4080 case PERF_EVENT_IOC_REFRESH: 4081 return _perf_event_refresh(event, arg); 4082 4083 case PERF_EVENT_IOC_PERIOD: 4084 return perf_event_period(event, (u64 __user *)arg); 4085 4086 case PERF_EVENT_IOC_ID: 4087 { 4088 u64 id = primary_event_id(event); 4089 4090 if (copy_to_user((void __user *)arg, &id, sizeof(id))) 4091 return -EFAULT; 4092 return 0; 4093 } 4094 4095 case PERF_EVENT_IOC_SET_OUTPUT: 4096 { 4097 int ret; 4098 if (arg != -1) { 4099 struct perf_event *output_event; 4100 struct fd output; 4101 ret = perf_fget_light(arg, &output); 4102 if (ret) 4103 return ret; 4104 output_event = output.file->private_data; 4105 ret = perf_event_set_output(event, output_event); 4106 fdput(output); 4107 } else { 4108 ret = perf_event_set_output(event, NULL); 4109 } 4110 return ret; 4111 } 4112 4113 case PERF_EVENT_IOC_SET_FILTER: 4114 return perf_event_set_filter(event, (void __user *)arg); 4115 4116 case PERF_EVENT_IOC_SET_BPF: 4117 return perf_event_set_bpf_prog(event, arg); 4118 4119 default: 4120 return -ENOTTY; 4121 } 4122 4123 if (flags & PERF_IOC_FLAG_GROUP) 4124 perf_event_for_each(event, func); 4125 else 4126 perf_event_for_each_child(event, func); 4127 4128 return 0; 4129 } 4130 4131 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 4132 { 4133 struct perf_event *event = file->private_data; 4134 struct perf_event_context *ctx; 4135 long ret; 4136 4137 ctx = perf_event_ctx_lock(event); 4138 ret = _perf_ioctl(event, cmd, arg); 4139 perf_event_ctx_unlock(event, ctx); 4140 4141 return ret; 4142 } 4143 4144 #ifdef CONFIG_COMPAT 4145 static long perf_compat_ioctl(struct file *file, unsigned int cmd, 4146 unsigned long arg) 4147 { 4148 switch (_IOC_NR(cmd)) { 4149 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): 4150 case _IOC_NR(PERF_EVENT_IOC_ID): 4151 /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */ 4152 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { 4153 cmd &= ~IOCSIZE_MASK; 4154 cmd |= sizeof(void *) << IOCSIZE_SHIFT; 4155 } 4156 break; 4157 } 4158 return perf_ioctl(file, cmd, arg); 4159 } 4160 #else 4161 # define perf_compat_ioctl NULL 4162 #endif 4163 4164 int perf_event_task_enable(void) 4165 { 4166 struct perf_event_context *ctx; 4167 struct perf_event *event; 4168 4169 mutex_lock(&current->perf_event_mutex); 4170 list_for_each_entry(event, &current->perf_event_list, owner_entry) { 4171 ctx = perf_event_ctx_lock(event); 4172 perf_event_for_each_child(event, _perf_event_enable); 4173 perf_event_ctx_unlock(event, ctx); 4174 } 4175 mutex_unlock(&current->perf_event_mutex); 4176 4177 return 0; 4178 } 4179 4180 int perf_event_task_disable(void) 4181 { 4182 struct perf_event_context *ctx; 4183 struct perf_event *event; 4184 4185 mutex_lock(&current->perf_event_mutex); 4186 list_for_each_entry(event, &current->perf_event_list, owner_entry) { 4187 ctx = perf_event_ctx_lock(event); 4188 perf_event_for_each_child(event, _perf_event_disable); 4189 perf_event_ctx_unlock(event, ctx); 4190 } 4191 mutex_unlock(&current->perf_event_mutex); 4192 4193 return 0; 4194 } 4195 4196 static int perf_event_index(struct perf_event *event) 4197 { 4198 if (event->hw.state & PERF_HES_STOPPED) 4199 return 0; 4200 4201 if (event->state != PERF_EVENT_STATE_ACTIVE) 4202 return 0; 4203 4204 return event->pmu->event_idx(event); 4205 } 4206 4207 static void
calc_timer_values(struct perf_event *event, 4208 u64 *now, 4209 u64 *enabled, 4210 u64 *running) 4211 { 4212 u64 ctx_time; 4213 4214 *now = perf_clock(); 4215 ctx_time = event->shadow_ctx_time + *now; 4216 *enabled = ctx_time - event->tstamp_enabled; 4217 *running = ctx_time - event->tstamp_running; 4218 } 4219 4220 static void perf_event_init_userpage(struct perf_event *event) 4221 { 4222 struct perf_event_mmap_page *userpg; 4223 struct ring_buffer *rb; 4224 4225 rcu_read_lock(); 4226 rb = rcu_dereference(event->rb); 4227 if (!rb) 4228 goto unlock; 4229 4230 userpg = rb->user_page; 4231 4232 /* Allow new userspace to detect that bit 0 is deprecated */ 4233 userpg->cap_bit0_is_deprecated = 1; 4234 userpg->size = offsetof(struct perf_event_mmap_page, __reserved); 4235 userpg->data_offset = PAGE_SIZE; 4236 userpg->data_size = perf_data_size(rb); 4237 4238 unlock: 4239 rcu_read_unlock(); 4240 } 4241 4242 void __weak arch_perf_update_userpage( 4243 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) 4244 { 4245 } 4246 4247 /* 4248 * Callers need to ensure there can be no nesting of this function, otherwise 4249 * the seqlock logic goes bad. We cannot serialize this because the arch 4250 * code calls this from NMI context. 4251 */ 4252 void perf_event_update_userpage(struct perf_event *event) 4253 { 4254 struct perf_event_mmap_page *userpg; 4255 struct ring_buffer *rb; 4256 u64 enabled, running, now; 4257 4258 rcu_read_lock(); 4259 rb = rcu_dereference(event->rb); 4260 if (!rb) 4261 goto unlock; 4262 4263 /* 4264 * compute total_time_enabled, total_time_running 4265 * based on snapshot values taken when the event 4266 * was last scheduled in. 4267 * 4268 * we cannot simply call update_context_time() 4269 * because of locking issues, as we can be called in 4270 * NMI context 4271 */ 4272 calc_timer_values(event, &now, &enabled, &running); 4273 4274 userpg = rb->user_page; 4275 /* 4276 * Disable preemption so as to not let the corresponding user-space 4277 * spin too long if we get preempted. 
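 *
 * For illustration only (a user-space sketch, not part of this file):
 * the documented reader side of this seqlock-like protocol retries
 * around the lock word, roughly:
 *
 *	struct perf_event_mmap_page *pc = base;	/* page 0 of the mmap */
 *	u32 seq;
 *	u64 offset;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		offset = pc->offset;	/* index, times, ... likewise */
 *		barrier();
 *	} while (pc->lock != seq);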
4278 */ 4279 preempt_disable(); 4280 ++userpg->lock; 4281 barrier(); 4282 userpg->index = perf_event_index(event); 4283 userpg->offset = perf_event_count(event); 4284 if (userpg->index) 4285 userpg->offset -= local64_read(&event->hw.prev_count); 4286 4287 userpg->time_enabled = enabled + 4288 atomic64_read(&event->child_total_time_enabled); 4289 4290 userpg->time_running = running + 4291 atomic64_read(&event->child_total_time_running); 4292 4293 arch_perf_update_userpage(event, userpg, now); 4294 4295 barrier(); 4296 ++userpg->lock; 4297 preempt_enable(); 4298 unlock: 4299 rcu_read_unlock(); 4300 } 4301 4302 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 4303 { 4304 struct perf_event *event = vma->vm_file->private_data; 4305 struct ring_buffer *rb; 4306 int ret = VM_FAULT_SIGBUS; 4307 4308 if (vmf->flags & FAULT_FLAG_MKWRITE) { 4309 if (vmf->pgoff == 0) 4310 ret = 0; 4311 return ret; 4312 } 4313 4314 rcu_read_lock(); 4315 rb = rcu_dereference(event->rb); 4316 if (!rb) 4317 goto unlock; 4318 4319 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) 4320 goto unlock; 4321 4322 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); 4323 if (!vmf->page) 4324 goto unlock; 4325 4326 get_page(vmf->page); 4327 vmf->page->mapping = vma->vm_file->f_mapping; 4328 vmf->page->index = vmf->pgoff; 4329 4330 ret = 0; 4331 unlock: 4332 rcu_read_unlock(); 4333 4334 return ret; 4335 } 4336 4337 static void ring_buffer_attach(struct perf_event *event, 4338 struct ring_buffer *rb) 4339 { 4340 struct ring_buffer *old_rb = NULL; 4341 unsigned long flags; 4342 4343 if (event->rb) { 4344 /* 4345 * Should be impossible, we set this when removing 4346 * event->rb_entry and wait/clear when adding event->rb_entry. 4347 */ 4348 WARN_ON_ONCE(event->rcu_pending); 4349 4350 old_rb = event->rb; 4351 spin_lock_irqsave(&old_rb->event_lock, flags); 4352 list_del_rcu(&event->rb_entry); 4353 spin_unlock_irqrestore(&old_rb->event_lock, flags); 4354 4355 event->rcu_batches = get_state_synchronize_rcu(); 4356 event->rcu_pending = 1; 4357 } 4358 4359 if (rb) { 4360 if (event->rcu_pending) { 4361 cond_synchronize_rcu(event->rcu_batches); 4362 event->rcu_pending = 0; 4363 } 4364 4365 spin_lock_irqsave(&rb->event_lock, flags); 4366 list_add_rcu(&event->rb_entry, &rb->event_list); 4367 spin_unlock_irqrestore(&rb->event_lock, flags); 4368 } 4369 4370 rcu_assign_pointer(event->rb, rb); 4371 4372 if (old_rb) { 4373 ring_buffer_put(old_rb); 4374 /* 4375 * Since we detached before setting the new rb, so that we 4376 * could attach the new rb, we could have missed a wakeup. 4377 * Provide it now. 
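 *
 * Hypothetical interleaving, for illustration:
 *
 *	writer				this CPU
 *	------				--------
 *					list_del_rcu(&event->rb_entry)
 *	ring_buffer_wakeup(old rb)
 *	  (event already off the
 *	   list, its waitq not woken)
 *					rcu_assign_pointer(event->rb, rb)
 *
 * The unconditional wake_up_all() below closes that window.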
4378 */ 4379 wake_up_all(&event->waitq); 4380 } 4381 } 4382 4383 static void ring_buffer_wakeup(struct perf_event *event) 4384 { 4385 struct ring_buffer *rb; 4386 4387 rcu_read_lock(); 4388 rb = rcu_dereference(event->rb); 4389 if (rb) { 4390 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 4391 wake_up_all(&event->waitq); 4392 } 4393 rcu_read_unlock(); 4394 } 4395 4396 struct ring_buffer *ring_buffer_get(struct perf_event *event) 4397 { 4398 struct ring_buffer *rb; 4399 4400 rcu_read_lock(); 4401 rb = rcu_dereference(event->rb); 4402 if (rb) { 4403 if (!atomic_inc_not_zero(&rb->refcount)) 4404 rb = NULL; 4405 } 4406 rcu_read_unlock(); 4407 4408 return rb; 4409 } 4410 4411 void ring_buffer_put(struct ring_buffer *rb) 4412 { 4413 if (!atomic_dec_and_test(&rb->refcount)) 4414 return; 4415 4416 WARN_ON_ONCE(!list_empty(&rb->event_list)); 4417 4418 call_rcu(&rb->rcu_head, rb_free_rcu); 4419 } 4420 4421 static void perf_mmap_open(struct vm_area_struct *vma) 4422 { 4423 struct perf_event *event = vma->vm_file->private_data; 4424 4425 atomic_inc(&event->mmap_count); 4426 atomic_inc(&event->rb->mmap_count); 4427 4428 if (vma->vm_pgoff) 4429 atomic_inc(&event->rb->aux_mmap_count); 4430 4431 if (event->pmu->event_mapped) 4432 event->pmu->event_mapped(event); 4433 } 4434 4435 /* 4436 * A buffer can be mmap()ed multiple times; either directly through the same 4437 * event, or through other events by use of perf_event_set_output(). 4438 * 4439 * In order to undo the VM accounting done by perf_mmap() we need to destroy 4440 * the buffer here, where we still have a VM context. This means we need 4441 * to detach all events redirecting to us. 4442 */ 4443 static void perf_mmap_close(struct vm_area_struct *vma) 4444 { 4445 struct perf_event *event = vma->vm_file->private_data; 4446 4447 struct ring_buffer *rb = ring_buffer_get(event); 4448 struct user_struct *mmap_user = rb->mmap_user; 4449 int mmap_locked = rb->mmap_locked; 4450 unsigned long size = perf_data_size(rb); 4451 4452 if (event->pmu->event_unmapped) 4453 event->pmu->event_unmapped(event); 4454 4455 /* 4456 * rb->aux_mmap_count will always drop before rb->mmap_count and 4457 * event->mmap_count, so it is ok to use event->mmap_mutex to 4458 * serialize with perf_mmap here. 4459 */ 4460 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && 4461 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { 4462 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); 4463 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; 4464 4465 rb_free_aux(rb); 4466 mutex_unlock(&event->mmap_mutex); 4467 } 4468 4469 atomic_dec(&rb->mmap_count); 4470 4471 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 4472 goto out_put; 4473 4474 ring_buffer_attach(event, NULL); 4475 mutex_unlock(&event->mmap_mutex); 4476 4477 /* If there are still other mmap()s of this buffer, we're done. */ 4478 if (atomic_read(&rb->mmap_count)) 4479 goto out_put; 4480 4481 /* 4482 * No other mmap()s, detach from all other events that might redirect 4483 * into the now unreachable buffer. Somewhat complicated by the 4484 * fact that rb::event_lock otherwise nests inside mmap_mutex. 4485 */ 4486 again: 4487 rcu_read_lock(); 4488 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 4489 if (!atomic_long_inc_not_zero(&event->refcount)) { 4490 /* 4491 * This event is en-route to free_event() which will 4492 * detach it and remove it from the list. 
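 *
 * (atomic_long_inc_not_zero() above is the usual "take a reference
 *  only while the object is still live" idiom; abstractly:
 *
 *	if (!atomic_long_inc_not_zero(&obj->refcount))
 *		continue;	/* 0 refs: tear-down in progress */
 *
 *  where 'obj' is any refcounted object -- a hypothetical sketch,
 *  not a type from this file.)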
4493 */ 4494 continue; 4495 } 4496 rcu_read_unlock(); 4497 4498 mutex_lock(&event->mmap_mutex); 4499 /* 4500 * Check we didn't race with perf_event_set_output() which can 4501 * swizzle the rb from under us while we were waiting to 4502 * acquire mmap_mutex. 4503 * 4504 * If we find a different rb, ignore this event; the next 4505 * iteration will no longer find it on the list. We have to 4506 * still restart the iteration to make sure we're not now 4507 * iterating the wrong list. 4508 */ 4509 if (event->rb == rb) 4510 ring_buffer_attach(event, NULL); 4511 4512 mutex_unlock(&event->mmap_mutex); 4513 put_event(event); 4514 4515 /* 4516 * Restart the iteration; either we're on the wrong list or 4517 * we destroyed its integrity by doing a deletion. 4518 */ 4519 goto again; 4520 } 4521 rcu_read_unlock(); 4522 4523 /* 4524 * It could be that there are still a few 0-ref events on the list; they'll 4525 * get cleaned up by free_event() -- they'll also still have their 4526 * ref on the rb and will free it whenever they are done with it. 4527 * 4528 * Aside from that, this buffer is 'fully' detached and unmapped, 4529 * undo the VM accounting. 4530 */ 4531 4532 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); 4533 vma->vm_mm->pinned_vm -= mmap_locked; 4534 free_uid(mmap_user); 4535 4536 out_put: 4537 ring_buffer_put(rb); /* could be last */ 4538 } 4539 4540 static const struct vm_operations_struct perf_mmap_vmops = { 4541 .open = perf_mmap_open, 4542 .close = perf_mmap_close, /* non-mergeable */ 4543 .fault = perf_mmap_fault, 4544 .page_mkwrite = perf_mmap_fault, 4545 }; 4546 4547 static int perf_mmap(struct file *file, struct vm_area_struct *vma) 4548 { 4549 struct perf_event *event = file->private_data; 4550 unsigned long user_locked, user_lock_limit; 4551 struct user_struct *user = current_user(); 4552 unsigned long locked, lock_limit; 4553 struct ring_buffer *rb = NULL; 4554 unsigned long vma_size; 4555 unsigned long nr_pages; 4556 long user_extra = 0, extra = 0; 4557 int ret = 0, flags = 0; 4558 4559 /* 4560 * Don't allow mmap() of inherited per-task counters. This would 4561 * create a performance issue due to all children writing to the 4562 * same rb. 4563 */ 4564 if (event->cpu == -1 && event->attr.inherit) 4565 return -EINVAL; 4566 4567 if (!(vma->vm_flags & VM_SHARED)) 4568 return -EINVAL; 4569 4570 vma_size = vma->vm_end - vma->vm_start; 4571 4572 if (vma->vm_pgoff == 0) { 4573 nr_pages = (vma_size / PAGE_SIZE) - 1; 4574 } else { 4575 /* 4576 * AUX area mapping: if rb->aux_nr_pages != 0, it's already 4577 * mapped, all subsequent mappings should have the same size 4578 * and offset. Must be above the normal perf buffer. 
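 *
 * Sketch of the expected user-space sequence (illustrative only,
 * error handling omitted; 'fd' is a perf_event_open() fd and 'page'
 * is the page size):
 *
 *	base = mmap(NULL, (1 + n) * page, PROT_READ|PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *	up = (struct perf_event_mmap_page *)base;
 *	up->aux_offset = (1 + n) * page;	/* above the data area */
 *	up->aux_size   = m * page;		/* m a power of two */
 *	aux = mmap(NULL, up->aux_size, PROT_READ|PROT_WRITE,
 *		   MAP_SHARED, fd, up->aux_offset);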
4579 */ 4580 u64 aux_offset, aux_size; 4581 4582 if (!event->rb) 4583 return -EINVAL; 4584 4585 nr_pages = vma_size / PAGE_SIZE; 4586 4587 mutex_lock(&event->mmap_mutex); 4588 ret = -EINVAL; 4589 4590 rb = event->rb; 4591 if (!rb) 4592 goto aux_unlock; 4593 4594 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); 4595 aux_size = ACCESS_ONCE(rb->user_page->aux_size); 4596 4597 if (aux_offset < perf_data_size(rb) + PAGE_SIZE) 4598 goto aux_unlock; 4599 4600 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) 4601 goto aux_unlock; 4602 4603 /* already mapped with a different offset */ 4604 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) 4605 goto aux_unlock; 4606 4607 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE) 4608 goto aux_unlock; 4609 4610 /* already mapped with a different size */ 4611 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) 4612 goto aux_unlock; 4613 4614 if (!is_power_of_2(nr_pages)) 4615 goto aux_unlock; 4616 4617 if (!atomic_inc_not_zero(&rb->mmap_count)) 4618 goto aux_unlock; 4619 4620 if (rb_has_aux(rb)) { 4621 atomic_inc(&rb->aux_mmap_count); 4622 ret = 0; 4623 goto unlock; 4624 } 4625 4626 atomic_set(&rb->aux_mmap_count, 1); 4627 user_extra = nr_pages; 4628 4629 goto accounting; 4630 } 4631 4632 /* 4633 * If we have rb pages ensure they're a power-of-two number, so we 4634 * can do bitmasks instead of modulo. 4635 */ 4636 if (nr_pages != 0 && !is_power_of_2(nr_pages)) 4637 return -EINVAL; 4638 4639 if (vma_size != PAGE_SIZE * (1 + nr_pages)) 4640 return -EINVAL; 4641 4642 WARN_ON_ONCE(event->ctx->parent_ctx); 4643 again: 4644 mutex_lock(&event->mmap_mutex); 4645 if (event->rb) { 4646 if (event->rb->nr_pages != nr_pages) { 4647 ret = -EINVAL; 4648 goto unlock; 4649 } 4650 4651 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { 4652 /* 4653 * Raced against perf_mmap_close() through 4654 * perf_event_set_output(). Try again, hope for better 4655 * luck. 4656 */ 4657 mutex_unlock(&event->mmap_mutex); 4658 goto again; 4659 } 4660 4661 goto unlock; 4662 } 4663 4664 user_extra = nr_pages + 1; 4665 4666 accounting: 4667 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); 4668 4669 /* 4670 * Increase the limit linearly with more CPUs: 4671 */ 4672 user_lock_limit *= num_online_cpus(); 4673 4674 user_locked = atomic_long_read(&user->locked_vm) + user_extra; 4675 4676 if (user_locked > user_lock_limit) 4677 extra = user_locked - user_lock_limit; 4678 4679 lock_limit = rlimit(RLIMIT_MEMLOCK); 4680 lock_limit >>= PAGE_SHIFT; 4681 locked = vma->vm_mm->pinned_vm + extra; 4682 4683 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && 4684 !capable(CAP_IPC_LOCK)) { 4685 ret = -EPERM; 4686 goto unlock; 4687 } 4688 4689 WARN_ON(!rb && event->rb); 4690 4691 if (vma->vm_flags & VM_WRITE) 4692 flags |= RING_BUFFER_WRITABLE; 4693 4694 if (!rb) { 4695 rb = rb_alloc(nr_pages, 4696 event->attr.watermark ? 
event->attr.wakeup_watermark : 0, 4697 event->cpu, flags); 4698 4699 if (!rb) { 4700 ret = -ENOMEM; 4701 goto unlock; 4702 } 4703 4704 atomic_set(&rb->mmap_count, 1); 4705 rb->mmap_user = get_current_user(); 4706 rb->mmap_locked = extra; 4707 4708 ring_buffer_attach(event, rb); 4709 4710 perf_event_init_userpage(event); 4711 perf_event_update_userpage(event); 4712 } else { 4713 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, 4714 event->attr.aux_watermark, flags); 4715 if (!ret) 4716 rb->aux_mmap_locked = extra; 4717 } 4718 4719 unlock: 4720 if (!ret) { 4721 atomic_long_add(user_extra, &user->locked_vm); 4722 vma->vm_mm->pinned_vm += extra; 4723 4724 atomic_inc(&event->mmap_count); 4725 } else if (rb) { 4726 atomic_dec(&rb->mmap_count); 4727 } 4728 aux_unlock: 4729 mutex_unlock(&event->mmap_mutex); 4730 4731 /* 4732 * Since pinned accounting is per vm we cannot allow fork() to copy our 4733 * vma. 4734 */ 4735 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; 4736 vma->vm_ops = &perf_mmap_vmops; 4737 4738 if (event->pmu->event_mapped) 4739 event->pmu->event_mapped(event); 4740 4741 return ret; 4742 } 4743 4744 static int perf_fasync(int fd, struct file *filp, int on) 4745 { 4746 struct inode *inode = file_inode(filp); 4747 struct perf_event *event = filp->private_data; 4748 int retval; 4749 4750 mutex_lock(&inode->i_mutex); 4751 retval = fasync_helper(fd, filp, on, &event->fasync); 4752 mutex_unlock(&inode->i_mutex); 4753 4754 if (retval < 0) 4755 return retval; 4756 4757 return 0; 4758 } 4759 4760 static const struct file_operations perf_fops = { 4761 .llseek = no_llseek, 4762 .release = perf_release, 4763 .read = perf_read, 4764 .poll = perf_poll, 4765 .unlocked_ioctl = perf_ioctl, 4766 .compat_ioctl = perf_compat_ioctl, 4767 .mmap = perf_mmap, 4768 .fasync = perf_fasync, 4769 }; 4770 4771 /* 4772 * Perf event wakeup 4773 * 4774 * If there's data, ensure we set the poll() state and publish everything 4775 * to user-space before waking everybody up. 4776 */ 4777 4778 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) 4779 { 4780 /* only the parent has fasync state */ 4781 if (event->parent) 4782 event = event->parent; 4783 return &event->fasync; 4784 } 4785 4786 void perf_event_wakeup(struct perf_event *event) 4787 { 4788 ring_buffer_wakeup(event); 4789 4790 if (event->pending_kill) { 4791 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); 4792 event->pending_kill = 0; 4793 } 4794 } 4795 4796 static void perf_pending_event(struct irq_work *entry) 4797 { 4798 struct perf_event *event = container_of(entry, 4799 struct perf_event, pending); 4800 int rctx; 4801 4802 rctx = perf_swevent_get_recursion_context(); 4803 /* 4804 * If we 'fail' here, that's OK, it means recursion is already disabled 4805 * and we won't recurse 'further'. 4806 */ 4807 4808 if (event->pending_disable) { 4809 event->pending_disable = 0; 4810 __perf_event_disable(event); 4811 } 4812 4813 if (event->pending_wakeup) { 4814 event->pending_wakeup = 0; 4815 perf_event_wakeup(event); 4816 } 4817 4818 if (rctx >= 0) 4819 perf_swevent_put_recursion_context(rctx); 4820 } 4821 4822 /* 4823 * We assume there is only KVM supporting the callbacks. 4824 * Later on, we might change it to a list if there is 4825 * another virtualization implementation supporting the callbacks. 
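 *
 * Illustrative registration sketch (hypothetical my_* callbacks,
 * roughly what a hypervisor module would do):
 *
 *	static struct perf_guest_info_callbacks my_guest_cbs = {
 *		.is_in_guest	= my_is_in_guest,
 *		.is_user_mode	= my_is_user_mode,
 *		.get_guest_ip	= my_get_guest_ip,
 *	};
 *	perf_register_guest_info_callbacks(&my_guest_cbs);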
4826 */ 4827 struct perf_guest_info_callbacks *perf_guest_cbs; 4828 4829 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 4830 { 4831 perf_guest_cbs = cbs; 4832 return 0; 4833 } 4834 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); 4835 4836 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 4837 { 4838 perf_guest_cbs = NULL; 4839 return 0; 4840 } 4841 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); 4842 4843 static void 4844 perf_output_sample_regs(struct perf_output_handle *handle, 4845 struct pt_regs *regs, u64 mask) 4846 { 4847 int bit; 4848 4849 for_each_set_bit(bit, (const unsigned long *) &mask, 4850 sizeof(mask) * BITS_PER_BYTE) { 4851 u64 val; 4852 4853 val = perf_reg_value(regs, bit); 4854 perf_output_put(handle, val); 4855 } 4856 } 4857 4858 static void perf_sample_regs_user(struct perf_regs *regs_user, 4859 struct pt_regs *regs, 4860 struct pt_regs *regs_user_copy) 4861 { 4862 if (user_mode(regs)) { 4863 regs_user->abi = perf_reg_abi(current); 4864 regs_user->regs = regs; 4865 } else if (current->mm) { 4866 perf_get_regs_user(regs_user, regs, regs_user_copy); 4867 } else { 4868 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; 4869 regs_user->regs = NULL; 4870 } 4871 } 4872 4873 static void perf_sample_regs_intr(struct perf_regs *regs_intr, 4874 struct pt_regs *regs) 4875 { 4876 regs_intr->regs = regs; 4877 regs_intr->abi = perf_reg_abi(current); 4878 } 4879 4880 4881 /* 4882 * Get remaining task size from user stack pointer. 4883 * 4884 * It'd be better to take the stack vma map and limit this more 4885 * precisely, but there's no way to get it safely under interrupt, 4886 * so we use TASK_SIZE as the limit. 4887 */ 4888 static u64 perf_ustack_task_size(struct pt_regs *regs) 4889 { 4890 unsigned long addr = perf_user_stack_pointer(regs); 4891 4892 if (!addr || addr >= TASK_SIZE) 4893 return 0; 4894 4895 return TASK_SIZE - addr; 4896 } 4897 4898 static u16 4899 perf_sample_ustack_size(u16 stack_size, u16 header_size, 4900 struct pt_regs *regs) 4901 { 4902 u64 task_size; 4903 4904 /* No regs, no stack pointer, no dump. */ 4905 if (!regs) 4906 return 0; 4907 4908 /* 4909 * Check whether the requested stack size fits into: 4910 * - TASK_SIZE 4911 * If it doesn't, we limit the size to TASK_SIZE. 4912 * 4913 * - remaining sample size 4914 * If it doesn't, we shrink the stack size to 4915 * fit into the remaining sample size. 4916 */ 4917 4918 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); 4919 stack_size = min(stack_size, (u16) task_size); 4920 4921 /* Current header size plus static size and dynamic size. */ 4922 header_size += 2 * sizeof(u64); 4923 4924 /* Do we fit in with the current stack dump size? */ 4925 if ((u16) (header_size + stack_size) < header_size) { 4926 /* 4927 * If we overflow the maximum size for the sample, 4928 * we customize the stack dump size to fit in. 
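 *
 * Worked example (illustrative numbers): with the adjusted
 * header_size = 65480 and stack_size = 128, the u16 sum wraps
 * to 72, which compares below header_size; we then clamp
 * stack_size to USHRT_MAX - 65480 - sizeof(u64) = 47, rounded
 * up to 48.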
4929 */ 4930 stack_size = USHRT_MAX - header_size - sizeof(u64); 4931 stack_size = round_up(stack_size, sizeof(u64)); 4932 } 4933 4934 return stack_size; 4935 } 4936 4937 static void 4938 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, 4939 struct pt_regs *regs) 4940 { 4941 /* Case of a kernel thread, nothing to dump */ 4942 if (!regs) { 4943 u64 size = 0; 4944 perf_output_put(handle, size); 4945 } else { 4946 unsigned long sp; 4947 unsigned int rem; 4948 u64 dyn_size; 4949 4950 /* 4951 * We dump: 4952 * static size 4953 * - the size requested by user or the best one we can fit 4954 * in to the sample max size 4955 * data 4956 * - user stack dump data 4957 * dynamic size 4958 * - the actual dumped size 4959 */ 4960 4961 /* Static size. */ 4962 perf_output_put(handle, dump_size); 4963 4964 /* Data. */ 4965 sp = perf_user_stack_pointer(regs); 4966 rem = __output_copy_user(handle, (void *) sp, dump_size); 4967 dyn_size = dump_size - rem; 4968 4969 perf_output_skip(handle, rem); 4970 4971 /* Dynamic size. */ 4972 perf_output_put(handle, dyn_size); 4973 } 4974 } 4975 4976 static void __perf_event_header__init_id(struct perf_event_header *header, 4977 struct perf_sample_data *data, 4978 struct perf_event *event) 4979 { 4980 u64 sample_type = event->attr.sample_type; 4981 4982 data->type = sample_type; 4983 header->size += event->id_header_size; 4984 4985 if (sample_type & PERF_SAMPLE_TID) { 4986 /* namespace issues */ 4987 data->tid_entry.pid = perf_event_pid(event, current); 4988 data->tid_entry.tid = perf_event_tid(event, current); 4989 } 4990 4991 if (sample_type & PERF_SAMPLE_TIME) 4992 data->time = perf_event_clock(event); 4993 4994 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 4995 data->id = primary_event_id(event); 4996 4997 if (sample_type & PERF_SAMPLE_STREAM_ID) 4998 data->stream_id = event->id; 4999 5000 if (sample_type & PERF_SAMPLE_CPU) { 5001 data->cpu_entry.cpu = raw_smp_processor_id(); 5002 data->cpu_entry.reserved = 0; 5003 } 5004 } 5005 5006 void perf_event_header__init_id(struct perf_event_header *header, 5007 struct perf_sample_data *data, 5008 struct perf_event *event) 5009 { 5010 if (event->attr.sample_id_all) 5011 __perf_event_header__init_id(header, data, event); 5012 } 5013 5014 static void __perf_event__output_id_sample(struct perf_output_handle *handle, 5015 struct perf_sample_data *data) 5016 { 5017 u64 sample_type = data->type; 5018 5019 if (sample_type & PERF_SAMPLE_TID) 5020 perf_output_put(handle, data->tid_entry); 5021 5022 if (sample_type & PERF_SAMPLE_TIME) 5023 perf_output_put(handle, data->time); 5024 5025 if (sample_type & PERF_SAMPLE_ID) 5026 perf_output_put(handle, data->id); 5027 5028 if (sample_type & PERF_SAMPLE_STREAM_ID) 5029 perf_output_put(handle, data->stream_id); 5030 5031 if (sample_type & PERF_SAMPLE_CPU) 5032 perf_output_put(handle, data->cpu_entry); 5033 5034 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5035 perf_output_put(handle, data->id); 5036 } 5037 5038 void perf_event__output_id_sample(struct perf_event *event, 5039 struct perf_output_handle *handle, 5040 struct perf_sample_data *sample) 5041 { 5042 if (event->attr.sample_id_all) 5043 __perf_event__output_id_sample(handle, sample); 5044 } 5045 5046 static void perf_output_read_one(struct perf_output_handle *handle, 5047 struct perf_event *event, 5048 u64 enabled, u64 running) 5049 { 5050 u64 read_format = event->attr.read_format; 5051 u64 values[4]; 5052 int n = 0; 5053 5054 values[n++] = perf_event_count(event); 5055 if (read_format & 
PERF_FORMAT_TOTAL_TIME_ENABLED) { 5056 values[n++] = enabled + 5057 atomic64_read(&event->child_total_time_enabled); 5058 } 5059 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 5060 values[n++] = running + 5061 atomic64_read(&event->child_total_time_running); 5062 } 5063 if (read_format & PERF_FORMAT_ID) 5064 values[n++] = primary_event_id(event); 5065 5066 __output_copy(handle, values, n * sizeof(u64)); 5067 } 5068 5069 /* 5070 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 5071 */ 5072 static void perf_output_read_group(struct perf_output_handle *handle, 5073 struct perf_event *event, 5074 u64 enabled, u64 running) 5075 { 5076 struct perf_event *leader = event->group_leader, *sub; 5077 u64 read_format = event->attr.read_format; 5078 u64 values[5]; 5079 int n = 0; 5080 5081 values[n++] = 1 + leader->nr_siblings; 5082 5083 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 5084 values[n++] = enabled; 5085 5086 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 5087 values[n++] = running; 5088 5089 if (leader != event) 5090 leader->pmu->read(leader); 5091 5092 values[n++] = perf_event_count(leader); 5093 if (read_format & PERF_FORMAT_ID) 5094 values[n++] = primary_event_id(leader); 5095 5096 __output_copy(handle, values, n * sizeof(u64)); 5097 5098 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 5099 n = 0; 5100 5101 if ((sub != event) && 5102 (sub->state == PERF_EVENT_STATE_ACTIVE)) 5103 sub->pmu->read(sub); 5104 5105 values[n++] = perf_event_count(sub); 5106 if (read_format & PERF_FORMAT_ID) 5107 values[n++] = primary_event_id(sub); 5108 5109 __output_copy(handle, values, n * sizeof(u64)); 5110 } 5111 } 5112 5113 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ 5114 PERF_FORMAT_TOTAL_TIME_RUNNING) 5115 5116 static void perf_output_read(struct perf_output_handle *handle, 5117 struct perf_event *event) 5118 { 5119 u64 enabled = 0, running = 0, now; 5120 u64 read_format = event->attr.read_format; 5121 5122 /* 5123 * compute total_time_enabled, total_time_running 5124 * based on snapshot values taken when the event 5125 * was last scheduled in. 
5126 * 5127 * we cannot simply call update_context_time() 5128 * because of locking issues, as we are called in 5129 * NMI context 5130 */ 5131 if (read_format & PERF_FORMAT_TOTAL_TIMES) 5132 calc_timer_values(event, &now, &enabled, &running); 5133 5134 if (event->attr.read_format & PERF_FORMAT_GROUP) 5135 perf_output_read_group(handle, event, enabled, running); 5136 else 5137 perf_output_read_one(handle, event, enabled, running); 5138 } 5139 5140 void perf_output_sample(struct perf_output_handle *handle, 5141 struct perf_event_header *header, 5142 struct perf_sample_data *data, 5143 struct perf_event *event) 5144 { 5145 u64 sample_type = data->type; 5146 5147 perf_output_put(handle, *header); 5148 5149 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5150 perf_output_put(handle, data->id); 5151 5152 if (sample_type & PERF_SAMPLE_IP) 5153 perf_output_put(handle, data->ip); 5154 5155 if (sample_type & PERF_SAMPLE_TID) 5156 perf_output_put(handle, data->tid_entry); 5157 5158 if (sample_type & PERF_SAMPLE_TIME) 5159 perf_output_put(handle, data->time); 5160 5161 if (sample_type & PERF_SAMPLE_ADDR) 5162 perf_output_put(handle, data->addr); 5163 5164 if (sample_type & PERF_SAMPLE_ID) 5165 perf_output_put(handle, data->id); 5166 5167 if (sample_type & PERF_SAMPLE_STREAM_ID) 5168 perf_output_put(handle, data->stream_id); 5169 5170 if (sample_type & PERF_SAMPLE_CPU) 5171 perf_output_put(handle, data->cpu_entry); 5172 5173 if (sample_type & PERF_SAMPLE_PERIOD) 5174 perf_output_put(handle, data->period); 5175 5176 if (sample_type & PERF_SAMPLE_READ) 5177 perf_output_read(handle, event); 5178 5179 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 5180 if (data->callchain) { 5181 int size = 1; 5182 5183 if (data->callchain) 5184 size += data->callchain->nr; 5185 5186 size *= sizeof(u64); 5187 5188 __output_copy(handle, data->callchain, size); 5189 } else { 5190 u64 nr = 0; 5191 perf_output_put(handle, nr); 5192 } 5193 } 5194 5195 if (sample_type & PERF_SAMPLE_RAW) { 5196 if (data->raw) { 5197 perf_output_put(handle, data->raw->size); 5198 __output_copy(handle, data->raw->data, 5199 data->raw->size); 5200 } else { 5201 struct { 5202 u32 size; 5203 u32 data; 5204 } raw = { 5205 .size = sizeof(u32), 5206 .data = 0, 5207 }; 5208 perf_output_put(handle, raw); 5209 } 5210 } 5211 5212 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 5213 if (data->br_stack) { 5214 size_t size; 5215 5216 size = data->br_stack->nr 5217 * sizeof(struct perf_branch_entry); 5218 5219 perf_output_put(handle, data->br_stack->nr); 5220 perf_output_copy(handle, data->br_stack->entries, size); 5221 } else { 5222 /* 5223 * we always store at least the value of nr 5224 */ 5225 u64 nr = 0; 5226 perf_output_put(handle, nr); 5227 } 5228 } 5229 5230 if (sample_type & PERF_SAMPLE_REGS_USER) { 5231 u64 abi = data->regs_user.abi; 5232 5233 /* 5234 * If there are no regs to dump, notice it through the 5235 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 
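 *
 * Consumer-side sketch (illustrative; 'p', 'mask' and 'regs' are
 * hypothetical parser state): read the ABI word first, then expect
 * one u64 per bit set in attr.sample_regs_user:
 *
 *	u64 abi = *p++;
 *	if (abi != PERF_SAMPLE_REGS_ABI_NONE)
 *		for_each_set_bit(r, &mask, 64)
 *			regs[r] = *p++;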
5236 */ 5237 perf_output_put(handle, abi); 5238 5239 if (abi) { 5240 u64 mask = event->attr.sample_regs_user; 5241 perf_output_sample_regs(handle, 5242 data->regs_user.regs, 5243 mask); 5244 } 5245 } 5246 5247 if (sample_type & PERF_SAMPLE_STACK_USER) { 5248 perf_output_sample_ustack(handle, 5249 data->stack_user_size, 5250 data->regs_user.regs); 5251 } 5252 5253 if (sample_type & PERF_SAMPLE_WEIGHT) 5254 perf_output_put(handle, data->weight); 5255 5256 if (sample_type & PERF_SAMPLE_DATA_SRC) 5257 perf_output_put(handle, data->data_src.val); 5258 5259 if (sample_type & PERF_SAMPLE_TRANSACTION) 5260 perf_output_put(handle, data->txn); 5261 5262 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5263 u64 abi = data->regs_intr.abi; 5264 /* 5265 * If there are no regs to dump, notice it through the 5266 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 5267 */ 5268 perf_output_put(handle, abi); 5269 5270 if (abi) { 5271 u64 mask = event->attr.sample_regs_intr; 5272 5273 perf_output_sample_regs(handle, 5274 data->regs_intr.regs, 5275 mask); 5276 } 5277 } 5278 5279 if (!event->attr.watermark) { 5280 int wakeup_events = event->attr.wakeup_events; 5281 5282 if (wakeup_events) { 5283 struct ring_buffer *rb = handle->rb; 5284 int events = local_inc_return(&rb->events); 5285 5286 if (events >= wakeup_events) { 5287 local_sub(wakeup_events, &rb->events); 5288 local_inc(&rb->wakeup); 5289 } 5290 } 5291 } 5292 } 5293 5294 void perf_prepare_sample(struct perf_event_header *header, 5295 struct perf_sample_data *data, 5296 struct perf_event *event, 5297 struct pt_regs *regs) 5298 { 5299 u64 sample_type = event->attr.sample_type; 5300 5301 header->type = PERF_RECORD_SAMPLE; 5302 header->size = sizeof(*header) + event->header_size; 5303 5304 header->misc = 0; 5305 header->misc |= perf_misc_flags(regs); 5306 5307 __perf_event_header__init_id(header, data, event); 5308 5309 if (sample_type & PERF_SAMPLE_IP) 5310 data->ip = perf_instruction_pointer(regs); 5311 5312 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 5313 int size = 1; 5314 5315 data->callchain = perf_callchain(event, regs); 5316 5317 if (data->callchain) 5318 size += data->callchain->nr; 5319 5320 header->size += size * sizeof(u64); 5321 } 5322 5323 if (sample_type & PERF_SAMPLE_RAW) { 5324 int size = sizeof(u32); 5325 5326 if (data->raw) 5327 size += data->raw->size; 5328 else 5329 size += sizeof(u32); 5330 5331 WARN_ON_ONCE(size & (sizeof(u64)-1)); 5332 header->size += size; 5333 } 5334 5335 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 5336 int size = sizeof(u64); /* nr */ 5337 if (data->br_stack) { 5338 size += data->br_stack->nr 5339 * sizeof(struct perf_branch_entry); 5340 } 5341 header->size += size; 5342 } 5343 5344 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) 5345 perf_sample_regs_user(&data->regs_user, regs, 5346 &data->regs_user_copy); 5347 5348 if (sample_type & PERF_SAMPLE_REGS_USER) { 5349 /* regs dump ABI info */ 5350 int size = sizeof(u64); 5351 5352 if (data->regs_user.regs) { 5353 u64 mask = event->attr.sample_regs_user; 5354 size += hweight64(mask) * sizeof(u64); 5355 } 5356 5357 header->size += size; 5358 } 5359 5360 if (sample_type & PERF_SAMPLE_STACK_USER) { 5361 /* 5362 * Either the PERF_SAMPLE_STACK_USER bit needs to always be 5363 * processed as the last one, or an additional check must be 5364 * added when a new sample type is introduced, because we could 5365 * eat up the rest of the sample size. 
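 *
 * The resulting record layout (cf. perf_output_sample_ustack()):
 *
 *	u64	size;		/* static size, as fitted above     */
 *	char	data[size];	/* stack bytes; the tail past
 *				 * dyn_size is padding              */
 *	u64	dyn_size;	/* bytes actually dumped            */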
5366 */ 5367 u16 stack_size = event->attr.sample_stack_user; 5368 u16 size = sizeof(u64); 5369 5370 stack_size = perf_sample_ustack_size(stack_size, header->size, 5371 data->regs_user.regs); 5372 5373 /* 5374 * If there is something to dump, add space for the dump 5375 * itself and for the field that tells the dynamic size, 5376 * which is how many have been actually dumped. 5377 */ 5378 if (stack_size) 5379 size += sizeof(u64) + stack_size; 5380 5381 data->stack_user_size = stack_size; 5382 header->size += size; 5383 } 5384 5385 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5386 /* regs dump ABI info */ 5387 int size = sizeof(u64); 5388 5389 perf_sample_regs_intr(&data->regs_intr, regs); 5390 5391 if (data->regs_intr.regs) { 5392 u64 mask = event->attr.sample_regs_intr; 5393 5394 size += hweight64(mask) * sizeof(u64); 5395 } 5396 5397 header->size += size; 5398 } 5399 } 5400 5401 void perf_event_output(struct perf_event *event, 5402 struct perf_sample_data *data, 5403 struct pt_regs *regs) 5404 { 5405 struct perf_output_handle handle; 5406 struct perf_event_header header; 5407 5408 /* protect the callchain buffers */ 5409 rcu_read_lock(); 5410 5411 perf_prepare_sample(&header, data, event, regs); 5412 5413 if (perf_output_begin(&handle, event, header.size)) 5414 goto exit; 5415 5416 perf_output_sample(&handle, &header, data, event); 5417 5418 perf_output_end(&handle); 5419 5420 exit: 5421 rcu_read_unlock(); 5422 } 5423 5424 /* 5425 * read event_id 5426 */ 5427 5428 struct perf_read_event { 5429 struct perf_event_header header; 5430 5431 u32 pid; 5432 u32 tid; 5433 }; 5434 5435 static void 5436 perf_event_read_event(struct perf_event *event, 5437 struct task_struct *task) 5438 { 5439 struct perf_output_handle handle; 5440 struct perf_sample_data sample; 5441 struct perf_read_event read_event = { 5442 .header = { 5443 .type = PERF_RECORD_READ, 5444 .misc = 0, 5445 .size = sizeof(read_event) + event->read_size, 5446 }, 5447 .pid = perf_event_pid(event, task), 5448 .tid = perf_event_tid(event, task), 5449 }; 5450 int ret; 5451 5452 perf_event_header__init_id(&read_event.header, &sample, event); 5453 ret = perf_output_begin(&handle, event, read_event.header.size); 5454 if (ret) 5455 return; 5456 5457 perf_output_put(&handle, read_event); 5458 perf_output_read(&handle, event); 5459 perf_event__output_id_sample(event, &handle, &sample); 5460 5461 perf_output_end(&handle); 5462 } 5463 5464 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); 5465 5466 static void 5467 perf_event_aux_ctx(struct perf_event_context *ctx, 5468 perf_event_aux_output_cb output, 5469 void *data) 5470 { 5471 struct perf_event *event; 5472 5473 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 5474 if (event->state < PERF_EVENT_STATE_INACTIVE) 5475 continue; 5476 if (!event_filter_match(event)) 5477 continue; 5478 output(event, data); 5479 } 5480 } 5481 5482 static void 5483 perf_event_aux(perf_event_aux_output_cb output, void *data, 5484 struct perf_event_context *task_ctx) 5485 { 5486 struct perf_cpu_context *cpuctx; 5487 struct perf_event_context *ctx; 5488 struct pmu *pmu; 5489 int ctxn; 5490 5491 rcu_read_lock(); 5492 list_for_each_entry_rcu(pmu, &pmus, entry) { 5493 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 5494 if (cpuctx->unique_pmu != pmu) 5495 goto next; 5496 perf_event_aux_ctx(&cpuctx->ctx, output, data); 5497 if (task_ctx) 5498 goto next; 5499 ctxn = pmu->task_ctx_nr; 5500 if (ctxn < 0) 5501 goto next; 5502 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 5503 if 
(ctx) 5504 perf_event_aux_ctx(ctx, output, data); 5505 next: 5506 put_cpu_ptr(pmu->pmu_cpu_context); 5507 } 5508 5509 if (task_ctx) { 5510 preempt_disable(); 5511 perf_event_aux_ctx(task_ctx, output, data); 5512 preempt_enable(); 5513 } 5514 rcu_read_unlock(); 5515 } 5516 5517 /* 5518 * task tracking -- fork/exit 5519 * 5520 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task 5521 */ 5522 5523 struct perf_task_event { 5524 struct task_struct *task; 5525 struct perf_event_context *task_ctx; 5526 5527 struct { 5528 struct perf_event_header header; 5529 5530 u32 pid; 5531 u32 ppid; 5532 u32 tid; 5533 u32 ptid; 5534 u64 time; 5535 } event_id; 5536 }; 5537 5538 static int perf_event_task_match(struct perf_event *event) 5539 { 5540 return event->attr.comm || event->attr.mmap || 5541 event->attr.mmap2 || event->attr.mmap_data || 5542 event->attr.task; 5543 } 5544 5545 static void perf_event_task_output(struct perf_event *event, 5546 void *data) 5547 { 5548 struct perf_task_event *task_event = data; 5549 struct perf_output_handle handle; 5550 struct perf_sample_data sample; 5551 struct task_struct *task = task_event->task; 5552 int ret, size = task_event->event_id.header.size; 5553 5554 if (!perf_event_task_match(event)) 5555 return; 5556 5557 perf_event_header__init_id(&task_event->event_id.header, &sample, event); 5558 5559 ret = perf_output_begin(&handle, event, 5560 task_event->event_id.header.size); 5561 if (ret) 5562 goto out; 5563 5564 task_event->event_id.pid = perf_event_pid(event, task); 5565 task_event->event_id.ppid = perf_event_pid(event, current); 5566 5567 task_event->event_id.tid = perf_event_tid(event, task); 5568 task_event->event_id.ptid = perf_event_tid(event, current); 5569 5570 task_event->event_id.time = perf_event_clock(event); 5571 5572 perf_output_put(&handle, task_event->event_id); 5573 5574 perf_event__output_id_sample(event, &handle, &sample); 5575 5576 perf_output_end(&handle); 5577 out: 5578 task_event->event_id.header.size = size; 5579 } 5580 5581 static void perf_event_task(struct task_struct *task, 5582 struct perf_event_context *task_ctx, 5583 int new) 5584 { 5585 struct perf_task_event task_event; 5586 5587 if (!atomic_read(&nr_comm_events) && 5588 !atomic_read(&nr_mmap_events) && 5589 !atomic_read(&nr_task_events)) 5590 return; 5591 5592 task_event = (struct perf_task_event){ 5593 .task = task, 5594 .task_ctx = task_ctx, 5595 .event_id = { 5596 .header = { 5597 .type = new ? 
PERF_RECORD_FORK : PERF_RECORD_EXIT, 5598 .misc = 0, 5599 .size = sizeof(task_event.event_id), 5600 }, 5601 /* .pid */ 5602 /* .ppid */ 5603 /* .tid */ 5604 /* .ptid */ 5605 /* .time */ 5606 }, 5607 }; 5608 5609 perf_event_aux(perf_event_task_output, 5610 &task_event, 5611 task_ctx); 5612 } 5613 5614 void perf_event_fork(struct task_struct *task) 5615 { 5616 perf_event_task(task, NULL, 1); 5617 } 5618 5619 /* 5620 * comm tracking 5621 */ 5622 5623 struct perf_comm_event { 5624 struct task_struct *task; 5625 char *comm; 5626 int comm_size; 5627 5628 struct { 5629 struct perf_event_header header; 5630 5631 u32 pid; 5632 u32 tid; 5633 } event_id; 5634 }; 5635 5636 static int perf_event_comm_match(struct perf_event *event) 5637 { 5638 return event->attr.comm; 5639 } 5640 5641 static void perf_event_comm_output(struct perf_event *event, 5642 void *data) 5643 { 5644 struct perf_comm_event *comm_event = data; 5645 struct perf_output_handle handle; 5646 struct perf_sample_data sample; 5647 int size = comm_event->event_id.header.size; 5648 int ret; 5649 5650 if (!perf_event_comm_match(event)) 5651 return; 5652 5653 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); 5654 ret = perf_output_begin(&handle, event, 5655 comm_event->event_id.header.size); 5656 5657 if (ret) 5658 goto out; 5659 5660 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); 5661 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); 5662 5663 perf_output_put(&handle, comm_event->event_id); 5664 __output_copy(&handle, comm_event->comm, 5665 comm_event->comm_size); 5666 5667 perf_event__output_id_sample(event, &handle, &sample); 5668 5669 perf_output_end(&handle); 5670 out: 5671 comm_event->event_id.header.size = size; 5672 } 5673 5674 static void perf_event_comm_event(struct perf_comm_event *comm_event) 5675 { 5676 char comm[TASK_COMM_LEN]; 5677 unsigned int size; 5678 5679 memset(comm, 0, sizeof(comm)); 5680 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 5681 size = ALIGN(strlen(comm)+1, sizeof(u64)); 5682 5683 comm_event->comm = comm; 5684 comm_event->comm_size = size; 5685 5686 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 5687 5688 perf_event_aux(perf_event_comm_output, 5689 comm_event, 5690 NULL); 5691 } 5692 5693 void perf_event_comm(struct task_struct *task, bool exec) 5694 { 5695 struct perf_comm_event comm_event; 5696 5697 if (!atomic_read(&nr_comm_events)) 5698 return; 5699 5700 comm_event = (struct perf_comm_event){ 5701 .task = task, 5702 /* .comm */ 5703 /* .comm_size */ 5704 .event_id = { 5705 .header = { 5706 .type = PERF_RECORD_COMM, 5707 .misc = exec ? 
PERF_RECORD_MISC_COMM_EXEC : 0, 5708 /* .size */ 5709 }, 5710 /* .pid */ 5711 /* .tid */ 5712 }, 5713 }; 5714 5715 perf_event_comm_event(&comm_event); 5716 } 5717 5718 /* 5719 * mmap tracking 5720 */ 5721 5722 struct perf_mmap_event { 5723 struct vm_area_struct *vma; 5724 5725 const char *file_name; 5726 int file_size; 5727 int maj, min; 5728 u64 ino; 5729 u64 ino_generation; 5730 u32 prot, flags; 5731 5732 struct { 5733 struct perf_event_header header; 5734 5735 u32 pid; 5736 u32 tid; 5737 u64 start; 5738 u64 len; 5739 u64 pgoff; 5740 } event_id; 5741 }; 5742 5743 static int perf_event_mmap_match(struct perf_event *event, 5744 void *data) 5745 { 5746 struct perf_mmap_event *mmap_event = data; 5747 struct vm_area_struct *vma = mmap_event->vma; 5748 int executable = vma->vm_flags & VM_EXEC; 5749 5750 return (!executable && event->attr.mmap_data) || 5751 (executable && (event->attr.mmap || event->attr.mmap2)); 5752 } 5753 5754 static void perf_event_mmap_output(struct perf_event *event, 5755 void *data) 5756 { 5757 struct perf_mmap_event *mmap_event = data; 5758 struct perf_output_handle handle; 5759 struct perf_sample_data sample; 5760 int size = mmap_event->event_id.header.size; 5761 int ret; 5762 5763 if (!perf_event_mmap_match(event, data)) 5764 return; 5765 5766 if (event->attr.mmap2) { 5767 mmap_event->event_id.header.type = PERF_RECORD_MMAP2; 5768 mmap_event->event_id.header.size += sizeof(mmap_event->maj); 5769 mmap_event->event_id.header.size += sizeof(mmap_event->min); 5770 mmap_event->event_id.header.size += sizeof(mmap_event->ino); 5771 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); 5772 mmap_event->event_id.header.size += sizeof(mmap_event->prot); 5773 mmap_event->event_id.header.size += sizeof(mmap_event->flags); 5774 } 5775 5776 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); 5777 ret = perf_output_begin(&handle, event, 5778 mmap_event->event_id.header.size); 5779 if (ret) 5780 goto out; 5781 5782 mmap_event->event_id.pid = perf_event_pid(event, current); 5783 mmap_event->event_id.tid = perf_event_tid(event, current); 5784 5785 perf_output_put(&handle, mmap_event->event_id); 5786 5787 if (event->attr.mmap2) { 5788 perf_output_put(&handle, mmap_event->maj); 5789 perf_output_put(&handle, mmap_event->min); 5790 perf_output_put(&handle, mmap_event->ino); 5791 perf_output_put(&handle, mmap_event->ino_generation); 5792 perf_output_put(&handle, mmap_event->prot); 5793 perf_output_put(&handle, mmap_event->flags); 5794 } 5795 5796 __output_copy(&handle, mmap_event->file_name, 5797 mmap_event->file_size); 5798 5799 perf_event__output_id_sample(event, &handle, &sample); 5800 5801 perf_output_end(&handle); 5802 out: 5803 mmap_event->event_id.header.size = size; 5804 } 5805 5806 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 5807 { 5808 struct vm_area_struct *vma = mmap_event->vma; 5809 struct file *file = vma->vm_file; 5810 int maj = 0, min = 0; 5811 u64 ino = 0, gen = 0; 5812 u32 prot = 0, flags = 0; 5813 unsigned int size; 5814 char tmp[16]; 5815 char *buf = NULL; 5816 char *name; 5817 5818 if (file) { 5819 struct inode *inode; 5820 dev_t dev; 5821 5822 buf = kmalloc(PATH_MAX, GFP_KERNEL); 5823 if (!buf) { 5824 name = "//enomem"; 5825 goto cpy_name; 5826 } 5827 /* 5828 * d_path() works from the end of the rb backwards, so we 5829 * need to add enough zero bytes after the string to handle 5830 * the 64bit alignment we do later. 
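 *
 * Worked example: "/bin/cat" is 8 characters, so size = strlen + 1
 * = 9; the alignment loop at got_name then appends zero bytes until
 * size = 16, and those trailing bytes must really be zero or we
 * would leak random bits to user-space.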
5831 */ 5832 name = file_path(file, buf, PATH_MAX - sizeof(u64)); 5833 if (IS_ERR(name)) { 5834 name = "//toolong"; 5835 goto cpy_name; 5836 } 5837 inode = file_inode(vma->vm_file); 5838 dev = inode->i_sb->s_dev; 5839 ino = inode->i_ino; 5840 gen = inode->i_generation; 5841 maj = MAJOR(dev); 5842 min = MINOR(dev); 5843 5844 if (vma->vm_flags & VM_READ) 5845 prot |= PROT_READ; 5846 if (vma->vm_flags & VM_WRITE) 5847 prot |= PROT_WRITE; 5848 if (vma->vm_flags & VM_EXEC) 5849 prot |= PROT_EXEC; 5850 5851 if (vma->vm_flags & VM_MAYSHARE) 5852 flags = MAP_SHARED; 5853 else 5854 flags = MAP_PRIVATE; 5855 5856 if (vma->vm_flags & VM_DENYWRITE) 5857 flags |= MAP_DENYWRITE; 5858 if (vma->vm_flags & VM_MAYEXEC) 5859 flags |= MAP_EXECUTABLE; 5860 if (vma->vm_flags & VM_LOCKED) 5861 flags |= MAP_LOCKED; 5862 if (vma->vm_flags & VM_HUGETLB) 5863 flags |= MAP_HUGETLB; 5864 5865 goto got_name; 5866 } else { 5867 if (vma->vm_ops && vma->vm_ops->name) { 5868 name = (char *) vma->vm_ops->name(vma); 5869 if (name) 5870 goto cpy_name; 5871 } 5872 5873 name = (char *)arch_vma_name(vma); 5874 if (name) 5875 goto cpy_name; 5876 5877 if (vma->vm_start <= vma->vm_mm->start_brk && 5878 vma->vm_end >= vma->vm_mm->brk) { 5879 name = "[heap]"; 5880 goto cpy_name; 5881 } 5882 if (vma->vm_start <= vma->vm_mm->start_stack && 5883 vma->vm_end >= vma->vm_mm->start_stack) { 5884 name = "[stack]"; 5885 goto cpy_name; 5886 } 5887 5888 name = "//anon"; 5889 goto cpy_name; 5890 } 5891 5892 cpy_name: 5893 strlcpy(tmp, name, sizeof(tmp)); 5894 name = tmp; 5895 got_name: 5896 /* 5897 * Since our buffer works in 8 byte units we need to align our string 5898 * size to a multiple of 8. However, we must guarantee the tail end is 5899 * zero'd out to avoid leaking random bits to userspace. 5900 */ 5901 size = strlen(name)+1; 5902 while (!IS_ALIGNED(size, sizeof(u64))) 5903 name[size++] = '\0'; 5904 5905 mmap_event->file_name = name; 5906 mmap_event->file_size = size; 5907 mmap_event->maj = maj; 5908 mmap_event->min = min; 5909 mmap_event->ino = ino; 5910 mmap_event->ino_generation = gen; 5911 mmap_event->prot = prot; 5912 mmap_event->flags = flags; 5913 5914 if (!(vma->vm_flags & VM_EXEC)) 5915 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; 5916 5917 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 5918 5919 perf_event_aux(perf_event_mmap_output, 5920 mmap_event, 5921 NULL); 5922 5923 kfree(buf); 5924 } 5925 5926 void perf_event_mmap(struct vm_area_struct *vma) 5927 { 5928 struct perf_mmap_event mmap_event; 5929 5930 if (!atomic_read(&nr_mmap_events)) 5931 return; 5932 5933 mmap_event = (struct perf_mmap_event){ 5934 .vma = vma, 5935 /* .file_name */ 5936 /* .file_size */ 5937 .event_id = { 5938 .header = { 5939 .type = PERF_RECORD_MMAP, 5940 .misc = PERF_RECORD_MISC_USER, 5941 /* .size */ 5942 }, 5943 /* .pid */ 5944 /* .tid */ 5945 .start = vma->vm_start, 5946 .len = vma->vm_end - vma->vm_start, 5947 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, 5948 }, 5949 /* .maj (attr_mmap2 only) */ 5950 /* .min (attr_mmap2 only) */ 5951 /* .ino (attr_mmap2 only) */ 5952 /* .ino_generation (attr_mmap2 only) */ 5953 /* .prot (attr_mmap2 only) */ 5954 /* .flags (attr_mmap2 only) */ 5955 }; 5956 5957 perf_event_mmap_event(&mmap_event); 5958 } 5959 5960 void perf_event_aux_event(struct perf_event *event, unsigned long head, 5961 unsigned long size, u64 flags) 5962 { 5963 struct perf_output_handle handle; 5964 struct perf_sample_data sample; 5965 struct perf_aux_event { 5966 struct perf_event_header header; 5967 
u64 offset; 5968 u64 size; 5969 u64 flags; 5970 } rec = { 5971 .header = { 5972 .type = PERF_RECORD_AUX, 5973 .misc = 0, 5974 .size = sizeof(rec), 5975 }, 5976 .offset = head, 5977 .size = size, 5978 .flags = flags, 5979 }; 5980 int ret; 5981 5982 perf_event_header__init_id(&rec.header, &sample, event); 5983 ret = perf_output_begin(&handle, event, rec.header.size); 5984 5985 if (ret) 5986 return; 5987 5988 perf_output_put(&handle, rec); 5989 perf_event__output_id_sample(event, &handle, &sample); 5990 5991 perf_output_end(&handle); 5992 } 5993 5994 /* 5995 * Lost/dropped samples logging 5996 */ 5997 void perf_log_lost_samples(struct perf_event *event, u64 lost) 5998 { 5999 struct perf_output_handle handle; 6000 struct perf_sample_data sample; 6001 int ret; 6002 6003 struct { 6004 struct perf_event_header header; 6005 u64 lost; 6006 } lost_samples_event = { 6007 .header = { 6008 .type = PERF_RECORD_LOST_SAMPLES, 6009 .misc = 0, 6010 .size = sizeof(lost_samples_event), 6011 }, 6012 .lost = lost, 6013 }; 6014 6015 perf_event_header__init_id(&lost_samples_event.header, &sample, event); 6016 6017 ret = perf_output_begin(&handle, event, 6018 lost_samples_event.header.size); 6019 if (ret) 6020 return; 6021 6022 perf_output_put(&handle, lost_samples_event); 6023 perf_event__output_id_sample(event, &handle, &sample); 6024 perf_output_end(&handle); 6025 } 6026 6027 /* 6028 * IRQ throttle logging 6029 */ 6030 6031 static void perf_log_throttle(struct perf_event *event, int enable) 6032 { 6033 struct perf_output_handle handle; 6034 struct perf_sample_data sample; 6035 int ret; 6036 6037 struct { 6038 struct perf_event_header header; 6039 u64 time; 6040 u64 id; 6041 u64 stream_id; 6042 } throttle_event = { 6043 .header = { 6044 .type = PERF_RECORD_THROTTLE, 6045 .misc = 0, 6046 .size = sizeof(throttle_event), 6047 }, 6048 .time = perf_event_clock(event), 6049 .id = primary_event_id(event), 6050 .stream_id = event->id, 6051 }; 6052 6053 if (enable) 6054 throttle_event.header.type = PERF_RECORD_UNTHROTTLE; 6055 6056 perf_event_header__init_id(&throttle_event.header, &sample, event); 6057 6058 ret = perf_output_begin(&handle, event, 6059 throttle_event.header.size); 6060 if (ret) 6061 return; 6062 6063 perf_output_put(&handle, throttle_event); 6064 perf_event__output_id_sample(event, &handle, &sample); 6065 perf_output_end(&handle); 6066 } 6067 6068 static void perf_log_itrace_start(struct perf_event *event) 6069 { 6070 struct perf_output_handle handle; 6071 struct perf_sample_data sample; 6072 struct perf_aux_event { 6073 struct perf_event_header header; 6074 u32 pid; 6075 u32 tid; 6076 } rec; 6077 int ret; 6078 6079 if (event->parent) 6080 event = event->parent; 6081 6082 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || 6083 event->hw.itrace_started) 6084 return; 6085 6086 event->hw.itrace_started = 1; 6087 6088 rec.header.type = PERF_RECORD_ITRACE_START; 6089 rec.header.misc = 0; 6090 rec.header.size = sizeof(rec); 6091 rec.pid = perf_event_pid(event, current); 6092 rec.tid = perf_event_tid(event, current); 6093 6094 perf_event_header__init_id(&rec.header, &sample, event); 6095 ret = perf_output_begin(&handle, event, rec.header.size); 6096 6097 if (ret) 6098 return; 6099 6100 perf_output_put(&handle, rec); 6101 perf_event__output_id_sample(event, &handle, &sample); 6102 6103 perf_output_end(&handle); 6104 } 6105 6106 /* 6107 * Generic event overflow handling, sampling. 
6108 */ 6109 6110 static int __perf_event_overflow(struct perf_event *event, 6111 int throttle, struct perf_sample_data *data, 6112 struct pt_regs *regs) 6113 { 6114 int events = atomic_read(&event->event_limit); 6115 struct hw_perf_event *hwc = &event->hw; 6116 u64 seq; 6117 int ret = 0; 6118 6119 /* 6120 * Non-sampling counters might still use the PMI to fold short 6121 * hardware counters, ignore those. 6122 */ 6123 if (unlikely(!is_sampling_event(event))) 6124 return 0; 6125 6126 seq = __this_cpu_read(perf_throttled_seq); 6127 if (seq != hwc->interrupts_seq) { 6128 hwc->interrupts_seq = seq; 6129 hwc->interrupts = 1; 6130 } else { 6131 hwc->interrupts++; 6132 if (unlikely(throttle 6133 && hwc->interrupts >= max_samples_per_tick)) { 6134 __this_cpu_inc(perf_throttled_count); 6135 hwc->interrupts = MAX_INTERRUPTS; 6136 perf_log_throttle(event, 0); 6137 tick_nohz_full_kick(); 6138 ret = 1; 6139 } 6140 } 6141 6142 if (event->attr.freq) { 6143 u64 now = perf_clock(); 6144 s64 delta = now - hwc->freq_time_stamp; 6145 6146 hwc->freq_time_stamp = now; 6147 6148 if (delta > 0 && delta < 2*TICK_NSEC) 6149 perf_adjust_period(event, delta, hwc->last_period, true); 6150 } 6151 6152 /* 6153 * XXX event_limit might not quite work as expected on inherited 6154 * events 6155 */ 6156 6157 event->pending_kill = POLL_IN; 6158 if (events && atomic_dec_and_test(&event->event_limit)) { 6159 ret = 1; 6160 event->pending_kill = POLL_HUP; 6161 event->pending_disable = 1; 6162 irq_work_queue(&event->pending); 6163 } 6164 6165 if (event->overflow_handler) 6166 event->overflow_handler(event, data, regs); 6167 else 6168 perf_event_output(event, data, regs); 6169 6170 if (*perf_event_fasync(event) && event->pending_kill) { 6171 event->pending_wakeup = 1; 6172 irq_work_queue(&event->pending); 6173 } 6174 6175 return ret; 6176 } 6177 6178 int perf_event_overflow(struct perf_event *event, 6179 struct perf_sample_data *data, 6180 struct pt_regs *regs) 6181 { 6182 return __perf_event_overflow(event, 1, data, regs); 6183 } 6184 6185 /* 6186 * Generic software event infrastructure 6187 */ 6188 6189 struct swevent_htable { 6190 struct swevent_hlist *swevent_hlist; 6191 struct mutex hlist_mutex; 6192 int hlist_refcount; 6193 6194 /* Recursion avoidance in each contexts */ 6195 int recursion[PERF_NR_CONTEXTS]; 6196 6197 /* Keeps track of cpu being initialized/exited */ 6198 bool online; 6199 }; 6200 6201 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); 6202 6203 /* 6204 * We directly increment event->count and keep a second value in 6205 * event->hw.period_left to count intervals. This period event 6206 * is kept in the range [-sample_period, 0] so that we can use the 6207 * sign as trigger. 
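 *
 * Worked example (illustrative): with sample_period = 4 and
 * period_left = 3 (the counter overshot a period boundary by 3),
 * perf_swevent_set_period() computes nr = (4 + 3) / 4 = 1 overflow
 * and rewinds period_left to 3 - 4 = -1, back inside the
 * [-sample_period, 0] window.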
6208 */ 6209 6210 u64 perf_swevent_set_period(struct perf_event *event) 6211 { 6212 struct hw_perf_event *hwc = &event->hw; 6213 u64 period = hwc->last_period; 6214 u64 nr, offset; 6215 s64 old, val; 6216 6217 hwc->last_period = hwc->sample_period; 6218 6219 again: 6220 old = val = local64_read(&hwc->period_left); 6221 if (val < 0) 6222 return 0; 6223 6224 nr = div64_u64(period + val, period); 6225 offset = nr * period; 6226 val -= offset; 6227 if (local64_cmpxchg(&hwc->period_left, old, val) != old) 6228 goto again; 6229 6230 return nr; 6231 } 6232 6233 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, 6234 struct perf_sample_data *data, 6235 struct pt_regs *regs) 6236 { 6237 struct hw_perf_event *hwc = &event->hw; 6238 int throttle = 0; 6239 6240 if (!overflow) 6241 overflow = perf_swevent_set_period(event); 6242 6243 if (hwc->interrupts == MAX_INTERRUPTS) 6244 return; 6245 6246 for (; overflow; overflow--) { 6247 if (__perf_event_overflow(event, throttle, 6248 data, regs)) { 6249 /* 6250 * We inhibit the overflow from happening when 6251 * hwc->interrupts == MAX_INTERRUPTS. 6252 */ 6253 break; 6254 } 6255 throttle = 1; 6256 } 6257 } 6258 6259 static void perf_swevent_event(struct perf_event *event, u64 nr, 6260 struct perf_sample_data *data, 6261 struct pt_regs *regs) 6262 { 6263 struct hw_perf_event *hwc = &event->hw; 6264 6265 local64_add(nr, &event->count); 6266 6267 if (!regs) 6268 return; 6269 6270 if (!is_sampling_event(event)) 6271 return; 6272 6273 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { 6274 data->period = nr; 6275 return perf_swevent_overflow(event, 1, data, regs); 6276 } else 6277 data->period = event->hw.last_period; 6278 6279 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) 6280 return perf_swevent_overflow(event, 1, data, regs); 6281 6282 if (local64_add_negative(nr, &hwc->period_left)) 6283 return; 6284 6285 perf_swevent_overflow(event, 0, data, regs); 6286 } 6287 6288 static int perf_exclude_event(struct perf_event *event, 6289 struct pt_regs *regs) 6290 { 6291 if (event->hw.state & PERF_HES_STOPPED) 6292 return 1; 6293 6294 if (regs) { 6295 if (event->attr.exclude_user && user_mode(regs)) 6296 return 1; 6297 6298 if (event->attr.exclude_kernel && !user_mode(regs)) 6299 return 1; 6300 } 6301 6302 return 0; 6303 } 6304 6305 static int perf_swevent_match(struct perf_event *event, 6306 enum perf_type_id type, 6307 u32 event_id, 6308 struct perf_sample_data *data, 6309 struct pt_regs *regs) 6310 { 6311 if (event->attr.type != type) 6312 return 0; 6313 6314 if (event->attr.config != event_id) 6315 return 0; 6316 6317 if (perf_exclude_event(event, regs)) 6318 return 0; 6319 6320 return 1; 6321 } 6322 6323 static inline u64 swevent_hash(u64 type, u32 event_id) 6324 { 6325 u64 val = event_id | (type << 32); 6326 6327 return hash_64(val, SWEVENT_HLIST_BITS); 6328 } 6329 6330 static inline struct hlist_head * 6331 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) 6332 { 6333 u64 hash = swevent_hash(type, event_id); 6334 6335 return &hlist->heads[hash]; 6336 } 6337 6338 /* For the read side: events when they trigger */ 6339 static inline struct hlist_head * 6340 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) 6341 { 6342 struct swevent_hlist *hlist; 6343 6344 hlist = rcu_dereference(swhash->swevent_hlist); 6345 if (!hlist) 6346 return NULL; 6347 6348 return __find_swevent_head(hlist, type, event_id); 6349 } 6350 6351 /* For the event head insertion and removal 
in the hlist */ 6352 static inline struct hlist_head * 6353 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) 6354 { 6355 struct swevent_hlist *hlist; 6356 u32 event_id = event->attr.config; 6357 u64 type = event->attr.type; 6358 6359 /* 6360 * Event scheduling is always serialized against hlist allocation 6361 * and release. Which makes the protected version suitable here. 6362 * The context lock guarantees that. 6363 */ 6364 hlist = rcu_dereference_protected(swhash->swevent_hlist, 6365 lockdep_is_held(&event->ctx->lock)); 6366 if (!hlist) 6367 return NULL; 6368 6369 return __find_swevent_head(hlist, type, event_id); 6370 } 6371 6372 static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 6373 u64 nr, 6374 struct perf_sample_data *data, 6375 struct pt_regs *regs) 6376 { 6377 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6378 struct perf_event *event; 6379 struct hlist_head *head; 6380 6381 rcu_read_lock(); 6382 head = find_swevent_head_rcu(swhash, type, event_id); 6383 if (!head) 6384 goto end; 6385 6386 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6387 if (perf_swevent_match(event, type, event_id, data, regs)) 6388 perf_swevent_event(event, nr, data, regs); 6389 } 6390 end: 6391 rcu_read_unlock(); 6392 } 6393 6394 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]); 6395 6396 int perf_swevent_get_recursion_context(void) 6397 { 6398 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6399 6400 return get_recursion_context(swhash->recursion); 6401 } 6402 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); 6403 6404 inline void perf_swevent_put_recursion_context(int rctx) 6405 { 6406 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6407 6408 put_recursion_context(swhash->recursion, rctx); 6409 } 6410 6411 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6412 { 6413 struct perf_sample_data data; 6414 6415 if (WARN_ON_ONCE(!regs)) 6416 return; 6417 6418 perf_sample_data_init(&data, addr, 0); 6419 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 6420 } 6421 6422 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6423 { 6424 int rctx; 6425 6426 preempt_disable_notrace(); 6427 rctx = perf_swevent_get_recursion_context(); 6428 if (unlikely(rctx < 0)) 6429 goto fail; 6430 6431 ___perf_sw_event(event_id, nr, regs, addr); 6432 6433 perf_swevent_put_recursion_context(rctx); 6434 fail: 6435 preempt_enable_notrace(); 6436 } 6437 6438 static void perf_swevent_read(struct perf_event *event) 6439 { 6440 } 6441 6442 static int perf_swevent_add(struct perf_event *event, int flags) 6443 { 6444 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6445 struct hw_perf_event *hwc = &event->hw; 6446 struct hlist_head *head; 6447 6448 if (is_sampling_event(event)) { 6449 hwc->last_period = hwc->sample_period; 6450 perf_swevent_set_period(event); 6451 } 6452 6453 hwc->state = !(flags & PERF_EF_START); 6454 6455 head = find_swevent_head(swhash, event); 6456 if (!head) { 6457 /* 6458 * We can race with cpu hotplug code. Do not 6459 * WARN if the cpu just got unplugged. 
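 * (Hence WARN_ON_ONCE() below only fires when swhash->online is still set, i.e. the hlist is missing although the CPU is supposedly online.)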
6460 */ 6461 WARN_ON_ONCE(swhash->online); 6462 return -EINVAL; 6463 } 6464 6465 hlist_add_head_rcu(&event->hlist_entry, head); 6466 perf_event_update_userpage(event); 6467 6468 return 0; 6469 } 6470 6471 static void perf_swevent_del(struct perf_event *event, int flags) 6472 { 6473 hlist_del_rcu(&event->hlist_entry); 6474 } 6475 6476 static void perf_swevent_start(struct perf_event *event, int flags) 6477 { 6478 event->hw.state = 0; 6479 } 6480 6481 static void perf_swevent_stop(struct perf_event *event, int flags) 6482 { 6483 event->hw.state = PERF_HES_STOPPED; 6484 } 6485 6486 /* Deref the hlist from the update side */ 6487 static inline struct swevent_hlist * 6488 swevent_hlist_deref(struct swevent_htable *swhash) 6489 { 6490 return rcu_dereference_protected(swhash->swevent_hlist, 6491 lockdep_is_held(&swhash->hlist_mutex)); 6492 } 6493 6494 static void swevent_hlist_release(struct swevent_htable *swhash) 6495 { 6496 struct swevent_hlist *hlist = swevent_hlist_deref(swhash); 6497 6498 if (!hlist) 6499 return; 6500 6501 RCU_INIT_POINTER(swhash->swevent_hlist, NULL); 6502 kfree_rcu(hlist, rcu_head); 6503 } 6504 6505 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) 6506 { 6507 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6508 6509 mutex_lock(&swhash->hlist_mutex); 6510 6511 if (!--swhash->hlist_refcount) 6512 swevent_hlist_release(swhash); 6513 6514 mutex_unlock(&swhash->hlist_mutex); 6515 } 6516 6517 static void swevent_hlist_put(struct perf_event *event) 6518 { 6519 int cpu; 6520 6521 for_each_possible_cpu(cpu) 6522 swevent_hlist_put_cpu(event, cpu); 6523 } 6524 6525 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) 6526 { 6527 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6528 int err = 0; 6529 6530 mutex_lock(&swhash->hlist_mutex); 6531 6532 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { 6533 struct swevent_hlist *hlist; 6534 6535 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); 6536 if (!hlist) { 6537 err = -ENOMEM; 6538 goto exit; 6539 } 6540 rcu_assign_pointer(swhash->swevent_hlist, hlist); 6541 } 6542 swhash->hlist_refcount++; 6543 exit: 6544 mutex_unlock(&swhash->hlist_mutex); 6545 6546 return err; 6547 } 6548 6549 static int swevent_hlist_get(struct perf_event *event) 6550 { 6551 int err; 6552 int cpu, failed_cpu; 6553 6554 get_online_cpus(); 6555 for_each_possible_cpu(cpu) { 6556 err = swevent_hlist_get_cpu(event, cpu); 6557 if (err) { 6558 failed_cpu = cpu; 6559 goto fail; 6560 } 6561 } 6562 put_online_cpus(); 6563 6564 return 0; 6565 fail: 6566 for_each_possible_cpu(cpu) { 6567 if (cpu == failed_cpu) 6568 break; 6569 swevent_hlist_put_cpu(event, cpu); 6570 } 6571 6572 put_online_cpus(); 6573 return err; 6574 } 6575 6576 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 6577 6578 static void sw_perf_event_destroy(struct perf_event *event) 6579 { 6580 u64 event_id = event->attr.config; 6581 6582 WARN_ON(event->parent); 6583 6584 static_key_slow_dec(&perf_swevent_enabled[event_id]); 6585 swevent_hlist_put(event); 6586 } 6587 6588 static int perf_swevent_init(struct perf_event *event) 6589 { 6590 u64 event_id = event->attr.config; 6591 6592 if (event->attr.type != PERF_TYPE_SOFTWARE) 6593 return -ENOENT; 6594 6595 /* 6596 * no branch sampling for software events 6597 */ 6598 if (has_branch_stack(event)) 6599 return -EOPNOTSUPP; 6600 6601 switch (event_id) { 6602 case PERF_COUNT_SW_CPU_CLOCK: 6603 case PERF_COUNT_SW_TASK_CLOCK: 6604 return -ENOENT; 6605 6606 default: 6607 break; 6608 } 6609 6610 
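/*
 * The cpu-clock and task-clock event ids are rejected above because they
 * are serviced by the dedicated hrtimer based perf_cpu_clock and
 * perf_task_clock pmus defined further down in this file.
 *
 * For reference, the software events handled here are what userspace
 * requests with something like the following (a minimal sketch, all
 * values purely illustrative):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_PAGE_FAULTS,
 *	};
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */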
if (event_id >= PERF_COUNT_SW_MAX) 6611 return -ENOENT; 6612 6613 if (!event->parent) { 6614 int err; 6615 6616 err = swevent_hlist_get(event); 6617 if (err) 6618 return err; 6619 6620 static_key_slow_inc(&perf_swevent_enabled[event_id]); 6621 event->destroy = sw_perf_event_destroy; 6622 } 6623 6624 return 0; 6625 } 6626 6627 static struct pmu perf_swevent = { 6628 .task_ctx_nr = perf_sw_context, 6629 6630 .capabilities = PERF_PMU_CAP_NO_NMI, 6631 6632 .event_init = perf_swevent_init, 6633 .add = perf_swevent_add, 6634 .del = perf_swevent_del, 6635 .start = perf_swevent_start, 6636 .stop = perf_swevent_stop, 6637 .read = perf_swevent_read, 6638 }; 6639 6640 #ifdef CONFIG_EVENT_TRACING 6641 6642 static int perf_tp_filter_match(struct perf_event *event, 6643 struct perf_sample_data *data) 6644 { 6645 void *record = data->raw->data; 6646 6647 if (likely(!event->filter) || filter_match_preds(event->filter, record)) 6648 return 1; 6649 return 0; 6650 } 6651 6652 static int perf_tp_event_match(struct perf_event *event, 6653 struct perf_sample_data *data, 6654 struct pt_regs *regs) 6655 { 6656 if (event->hw.state & PERF_HES_STOPPED) 6657 return 0; 6658 /* 6659 * All tracepoints are from kernel-space. 6660 */ 6661 if (event->attr.exclude_kernel) 6662 return 0; 6663 6664 if (!perf_tp_filter_match(event, data)) 6665 return 0; 6666 6667 return 1; 6668 } 6669 6670 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, 6671 struct pt_regs *regs, struct hlist_head *head, int rctx, 6672 struct task_struct *task) 6673 { 6674 struct perf_sample_data data; 6675 struct perf_event *event; 6676 6677 struct perf_raw_record raw = { 6678 .size = entry_size, 6679 .data = record, 6680 }; 6681 6682 perf_sample_data_init(&data, addr, 0); 6683 data.raw = &raw; 6684 6685 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6686 if (perf_tp_event_match(event, &data, regs)) 6687 perf_swevent_event(event, count, &data, regs); 6688 } 6689 6690 /* 6691 * If we were given a target task, also iterate its context and 6692 * deliver this event there too.
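 * (Scheduler tracepoints use this to make e.g. a wakeup visible to events attached to the woken task, not just to current.)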
6693 */ 6694 if (task && task != current) { 6695 struct perf_event_context *ctx; 6696 struct trace_entry *entry = record; 6697 6698 rcu_read_lock(); 6699 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); 6700 if (!ctx) 6701 goto unlock; 6702 6703 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 6704 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6705 continue; 6706 if (event->attr.config != entry->type) 6707 continue; 6708 if (perf_tp_event_match(event, &data, regs)) 6709 perf_swevent_event(event, count, &data, regs); 6710 } 6711 unlock: 6712 rcu_read_unlock(); 6713 } 6714 6715 perf_swevent_put_recursion_context(rctx); 6716 } 6717 EXPORT_SYMBOL_GPL(perf_tp_event); 6718 6719 static void tp_perf_event_destroy(struct perf_event *event) 6720 { 6721 perf_trace_destroy(event); 6722 } 6723 6724 static int perf_tp_event_init(struct perf_event *event) 6725 { 6726 int err; 6727 6728 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6729 return -ENOENT; 6730 6731 /* 6732 * no branch sampling for tracepoint events 6733 */ 6734 if (has_branch_stack(event)) 6735 return -EOPNOTSUPP; 6736 6737 err = perf_trace_init(event); 6738 if (err) 6739 return err; 6740 6741 event->destroy = tp_perf_event_destroy; 6742 6743 return 0; 6744 } 6745 6746 static struct pmu perf_tracepoint = { 6747 .task_ctx_nr = perf_sw_context, 6748 6749 .event_init = perf_tp_event_init, 6750 .add = perf_trace_add, 6751 .del = perf_trace_del, 6752 .start = perf_swevent_start, 6753 .stop = perf_swevent_stop, 6754 .read = perf_swevent_read, 6755 }; 6756 6757 static inline void perf_tp_register(void) 6758 { 6759 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); 6760 } 6761 6762 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 6763 { 6764 char *filter_str; 6765 int ret; 6766 6767 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6768 return -EINVAL; 6769 6770 filter_str = strndup_user(arg, PAGE_SIZE); 6771 if (IS_ERR(filter_str)) 6772 return PTR_ERR(filter_str); 6773 6774 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); 6775 6776 kfree(filter_str); 6777 return ret; 6778 } 6779 6780 static void perf_event_free_filter(struct perf_event *event) 6781 { 6782 ftrace_profile_free_filter(event); 6783 } 6784 6785 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 6786 { 6787 struct bpf_prog *prog; 6788 6789 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6790 return -EINVAL; 6791 6792 if (event->tp_event->prog) 6793 return -EEXIST; 6794 6795 if (!(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) 6796 /* bpf programs can only be attached to kprobes */ 6797 return -EINVAL; 6798 6799 prog = bpf_prog_get(prog_fd); 6800 if (IS_ERR(prog)) 6801 return PTR_ERR(prog); 6802 6803 if (prog->type != BPF_PROG_TYPE_KPROBE) { 6804 /* valid fd, but invalid bpf program type */ 6805 bpf_prog_put(prog); 6806 return -EINVAL; 6807 } 6808 6809 event->tp_event->prog = prog; 6810 6811 return 0; 6812 } 6813 6814 static void perf_event_free_bpf_prog(struct perf_event *event) 6815 { 6816 struct bpf_prog *prog; 6817 6818 if (!event->tp_event) 6819 return; 6820 6821 prog = event->tp_event->prog; 6822 if (prog) { 6823 event->tp_event->prog = NULL; 6824 bpf_prog_put(prog); 6825 } 6826 } 6827 6828 #else 6829 6830 static inline void perf_tp_register(void) 6831 { 6832 } 6833 6834 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 6835 { 6836 return -ENOENT; 6837 } 6838 6839 static void perf_event_free_filter(struct perf_event *event) 6840 { 6841 } 
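/*
 * Both perf_event_set_filter() and perf_event_set_bpf_prog() are reached
 * through the perf ioctl() path; a minimal (hypothetical) userspace sketch:
 *
 *	ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0");
 *	ioctl(fd, PERF_EVENT_IOC_SET_BPF, bpf_prog_fd);
 *
 * Without CONFIG_EVENT_TRACING both simply fail with -ENOENT.
 */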
6842 6843 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 6844 { 6845 return -ENOENT; 6846 } 6847 6848 static void perf_event_free_bpf_prog(struct perf_event *event) 6849 { 6850 } 6851 #endif /* CONFIG_EVENT_TRACING */ 6852 6853 #ifdef CONFIG_HAVE_HW_BREAKPOINT 6854 void perf_bp_event(struct perf_event *bp, void *data) 6855 { 6856 struct perf_sample_data sample; 6857 struct pt_regs *regs = data; 6858 6859 perf_sample_data_init(&sample, bp->attr.bp_addr, 0); 6860 6861 if (!bp->hw.state && !perf_exclude_event(bp, regs)) 6862 perf_swevent_event(bp, 1, &sample, regs); 6863 } 6864 #endif 6865 6866 /* 6867 * hrtimer based swevent callback 6868 */ 6869 6870 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) 6871 { 6872 enum hrtimer_restart ret = HRTIMER_RESTART; 6873 struct perf_sample_data data; 6874 struct pt_regs *regs; 6875 struct perf_event *event; 6876 u64 period; 6877 6878 event = container_of(hrtimer, struct perf_event, hw.hrtimer); 6879 6880 if (event->state != PERF_EVENT_STATE_ACTIVE) 6881 return HRTIMER_NORESTART; 6882 6883 event->pmu->read(event); 6884 6885 perf_sample_data_init(&data, 0, event->hw.last_period); 6886 regs = get_irq_regs(); 6887 6888 if (regs && !perf_exclude_event(event, regs)) { 6889 if (!(event->attr.exclude_idle && is_idle_task(current))) 6890 if (__perf_event_overflow(event, 1, &data, regs)) 6891 ret = HRTIMER_NORESTART; 6892 } 6893 6894 period = max_t(u64, 10000, event->hw.sample_period); 6895 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 6896 6897 return ret; 6898 } 6899 6900 static void perf_swevent_start_hrtimer(struct perf_event *event) 6901 { 6902 struct hw_perf_event *hwc = &event->hw; 6903 s64 period; 6904 6905 if (!is_sampling_event(event)) 6906 return; 6907 6908 period = local64_read(&hwc->period_left); 6909 if (period) { 6910 if (period < 0) 6911 period = 10000; 6912 6913 local64_set(&hwc->period_left, 0); 6914 } else { 6915 period = max_t(u64, 10000, hwc->sample_period); 6916 } 6917 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), 6918 HRTIMER_MODE_REL_PINNED); 6919 } 6920 6921 static void perf_swevent_cancel_hrtimer(struct perf_event *event) 6922 { 6923 struct hw_perf_event *hwc = &event->hw; 6924 6925 if (is_sampling_event(event)) { 6926 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); 6927 local64_set(&hwc->period_left, ktime_to_ns(remaining)); 6928 6929 hrtimer_cancel(&hwc->hrtimer); 6930 } 6931 } 6932 6933 static void perf_swevent_init_hrtimer(struct perf_event *event) 6934 { 6935 struct hw_perf_event *hwc = &event->hw; 6936 6937 if (!is_sampling_event(event)) 6938 return; 6939 6940 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 6941 hwc->hrtimer.function = perf_swevent_hrtimer; 6942 6943 /* 6944 * Since hrtimers have a fixed rate, we can do a static freq->period 6945 * mapping and avoid the whole period adjust feedback stuff. 
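 * For example, attr.sample_freq = 1000 (Hz) maps below to a fixed period of NSEC_PER_SEC / 1000 = 1000000ns.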
6946 */ 6947 if (event->attr.freq) { 6948 long freq = event->attr.sample_freq; 6949 6950 event->attr.sample_period = NSEC_PER_SEC / freq; 6951 hwc->sample_period = event->attr.sample_period; 6952 local64_set(&hwc->period_left, hwc->sample_period); 6953 hwc->last_period = hwc->sample_period; 6954 event->attr.freq = 0; 6955 } 6956 } 6957 6958 /* 6959 * Software event: cpu wall time clock 6960 */ 6961 6962 static void cpu_clock_event_update(struct perf_event *event) 6963 { 6964 s64 prev; 6965 u64 now; 6966 6967 now = local_clock(); 6968 prev = local64_xchg(&event->hw.prev_count, now); 6969 local64_add(now - prev, &event->count); 6970 } 6971 6972 static void cpu_clock_event_start(struct perf_event *event, int flags) 6973 { 6974 local64_set(&event->hw.prev_count, local_clock()); 6975 perf_swevent_start_hrtimer(event); 6976 } 6977 6978 static void cpu_clock_event_stop(struct perf_event *event, int flags) 6979 { 6980 perf_swevent_cancel_hrtimer(event); 6981 cpu_clock_event_update(event); 6982 } 6983 6984 static int cpu_clock_event_add(struct perf_event *event, int flags) 6985 { 6986 if (flags & PERF_EF_START) 6987 cpu_clock_event_start(event, flags); 6988 perf_event_update_userpage(event); 6989 6990 return 0; 6991 } 6992 6993 static void cpu_clock_event_del(struct perf_event *event, int flags) 6994 { 6995 cpu_clock_event_stop(event, flags); 6996 } 6997 6998 static void cpu_clock_event_read(struct perf_event *event) 6999 { 7000 cpu_clock_event_update(event); 7001 } 7002 7003 static int cpu_clock_event_init(struct perf_event *event) 7004 { 7005 if (event->attr.type != PERF_TYPE_SOFTWARE) 7006 return -ENOENT; 7007 7008 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) 7009 return -ENOENT; 7010 7011 /* 7012 * no branch sampling for software events 7013 */ 7014 if (has_branch_stack(event)) 7015 return -EOPNOTSUPP; 7016 7017 perf_swevent_init_hrtimer(event); 7018 7019 return 0; 7020 } 7021 7022 static struct pmu perf_cpu_clock = { 7023 .task_ctx_nr = perf_sw_context, 7024 7025 .capabilities = PERF_PMU_CAP_NO_NMI, 7026 7027 .event_init = cpu_clock_event_init, 7028 .add = cpu_clock_event_add, 7029 .del = cpu_clock_event_del, 7030 .start = cpu_clock_event_start, 7031 .stop = cpu_clock_event_stop, 7032 .read = cpu_clock_event_read, 7033 }; 7034 7035 /* 7036 * Software event: task time clock 7037 */ 7038 7039 static void task_clock_event_update(struct perf_event *event, u64 now) 7040 { 7041 u64 prev; 7042 s64 delta; 7043 7044 prev = local64_xchg(&event->hw.prev_count, now); 7045 delta = now - prev; 7046 local64_add(delta, &event->count); 7047 } 7048 7049 static void task_clock_event_start(struct perf_event *event, int flags) 7050 { 7051 local64_set(&event->hw.prev_count, event->ctx->time); 7052 perf_swevent_start_hrtimer(event); 7053 } 7054 7055 static void task_clock_event_stop(struct perf_event *event, int flags) 7056 { 7057 perf_swevent_cancel_hrtimer(event); 7058 task_clock_event_update(event, event->ctx->time); 7059 } 7060 7061 static int task_clock_event_add(struct perf_event *event, int flags) 7062 { 7063 if (flags & PERF_EF_START) 7064 task_clock_event_start(event, flags); 7065 perf_event_update_userpage(event); 7066 7067 return 0; 7068 } 7069 7070 static void task_clock_event_del(struct perf_event *event, int flags) 7071 { 7072 task_clock_event_stop(event, PERF_EF_UPDATE); 7073 } 7074 7075 static void task_clock_event_read(struct perf_event *event) 7076 { 7077 u64 now = perf_clock(); 7078 u64 delta = now - event->ctx->timestamp; 7079 u64 time = event->ctx->time + delta; 7080 7081 
task_clock_event_update(event, time); 7082 } 7083 7084 static int task_clock_event_init(struct perf_event *event) 7085 { 7086 if (event->attr.type != PERF_TYPE_SOFTWARE) 7087 return -ENOENT; 7088 7089 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) 7090 return -ENOENT; 7091 7092 /* 7093 * no branch sampling for software events 7094 */ 7095 if (has_branch_stack(event)) 7096 return -EOPNOTSUPP; 7097 7098 perf_swevent_init_hrtimer(event); 7099 7100 return 0; 7101 } 7102 7103 static struct pmu perf_task_clock = { 7104 .task_ctx_nr = perf_sw_context, 7105 7106 .capabilities = PERF_PMU_CAP_NO_NMI, 7107 7108 .event_init = task_clock_event_init, 7109 .add = task_clock_event_add, 7110 .del = task_clock_event_del, 7111 .start = task_clock_event_start, 7112 .stop = task_clock_event_stop, 7113 .read = task_clock_event_read, 7114 }; 7115 7116 static void perf_pmu_nop_void(struct pmu *pmu) 7117 { 7118 } 7119 7120 static int perf_pmu_nop_int(struct pmu *pmu) 7121 { 7122 return 0; 7123 } 7124 7125 static void perf_pmu_start_txn(struct pmu *pmu) 7126 { 7127 perf_pmu_disable(pmu); 7128 } 7129 7130 static int perf_pmu_commit_txn(struct pmu *pmu) 7131 { 7132 perf_pmu_enable(pmu); 7133 return 0; 7134 } 7135 7136 static void perf_pmu_cancel_txn(struct pmu *pmu) 7137 { 7138 perf_pmu_enable(pmu); 7139 } 7140 7141 static int perf_event_idx_default(struct perf_event *event) 7142 { 7143 return 0; 7144 } 7145 7146 /* 7147 * Ensures all contexts with the same task_ctx_nr have the same 7148 * pmu_cpu_context too. 7149 */ 7150 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) 7151 { 7152 struct pmu *pmu; 7153 7154 if (ctxn < 0) 7155 return NULL; 7156 7157 list_for_each_entry(pmu, &pmus, entry) { 7158 if (pmu->task_ctx_nr == ctxn) 7159 return pmu->pmu_cpu_context; 7160 } 7161 7162 return NULL; 7163 } 7164 7165 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) 7166 { 7167 int cpu; 7168 7169 for_each_possible_cpu(cpu) { 7170 struct perf_cpu_context *cpuctx; 7171 7172 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7173 7174 if (cpuctx->unique_pmu == old_pmu) 7175 cpuctx->unique_pmu = pmu; 7176 } 7177 } 7178 7179 static void free_pmu_context(struct pmu *pmu) 7180 { 7181 struct pmu *i; 7182 7183 mutex_lock(&pmus_lock); 7184 /* 7185 * Like a real lame refcount. 
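 * That is: walk all pmus and check whether another pmu still shares this pmu_cpu_context; if so, only fix up the unique_pmu pointers, otherwise we were the last user and the percpu data is freed.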
7186 */ 7187 list_for_each_entry(i, &pmus, entry) { 7188 if (i->pmu_cpu_context == pmu->pmu_cpu_context) { 7189 update_pmu_context(i, pmu); 7190 goto out; 7191 } 7192 } 7193 7194 free_percpu(pmu->pmu_cpu_context); 7195 out: 7196 mutex_unlock(&pmus_lock); 7197 } 7198 static struct idr pmu_idr; 7199 7200 static ssize_t 7201 type_show(struct device *dev, struct device_attribute *attr, char *page) 7202 { 7203 struct pmu *pmu = dev_get_drvdata(dev); 7204 7205 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); 7206 } 7207 static DEVICE_ATTR_RO(type); 7208 7209 static ssize_t 7210 perf_event_mux_interval_ms_show(struct device *dev, 7211 struct device_attribute *attr, 7212 char *page) 7213 { 7214 struct pmu *pmu = dev_get_drvdata(dev); 7215 7216 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); 7217 } 7218 7219 static DEFINE_MUTEX(mux_interval_mutex); 7220 7221 static ssize_t 7222 perf_event_mux_interval_ms_store(struct device *dev, 7223 struct device_attribute *attr, 7224 const char *buf, size_t count) 7225 { 7226 struct pmu *pmu = dev_get_drvdata(dev); 7227 int timer, cpu, ret; 7228 7229 ret = kstrtoint(buf, 0, &timer); 7230 if (ret) 7231 return ret; 7232 7233 if (timer < 1) 7234 return -EINVAL; 7235 7236 /* same value, nothing to do */ 7237 if (timer == pmu->hrtimer_interval_ms) 7238 return count; 7239 7240 mutex_lock(&mux_interval_mutex); 7241 pmu->hrtimer_interval_ms = timer; 7242 7243 /* update all cpuctx for this PMU */ 7244 get_online_cpus(); 7245 for_each_online_cpu(cpu) { 7246 struct perf_cpu_context *cpuctx; 7247 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7248 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); 7249 7250 cpu_function_call(cpu, 7251 (remote_function_f)perf_mux_hrtimer_restart, cpuctx); 7252 } 7253 put_online_cpus(); 7254 mutex_unlock(&mux_interval_mutex); 7255 7256 return count; 7257 } 7258 static DEVICE_ATTR_RW(perf_event_mux_interval_ms); 7259 7260 static struct attribute *pmu_dev_attrs[] = { 7261 &dev_attr_type.attr, 7262 &dev_attr_perf_event_mux_interval_ms.attr, 7263 NULL, 7264 }; 7265 ATTRIBUTE_GROUPS(pmu_dev); 7266 7267 static int pmu_bus_running; 7268 static struct bus_type pmu_bus = { 7269 .name = "event_source", 7270 .dev_groups = pmu_dev_groups, 7271 }; 7272 7273 static void pmu_dev_release(struct device *dev) 7274 { 7275 kfree(dev); 7276 } 7277 7278 static int pmu_dev_alloc(struct pmu *pmu) 7279 { 7280 int ret = -ENOMEM; 7281 7282 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); 7283 if (!pmu->dev) 7284 goto out; 7285 7286 pmu->dev->groups = pmu->attr_groups; 7287 device_initialize(pmu->dev); 7288 ret = dev_set_name(pmu->dev, "%s", pmu->name); 7289 if (ret) 7290 goto free_dev; 7291 7292 dev_set_drvdata(pmu->dev, pmu); 7293 pmu->dev->bus = &pmu_bus; 7294 pmu->dev->release = pmu_dev_release; 7295 ret = device_add(pmu->dev); 7296 if (ret) 7297 goto free_dev; 7298 7299 out: 7300 return ret; 7301 7302 free_dev: 7303 put_device(pmu->dev); 7304 goto out; 7305 } 7306 7307 static struct lock_class_key cpuctx_mutex; 7308 static struct lock_class_key cpuctx_lock; 7309 7310 int perf_pmu_register(struct pmu *pmu, const char *name, int type) 7311 { 7312 int cpu, ret; 7313 7314 mutex_lock(&pmus_lock); 7315 ret = -ENOMEM; 7316 pmu->pmu_disable_count = alloc_percpu(int); 7317 if (!pmu->pmu_disable_count) 7318 goto unlock; 7319 7320 pmu->type = -1; 7321 if (!name) 7322 goto skip_type; 7323 pmu->name = name; 7324 7325 if (type < 0) { 7326 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL); 7327 if (type < 0) { 7328 ret =
type; 7329 goto free_pdc; 7330 } 7331 } 7332 pmu->type = type; 7333 7334 if (pmu_bus_running) { 7335 ret = pmu_dev_alloc(pmu); 7336 if (ret) 7337 goto free_idr; 7338 } 7339 7340 skip_type: 7341 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); 7342 if (pmu->pmu_cpu_context) 7343 goto got_cpu_context; 7344 7345 ret = -ENOMEM; 7346 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); 7347 if (!pmu->pmu_cpu_context) 7348 goto free_dev; 7349 7350 for_each_possible_cpu(cpu) { 7351 struct perf_cpu_context *cpuctx; 7352 7353 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7354 __perf_event_init_context(&cpuctx->ctx); 7355 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); 7356 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); 7357 cpuctx->ctx.pmu = pmu; 7358 7359 __perf_mux_hrtimer_init(cpuctx, cpu); 7360 7361 cpuctx->unique_pmu = pmu; 7362 } 7363 7364 got_cpu_context: 7365 if (!pmu->start_txn) { 7366 if (pmu->pmu_enable) { 7367 /* 7368 * If we have pmu_enable/pmu_disable calls, install 7369 * transaction stubs that use that to try and batch 7370 * hardware accesses. 7371 */ 7372 pmu->start_txn = perf_pmu_start_txn; 7373 pmu->commit_txn = perf_pmu_commit_txn; 7374 pmu->cancel_txn = perf_pmu_cancel_txn; 7375 } else { 7376 pmu->start_txn = perf_pmu_nop_void; 7377 pmu->commit_txn = perf_pmu_nop_int; 7378 pmu->cancel_txn = perf_pmu_nop_void; 7379 } 7380 } 7381 7382 if (!pmu->pmu_enable) { 7383 pmu->pmu_enable = perf_pmu_nop_void; 7384 pmu->pmu_disable = perf_pmu_nop_void; 7385 } 7386 7387 if (!pmu->event_idx) 7388 pmu->event_idx = perf_event_idx_default; 7389 7390 list_add_rcu(&pmu->entry, &pmus); 7391 atomic_set(&pmu->exclusive_cnt, 0); 7392 ret = 0; 7393 unlock: 7394 mutex_unlock(&pmus_lock); 7395 7396 return ret; 7397 7398 free_dev: 7399 device_del(pmu->dev); 7400 put_device(pmu->dev); 7401 7402 free_idr: 7403 if (pmu->type >= PERF_TYPE_MAX) 7404 idr_remove(&pmu_idr, pmu->type); 7405 7406 free_pdc: 7407 free_percpu(pmu->pmu_disable_count); 7408 goto unlock; 7409 } 7410 EXPORT_SYMBOL_GPL(perf_pmu_register); 7411 7412 void perf_pmu_unregister(struct pmu *pmu) 7413 { 7414 mutex_lock(&pmus_lock); 7415 list_del_rcu(&pmu->entry); 7416 mutex_unlock(&pmus_lock); 7417 7418 /* 7419 * We dereference the pmu list under both SRCU and regular RCU, so 7420 * synchronize against both of those. 7421 */ 7422 synchronize_srcu(&pmus_srcu); 7423 synchronize_rcu(); 7424 7425 free_percpu(pmu->pmu_disable_count); 7426 if (pmu->type >= PERF_TYPE_MAX) 7427 idr_remove(&pmu_idr, pmu->type); 7428 device_del(pmu->dev); 7429 put_device(pmu->dev); 7430 free_pmu_context(pmu); 7431 } 7432 EXPORT_SYMBOL_GPL(perf_pmu_unregister); 7433 7434 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) 7435 { 7436 struct perf_event_context *ctx = NULL; 7437 int ret; 7438 7439 if (!try_module_get(pmu->module)) 7440 return -ENODEV; 7441 7442 if (event->group_leader != event) { 7443 /* 7444 * This ctx->mutex can nest when we're called through 7445 * inheritance. See the perf_event_ctx_lock_nested() comment. 
7446 */ 7447 ctx = perf_event_ctx_lock_nested(event->group_leader, 7448 SINGLE_DEPTH_NESTING); 7449 BUG_ON(!ctx); 7450 } 7451 7452 event->pmu = pmu; 7453 ret = pmu->event_init(event); 7454 7455 if (ctx) 7456 perf_event_ctx_unlock(event->group_leader, ctx); 7457 7458 if (ret) 7459 module_put(pmu->module); 7460 7461 return ret; 7462 } 7463 7464 struct pmu *perf_init_event(struct perf_event *event) 7465 { 7466 struct pmu *pmu = NULL; 7467 int idx; 7468 int ret; 7469 7470 idx = srcu_read_lock(&pmus_srcu); 7471 7472 rcu_read_lock(); 7473 pmu = idr_find(&pmu_idr, event->attr.type); 7474 rcu_read_unlock(); 7475 if (pmu) { 7476 ret = perf_try_init_event(pmu, event); 7477 if (ret) 7478 pmu = ERR_PTR(ret); 7479 goto unlock; 7480 } 7481 7482 list_for_each_entry_rcu(pmu, &pmus, entry) { 7483 ret = perf_try_init_event(pmu, event); 7484 if (!ret) 7485 goto unlock; 7486 7487 if (ret != -ENOENT) { 7488 pmu = ERR_PTR(ret); 7489 goto unlock; 7490 } 7491 } 7492 pmu = ERR_PTR(-ENOENT); 7493 unlock: 7494 srcu_read_unlock(&pmus_srcu, idx); 7495 7496 return pmu; 7497 } 7498 7499 static void account_event_cpu(struct perf_event *event, int cpu) 7500 { 7501 if (event->parent) 7502 return; 7503 7504 if (is_cgroup_event(event)) 7505 atomic_inc(&per_cpu(perf_cgroup_events, cpu)); 7506 } 7507 7508 static void account_event(struct perf_event *event) 7509 { 7510 if (event->parent) 7511 return; 7512 7513 if (event->attach_state & PERF_ATTACH_TASK) 7514 static_key_slow_inc(&perf_sched_events.key); 7515 if (event->attr.mmap || event->attr.mmap_data) 7516 atomic_inc(&nr_mmap_events); 7517 if (event->attr.comm) 7518 atomic_inc(&nr_comm_events); 7519 if (event->attr.task) 7520 atomic_inc(&nr_task_events); 7521 if (event->attr.freq) { 7522 if (atomic_inc_return(&nr_freq_events) == 1) 7523 tick_nohz_full_kick_all(); 7524 } 7525 if (has_branch_stack(event)) 7526 static_key_slow_inc(&perf_sched_events.key); 7527 if (is_cgroup_event(event)) 7528 static_key_slow_inc(&perf_sched_events.key); 7529 7530 account_event_cpu(event, event->cpu); 7531 } 7532 7533 /* 7534 * Allocate and initialize an event structure 7535 */ 7536 static struct perf_event * 7537 perf_event_alloc(struct perf_event_attr *attr, int cpu, 7538 struct task_struct *task, 7539 struct perf_event *group_leader, 7540 struct perf_event *parent_event, 7541 perf_overflow_handler_t overflow_handler, 7542 void *context, int cgroup_fd) 7543 { 7544 struct pmu *pmu; 7545 struct perf_event *event; 7546 struct hw_perf_event *hwc; 7547 long err = -EINVAL; 7548 7549 if ((unsigned)cpu >= nr_cpu_ids) { 7550 if (!task || cpu != -1) 7551 return ERR_PTR(-EINVAL); 7552 } 7553 7554 event = kzalloc(sizeof(*event), GFP_KERNEL); 7555 if (!event) 7556 return ERR_PTR(-ENOMEM); 7557 7558 /* 7559 * Single events are their own group leaders, with an 7560 * empty sibling list: 7561 */ 7562 if (!group_leader) 7563 group_leader = event; 7564 7565 mutex_init(&event->child_mutex); 7566 INIT_LIST_HEAD(&event->child_list); 7567 7568 INIT_LIST_HEAD(&event->group_entry); 7569 INIT_LIST_HEAD(&event->event_entry); 7570 INIT_LIST_HEAD(&event->sibling_list); 7571 INIT_LIST_HEAD(&event->rb_entry); 7572 INIT_LIST_HEAD(&event->active_entry); 7573 INIT_HLIST_NODE(&event->hlist_entry); 7574 7575 7576 init_waitqueue_head(&event->waitq); 7577 init_irq_work(&event->pending, perf_pending_event); 7578 7579 mutex_init(&event->mmap_mutex); 7580 7581 atomic_long_set(&event->refcount, 1); 7582 event->cpu = cpu; 7583 event->attr = *attr; 7584 event->group_leader = group_leader; 7585 event->pmu = NULL; 7586 event->oncpu = -1;
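	/*
	 * The refcount starts at 1 for the reference returned to the caller
	 * (and ultimately handed to the file / owner); oncpu == -1 means the
	 * event is not currently scheduled on any CPU.
	 */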
7587 7588 event->parent = parent_event; 7589 7590 event->ns = get_pid_ns(task_active_pid_ns(current)); 7591 event->id = atomic64_inc_return(&perf_event_id); 7592 7593 event->state = PERF_EVENT_STATE_INACTIVE; 7594 7595 if (task) { 7596 event->attach_state = PERF_ATTACH_TASK; 7597 /* 7598 * XXX pmu::event_init needs to know what task to account to 7599 * and we cannot use the ctx information because we need the 7600 * pmu before we get a ctx. 7601 */ 7602 event->hw.target = task; 7603 } 7604 7605 event->clock = &local_clock; 7606 if (parent_event) 7607 event->clock = parent_event->clock; 7608 7609 if (!overflow_handler && parent_event) { 7610 overflow_handler = parent_event->overflow_handler; 7611 context = parent_event->overflow_handler_context; 7612 } 7613 7614 event->overflow_handler = overflow_handler; 7615 event->overflow_handler_context = context; 7616 7617 perf_event__state_init(event); 7618 7619 pmu = NULL; 7620 7621 hwc = &event->hw; 7622 hwc->sample_period = attr->sample_period; 7623 if (attr->freq && attr->sample_freq) 7624 hwc->sample_period = 1; 7625 hwc->last_period = hwc->sample_period; 7626 7627 local64_set(&hwc->period_left, hwc->sample_period); 7628 7629 /* 7630 * we currently do not support PERF_FORMAT_GROUP on inherited events 7631 */ 7632 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 7633 goto err_ns; 7634 7635 if (!has_branch_stack(event)) 7636 event->attr.branch_sample_type = 0; 7637 7638 if (cgroup_fd != -1) { 7639 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); 7640 if (err) 7641 goto err_ns; 7642 } 7643 7644 pmu = perf_init_event(event); 7645 if (!pmu) 7646 goto err_ns; 7647 else if (IS_ERR(pmu)) { 7648 err = PTR_ERR(pmu); 7649 goto err_ns; 7650 } 7651 7652 err = exclusive_event_init(event); 7653 if (err) 7654 goto err_pmu; 7655 7656 if (!event->parent) { 7657 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { 7658 err = get_callchain_buffers(); 7659 if (err) 7660 goto err_per_task; 7661 } 7662 } 7663 7664 return event; 7665 7666 err_per_task: 7667 exclusive_event_destroy(event); 7668 7669 err_pmu: 7670 if (event->destroy) 7671 event->destroy(event); 7672 module_put(pmu->module); 7673 err_ns: 7674 if (is_cgroup_event(event)) 7675 perf_detach_cgroup(event); 7676 if (event->ns) 7677 put_pid_ns(event->ns); 7678 kfree(event); 7679 7680 return ERR_PTR(err); 7681 } 7682 7683 static int perf_copy_attr(struct perf_event_attr __user *uattr, 7684 struct perf_event_attr *attr) 7685 { 7686 u32 size; 7687 int ret; 7688 7689 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) 7690 return -EFAULT; 7691 7692 /* 7693 * zero the full structure, so that a short copy will be nice. 7694 */ 7695 memset(attr, 0, sizeof(*attr)); 7696 7697 ret = get_user(size, &uattr->size); 7698 if (ret) 7699 return ret; 7700 7701 if (size > PAGE_SIZE) /* silly large */ 7702 goto err_size; 7703 7704 if (!size) /* abi compat */ 7705 size = PERF_ATTR_SIZE_VER0; 7706 7707 if (size < PERF_ATTR_SIZE_VER0) 7708 goto err_size; 7709 7710 /* 7711 * If we're handed a bigger struct than we know of, 7712 * ensure all the unknown bits are 0 - i.e. new 7713 * user-space does not rely on any kernel feature 7714 * extensions we don't know about yet.
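 * Any set bit in the tail fails with E2BIG below, and the size we do understand is written back to uattr->size so userspace can tell.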
7715 */ 7716 if (size > sizeof(*attr)) { 7717 unsigned char __user *addr; 7718 unsigned char __user *end; 7719 unsigned char val; 7720 7721 addr = (void __user *)uattr + sizeof(*attr); 7722 end = (void __user *)uattr + size; 7723 7724 for (; addr < end; addr++) { 7725 ret = get_user(val, addr); 7726 if (ret) 7727 return ret; 7728 if (val) 7729 goto err_size; 7730 } 7731 size = sizeof(*attr); 7732 } 7733 7734 ret = copy_from_user(attr, uattr, size); 7735 if (ret) 7736 return -EFAULT; 7737 7738 if (attr->__reserved_1) 7739 return -EINVAL; 7740 7741 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) 7742 return -EINVAL; 7743 7744 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) 7745 return -EINVAL; 7746 7747 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { 7748 u64 mask = attr->branch_sample_type; 7749 7750 /* only using defined bits */ 7751 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) 7752 return -EINVAL; 7753 7754 /* at least one branch bit must be set */ 7755 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) 7756 return -EINVAL; 7757 7758 /* propagate priv level, when not set for branch */ 7759 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { 7760 7761 /* exclude_kernel checked on syscall entry */ 7762 if (!attr->exclude_kernel) 7763 mask |= PERF_SAMPLE_BRANCH_KERNEL; 7764 7765 if (!attr->exclude_user) 7766 mask |= PERF_SAMPLE_BRANCH_USER; 7767 7768 if (!attr->exclude_hv) 7769 mask |= PERF_SAMPLE_BRANCH_HV; 7770 /* 7771 * adjust user setting (for HW filter setup) 7772 */ 7773 attr->branch_sample_type = mask; 7774 } 7775 /* privileged levels capture (kernel, hv): check permissions */ 7776 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) 7777 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 7778 return -EACCES; 7779 } 7780 7781 if (attr->sample_type & PERF_SAMPLE_REGS_USER) { 7782 ret = perf_reg_validate(attr->sample_regs_user); 7783 if (ret) 7784 return ret; 7785 } 7786 7787 if (attr->sample_type & PERF_SAMPLE_STACK_USER) { 7788 if (!arch_perf_have_user_stack_dump()) 7789 return -ENOSYS; 7790 7791 /* 7792 * We have __u32 type for the size, but so far 7793 * we can only use __u16 as maximum due to the 7794 * __u16 sample size limit. 7795 */ 7796 if (attr->sample_stack_user >= USHRT_MAX) 7797 ret = -EINVAL; 7798 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) 7799 ret = -EINVAL; 7800 } 7801 7802 if (attr->sample_type & PERF_SAMPLE_REGS_INTR) 7803 ret = perf_reg_validate(attr->sample_regs_intr); 7804 out: 7805 return ret; 7806 7807 err_size: 7808 put_user(sizeof(*attr), &uattr->size); 7809 ret = -E2BIG; 7810 goto out; 7811 } 7812 7813 static int 7814 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) 7815 { 7816 struct ring_buffer *rb = NULL; 7817 int ret = -EINVAL; 7818 7819 if (!output_event) 7820 goto set; 7821 7822 /* don't allow circular references */ 7823 if (event == output_event) 7824 goto out; 7825 7826 /* 7827 * Don't allow cross-cpu buffers 7828 */ 7829 if (output_event->cpu != event->cpu) 7830 goto out; 7831 7832 /* 7833 * If its not a per-cpu rb, it must be the same task. 7834 */ 7835 if (output_event->cpu == -1 && output_event->ctx != event->ctx) 7836 goto out; 7837 7838 /* 7839 * Mixing clocks in the same buffer is trouble you don't need. 
7840 */ 7841 if (output_event->clock != event->clock) 7842 goto out; 7843 7844 /* 7845 * If both events generate aux data, they must be on the same PMU 7846 */ 7847 if (has_aux(event) && has_aux(output_event) && 7848 event->pmu != output_event->pmu) 7849 goto out; 7850 7851 set: 7852 mutex_lock(&event->mmap_mutex); 7853 /* Can't redirect output if we've got an active mmap() */ 7854 if (atomic_read(&event->mmap_count)) 7855 goto unlock; 7856 7857 if (output_event) { 7858 /* get the rb we want to redirect to */ 7859 rb = ring_buffer_get(output_event); 7860 if (!rb) 7861 goto unlock; 7862 } 7863 7864 ring_buffer_attach(event, rb); 7865 7866 ret = 0; 7867 unlock: 7868 mutex_unlock(&event->mmap_mutex); 7869 7870 out: 7871 return ret; 7872 } 7873 7874 static void mutex_lock_double(struct mutex *a, struct mutex *b) 7875 { 7876 if (b < a) 7877 swap(a, b); 7878 7879 mutex_lock(a); 7880 mutex_lock_nested(b, SINGLE_DEPTH_NESTING); 7881 } 7882 7883 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) 7884 { 7885 bool nmi_safe = false; 7886 7887 switch (clk_id) { 7888 case CLOCK_MONOTONIC: 7889 event->clock = &ktime_get_mono_fast_ns; 7890 nmi_safe = true; 7891 break; 7892 7893 case CLOCK_MONOTONIC_RAW: 7894 event->clock = &ktime_get_raw_fast_ns; 7895 nmi_safe = true; 7896 break; 7897 7898 case CLOCK_REALTIME: 7899 event->clock = &ktime_get_real_ns; 7900 break; 7901 7902 case CLOCK_BOOTTIME: 7903 event->clock = &ktime_get_boot_ns; 7904 break; 7905 7906 case CLOCK_TAI: 7907 event->clock = &ktime_get_tai_ns; 7908 break; 7909 7910 default: 7911 return -EINVAL; 7912 } 7913 7914 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) 7915 return -EINVAL; 7916 7917 return 0; 7918 } 7919 7920 /** 7921 * sys_perf_event_open - open a performance event, associate it to a task/cpu 7922 * 7923 * @attr_uptr: event_id type attributes for monitoring/sampling 7924 * @pid: target pid 7925 * @cpu: target cpu 7926 * @group_fd: group leader event fd 7927 */ 7928 SYSCALL_DEFINE5(perf_event_open, 7929 struct perf_event_attr __user *, attr_uptr, 7930 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 7931 { 7932 struct perf_event *group_leader = NULL, *output_event = NULL; 7933 struct perf_event *event, *sibling; 7934 struct perf_event_attr attr; 7935 struct perf_event_context *ctx, *uninitialized_var(gctx); 7936 struct file *event_file = NULL; 7937 struct fd group = {NULL, 0}; 7938 struct task_struct *task = NULL; 7939 struct pmu *pmu; 7940 int event_fd; 7941 int move_group = 0; 7942 int err; 7943 int f_flags = O_RDWR; 7944 int cgroup_fd = -1; 7945 7946 /* for future expandability... */ 7947 if (flags & ~PERF_FLAG_ALL) 7948 return -EINVAL; 7949 7950 err = perf_copy_attr(attr_uptr, &attr); 7951 if (err) 7952 return err; 7953 7954 if (!attr.exclude_kernel) { 7955 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 7956 return -EACCES; 7957 } 7958 7959 if (attr.freq) { 7960 if (attr.sample_freq > sysctl_perf_event_sample_rate) 7961 return -EINVAL; 7962 } else { 7963 if (attr.sample_period & (1ULL << 63)) 7964 return -EINVAL; 7965 } 7966 7967 /* 7968 * In cgroup mode, the pid argument is used to pass the fd 7969 * opened to the cgroup directory in cgroupfs. The cpu argument 7970 * designates the cpu on which to monitor threads from that 7971 * cgroup. 
7972 */ 7973 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) 7974 return -EINVAL; 7975 7976 if (flags & PERF_FLAG_FD_CLOEXEC) 7977 f_flags |= O_CLOEXEC; 7978 7979 event_fd = get_unused_fd_flags(f_flags); 7980 if (event_fd < 0) 7981 return event_fd; 7982 7983 if (group_fd != -1) { 7984 err = perf_fget_light(group_fd, &group); 7985 if (err) 7986 goto err_fd; 7987 group_leader = group.file->private_data; 7988 if (flags & PERF_FLAG_FD_OUTPUT) 7989 output_event = group_leader; 7990 if (flags & PERF_FLAG_FD_NO_GROUP) 7991 group_leader = NULL; 7992 } 7993 7994 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { 7995 task = find_lively_task_by_vpid(pid); 7996 if (IS_ERR(task)) { 7997 err = PTR_ERR(task); 7998 goto err_group_fd; 7999 } 8000 } 8001 8002 if (task && group_leader && 8003 group_leader->attr.inherit != attr.inherit) { 8004 err = -EINVAL; 8005 goto err_task; 8006 } 8007 8008 get_online_cpus(); 8009 8010 if (flags & PERF_FLAG_PID_CGROUP) 8011 cgroup_fd = pid; 8012 8013 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, 8014 NULL, NULL, cgroup_fd); 8015 if (IS_ERR(event)) { 8016 err = PTR_ERR(event); 8017 goto err_cpus; 8018 } 8019 8020 if (is_sampling_event(event)) { 8021 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { 8022 err = -ENOTSUPP; 8023 goto err_alloc; 8024 } 8025 } 8026 8027 account_event(event); 8028 8029 /* 8030 * Special case software events and allow them to be part of 8031 * any hardware group. 8032 */ 8033 pmu = event->pmu; 8034 8035 if (attr.use_clockid) { 8036 err = perf_event_set_clock(event, attr.clockid); 8037 if (err) 8038 goto err_alloc; 8039 } 8040 8041 if (group_leader && 8042 (is_software_event(event) != is_software_event(group_leader))) { 8043 if (is_software_event(event)) { 8044 /* 8045 * If event and group_leader are not both a software 8046 * event, and event is, then group leader is not. 8047 * 8048 * Allow the addition of software events to !software 8049 * groups, this is safe because software events never 8050 * fail to schedule. 8051 */ 8052 pmu = group_leader->pmu; 8053 } else if (is_software_event(group_leader) && 8054 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { 8055 /* 8056 * In case the group is a pure software group, and we 8057 * try to add a hardware event, move the whole group to 8058 * the hardware context. 8059 */ 8060 move_group = 1; 8061 } 8062 } 8063 8064 /* 8065 * Get the target context (task or percpu): 8066 */ 8067 ctx = find_get_context(pmu, task, event); 8068 if (IS_ERR(ctx)) { 8069 err = PTR_ERR(ctx); 8070 goto err_alloc; 8071 } 8072 8073 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) { 8074 err = -EBUSY; 8075 goto err_context; 8076 } 8077 8078 if (task) { 8079 put_task_struct(task); 8080 task = NULL; 8081 } 8082 8083 /* 8084 * Look up the group leader (we will attach this event to it): 8085 */ 8086 if (group_leader) { 8087 err = -EINVAL; 8088 8089 /* 8090 * Do not allow a recursive hierarchy (this new sibling 8091 * becoming part of another group-sibling): 8092 */ 8093 if (group_leader->group_leader != group_leader) 8094 goto err_context; 8095 8096 /* All events in a group should have the same clock */ 8097 if (group_leader->clock != event->clock) 8098 goto err_context; 8099 8100 /* 8101 * Do not allow to attach to a group in a different 8102 * task or CPU context: 8103 */ 8104 if (move_group) { 8105 /* 8106 * Make sure we're both on the same task, or both 8107 * per-cpu events. 
8108 */ 8109 if (group_leader->ctx->task != ctx->task) 8110 goto err_context; 8111 8112 /* 8113 * Make sure we're both events for the same CPU; 8114 * grouping events for different CPUs is broken; since 8115 * you can never concurrently schedule them anyhow. 8116 */ 8117 if (group_leader->cpu != event->cpu) 8118 goto err_context; 8119 } else { 8120 if (group_leader->ctx != ctx) 8121 goto err_context; 8122 } 8123 8124 /* 8125 * Only a group leader can be exclusive or pinned 8126 */ 8127 if (attr.exclusive || attr.pinned) 8128 goto err_context; 8129 } 8130 8131 if (output_event) { 8132 err = perf_event_set_output(event, output_event); 8133 if (err) 8134 goto err_context; 8135 } 8136 8137 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, 8138 f_flags); 8139 if (IS_ERR(event_file)) { 8140 err = PTR_ERR(event_file); 8141 goto err_context; 8142 } 8143 8144 if (move_group) { 8145 gctx = group_leader->ctx; 8146 8147 /* 8148 * See perf_event_ctx_lock() for comments on the details 8149 * of swizzling perf_event::ctx. 8150 */ 8151 mutex_lock_double(&gctx->mutex, &ctx->mutex); 8152 8153 perf_remove_from_context(group_leader, false); 8154 8155 list_for_each_entry(sibling, &group_leader->sibling_list, 8156 group_entry) { 8157 perf_remove_from_context(sibling, false); 8158 put_ctx(gctx); 8159 } 8160 } else { 8161 mutex_lock(&ctx->mutex); 8162 } 8163 8164 WARN_ON_ONCE(ctx->parent_ctx); 8165 8166 if (move_group) { 8167 /* 8168 * Wait for everybody to stop referencing the events through 8169 * the old lists, before installing it on new lists. 8170 */ 8171 synchronize_rcu(); 8172 8173 /* 8174 * Install the group siblings before the group leader. 8175 * 8176 * Because a group leader will try and install the entire group 8177 * (through the sibling list, which is still intact), we can 8178 * end up with siblings installed in the wrong context. 8179 * 8180 * By installing siblings first we NO-OP because they're not 8181 * reachable through the group lists. 8182 */ 8183 list_for_each_entry(sibling, &group_leader->sibling_list, 8184 group_entry) { 8185 perf_event__state_init(sibling); 8186 perf_install_in_context(ctx, sibling, sibling->cpu); 8187 get_ctx(ctx); 8188 } 8189 8190 /* 8191 * Removing from the context ends up with a disabled 8192 * event. What we want here is an event in the initial 8193 * startup state, ready to be added into the new context. 8194 */ 8195 perf_event__state_init(group_leader); 8196 perf_install_in_context(ctx, group_leader, group_leader->cpu); 8197 get_ctx(ctx); 8198 } 8199 8200 if (!exclusive_event_installable(event, ctx)) { 8201 err = -EBUSY; 8202 mutex_unlock(&ctx->mutex); 8203 fput(event_file); 8204 goto err_context; 8205 } 8206 8207 perf_install_in_context(ctx, event, event->cpu); 8208 perf_unpin_context(ctx); 8209 8210 if (move_group) { 8211 mutex_unlock(&gctx->mutex); 8212 put_ctx(gctx); 8213 } 8214 mutex_unlock(&ctx->mutex); 8215 8216 put_online_cpus(); 8217 8218 event->owner = current; 8219 8220 mutex_lock(&current->perf_event_mutex); 8221 list_add_tail(&event->owner_entry, &current->perf_event_list); 8222 mutex_unlock(&current->perf_event_mutex); 8223 8224 /* 8225 * Precalculate sample_data sizes 8226 */ 8227 perf_event__header_size(event); 8228 perf_event__id_header_size(event); 8229 8230 /* 8231 * Drop the reference on the group_event after placing the 8232 * new event on the sibling_list. This ensures destruction 8233 * of the group leader will find the pointer to itself in 8234 * perf_group_detach().
8235 */ 8236 fdput(group); 8237 fd_install(event_fd, event_file); 8238 return event_fd; 8239 8240 err_context: 8241 perf_unpin_context(ctx); 8242 put_ctx(ctx); 8243 err_alloc: 8244 free_event(event); 8245 err_cpus: 8246 put_online_cpus(); 8247 err_task: 8248 if (task) 8249 put_task_struct(task); 8250 err_group_fd: 8251 fdput(group); 8252 err_fd: 8253 put_unused_fd(event_fd); 8254 return err; 8255 } 8256 8257 /** 8258 * perf_event_create_kernel_counter 8259 * 8260 * @attr: attributes of the counter to create 8261 * @cpu: cpu in which the counter is bound 8262 * @task: task to profile (NULL for percpu) 8263 */ 8264 struct perf_event * 8265 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, 8266 struct task_struct *task, 8267 perf_overflow_handler_t overflow_handler, 8268 void *context) 8269 { 8270 struct perf_event_context *ctx; 8271 struct perf_event *event; 8272 int err; 8273 8274 /* 8275 * Get the target context (task or percpu): 8276 */ 8277 8278 event = perf_event_alloc(attr, cpu, task, NULL, NULL, 8279 overflow_handler, context, -1); 8280 if (IS_ERR(event)) { 8281 err = PTR_ERR(event); 8282 goto err; 8283 } 8284 8285 /* Mark owner so we could distinguish it from user events. */ 8286 event->owner = EVENT_OWNER_KERNEL; 8287 8288 account_event(event); 8289 8290 ctx = find_get_context(event->pmu, task, event); 8291 if (IS_ERR(ctx)) { 8292 err = PTR_ERR(ctx); 8293 goto err_free; 8294 } 8295 8296 WARN_ON_ONCE(ctx->parent_ctx); 8297 mutex_lock(&ctx->mutex); 8298 if (!exclusive_event_installable(event, ctx)) { 8299 mutex_unlock(&ctx->mutex); 8300 perf_unpin_context(ctx); 8301 put_ctx(ctx); 8302 err = -EBUSY; 8303 goto err_free; 8304 } 8305 8306 perf_install_in_context(ctx, event, cpu); 8307 perf_unpin_context(ctx); 8308 mutex_unlock(&ctx->mutex); 8309 8310 return event; 8311 8312 err_free: 8313 free_event(event); 8314 err: 8315 return ERR_PTR(err); 8316 } 8317 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); 8318 8319 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) 8320 { 8321 struct perf_event_context *src_ctx; 8322 struct perf_event_context *dst_ctx; 8323 struct perf_event *event, *tmp; 8324 LIST_HEAD(events); 8325 8326 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; 8327 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; 8328 8329 /* 8330 * See perf_event_ctx_lock() for comments on the details 8331 * of swizzling perf_event::ctx. 8332 */ 8333 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex); 8334 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, 8335 event_entry) { 8336 perf_remove_from_context(event, false); 8337 unaccount_event_cpu(event, src_cpu); 8338 put_ctx(src_ctx); 8339 list_add(&event->migrate_entry, &events); 8340 } 8341 8342 /* 8343 * Wait for the events to quiesce before re-instating them. 8344 */ 8345 synchronize_rcu(); 8346 8347 /* 8348 * Re-instate events in 2 passes. 8349 * 8350 * Skip over group leaders and only install siblings on this first 8351 * pass, siblings will not get enabled without a leader, however a 8352 * leader will enable its siblings, even if those are still on the old 8353 * context. 
8354 */ 8355 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { 8356 if (event->group_leader == event) 8357 continue; 8358 8359 list_del(&event->migrate_entry); 8360 if (event->state >= PERF_EVENT_STATE_OFF) 8361 event->state = PERF_EVENT_STATE_INACTIVE; 8362 account_event_cpu(event, dst_cpu); 8363 perf_install_in_context(dst_ctx, event, dst_cpu); 8364 get_ctx(dst_ctx); 8365 } 8366 8367 /* 8368 * Once all the siblings are setup properly, install the group leaders 8369 * to make it go. 8370 */ 8371 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { 8372 list_del(&event->migrate_entry); 8373 if (event->state >= PERF_EVENT_STATE_OFF) 8374 event->state = PERF_EVENT_STATE_INACTIVE; 8375 account_event_cpu(event, dst_cpu); 8376 perf_install_in_context(dst_ctx, event, dst_cpu); 8377 get_ctx(dst_ctx); 8378 } 8379 mutex_unlock(&dst_ctx->mutex); 8380 mutex_unlock(&src_ctx->mutex); 8381 } 8382 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); 8383 8384 static void sync_child_event(struct perf_event *child_event, 8385 struct task_struct *child) 8386 { 8387 struct perf_event *parent_event = child_event->parent; 8388 u64 child_val; 8389 8390 if (child_event->attr.inherit_stat) 8391 perf_event_read_event(child_event, child); 8392 8393 child_val = perf_event_count(child_event); 8394 8395 /* 8396 * Add back the child's count to the parent's count: 8397 */ 8398 atomic64_add(child_val, &parent_event->child_count); 8399 atomic64_add(child_event->total_time_enabled, 8400 &parent_event->child_total_time_enabled); 8401 atomic64_add(child_event->total_time_running, 8402 &parent_event->child_total_time_running); 8403 8404 /* 8405 * Remove this event from the parent's list 8406 */ 8407 WARN_ON_ONCE(parent_event->ctx->parent_ctx); 8408 mutex_lock(&parent_event->child_mutex); 8409 list_del_init(&child_event->child_list); 8410 mutex_unlock(&parent_event->child_mutex); 8411 8412 /* 8413 * Make sure user/parent get notified, that we just 8414 * lost one event. 8415 */ 8416 perf_event_wakeup(parent_event); 8417 8418 /* 8419 * Release the parent event, if this was the last 8420 * reference to it. 8421 */ 8422 put_event(parent_event); 8423 } 8424 8425 static void 8426 __perf_event_exit_task(struct perf_event *child_event, 8427 struct perf_event_context *child_ctx, 8428 struct task_struct *child) 8429 { 8430 /* 8431 * Do not destroy the 'original' grouping; because of the context 8432 * switch optimization the original events could've ended up in a 8433 * random child task. 8434 * 8435 * If we were to destroy the original group, all group related 8436 * operations would cease to function properly after this random 8437 * child dies. 8438 * 8439 * Do destroy all inherited groups, we don't care about those 8440 * and being thorough is better. 8441 */ 8442 perf_remove_from_context(child_event, !!child_event->parent); 8443 8444 /* 8445 * It can happen that the parent exits first, and has events 8446 * that are still around due to the child reference. These 8447 * events need to be zapped. 
8448 */ 8449 if (child_event->parent) { 8450 sync_child_event(child_event, child); 8451 free_event(child_event); 8452 } else { 8453 child_event->state = PERF_EVENT_STATE_EXIT; 8454 perf_event_wakeup(child_event); 8455 } 8456 } 8457 8458 static void perf_event_exit_task_context(struct task_struct *child, int ctxn) 8459 { 8460 struct perf_event *child_event, *next; 8461 struct perf_event_context *child_ctx, *clone_ctx = NULL; 8462 unsigned long flags; 8463 8464 if (likely(!child->perf_event_ctxp[ctxn])) { 8465 perf_event_task(child, NULL, 0); 8466 return; 8467 } 8468 8469 local_irq_save(flags); 8470 /* 8471 * We can't reschedule here because interrupts are disabled, 8472 * and either child is current or it is a task that can't be 8473 * scheduled, so we are now safe from rescheduling changing 8474 * our context. 8475 */ 8476 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); 8477 8478 /* 8479 * Take the context lock here so that if find_get_context is 8480 * reading child->perf_event_ctxp, we wait until it has 8481 * incremented the context's refcount before we do put_ctx below. 8482 */ 8483 raw_spin_lock(&child_ctx->lock); 8484 task_ctx_sched_out(child_ctx); 8485 child->perf_event_ctxp[ctxn] = NULL; 8486 8487 /* 8488 * If this context is a clone; unclone it so it can't get 8489 * swapped to another process while we're removing all 8490 * the events from it. 8491 */ 8492 clone_ctx = unclone_ctx(child_ctx); 8493 update_context_time(child_ctx); 8494 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 8495 8496 if (clone_ctx) 8497 put_ctx(clone_ctx); 8498 8499 /* 8500 * Report the task dead after unscheduling the events so that we 8501 * won't get any samples after PERF_RECORD_EXIT. We can however still 8502 * get a few PERF_RECORD_READ events. 8503 */ 8504 perf_event_task(child, child_ctx, 0); 8505 8506 /* 8507 * We can recurse on the same lock type through: 8508 * 8509 * __perf_event_exit_task() 8510 * sync_child_event() 8511 * put_event() 8512 * mutex_lock(&ctx->mutex) 8513 * 8514 * But since its the parent context it won't be the same instance. 8515 */ 8516 mutex_lock(&child_ctx->mutex); 8517 8518 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry) 8519 __perf_event_exit_task(child_event, child_ctx, child); 8520 8521 mutex_unlock(&child_ctx->mutex); 8522 8523 put_ctx(child_ctx); 8524 } 8525 8526 /* 8527 * When a child task exits, feed back event values to parent events. 8528 */ 8529 void perf_event_exit_task(struct task_struct *child) 8530 { 8531 struct perf_event *event, *tmp; 8532 int ctxn; 8533 8534 mutex_lock(&child->perf_event_mutex); 8535 list_for_each_entry_safe(event, tmp, &child->perf_event_list, 8536 owner_entry) { 8537 list_del_init(&event->owner_entry); 8538 8539 /* 8540 * Ensure the list deletion is visible before we clear 8541 * the owner, closes a race against perf_release() where 8542 * we need to serialize on the owner->perf_event_mutex. 
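 * (This pairs with the barrier after the event->owner load on the perf_release() side, see perf_remove_from_owner().)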
static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	put_event(parent);

	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
	free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 *
 * Not all locks are strictly required, but take them anyway to be nice and
 * help out with the lockdep assertions.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}
/*
 * Inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	enum perf_event_active_state parent_state = parent_event->state;
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       child,
				       group_leader, parent_event,
				       NULL, NULL, -1);
	if (IS_ERR(child_event))
		return child_event;

	if (is_orphaned_event(parent_event) ||
	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
		free_event(child_event);
		return NULL;
	}

	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	/*
	 * Inherit the whole group: first the leader, then each sibling
	 * linked to the new leader.
	 */
	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
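
/*
 * Illustrative sketch (not part of the original file): inherit_group() above
 * re-creates in the child the same leader/sibling structure that user space
 * builds through the group_fd argument of perf_event_open(). A hedged
 * fragment showing how such a two-event group is opened:
 */
#if 0	/* example only; needs <unistd.h>, <sys/syscall.h>, <linux/perf_event.h> */
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.size	= sizeof(attr),
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	int leader, sibling;

	/* The group leader is opened with group_fd == -1 ... */
	leader = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

	/* ... and each sibling passes the leader's fd as group_fd. */
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	sibling = syscall(__NR_perf_event_open, &attr, 0, -1, leader, 0);
#endif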
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */
		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);
	if (!parent_ctx)
		return 0;

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list due
	 * to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}
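
/*
 * Illustrative sketch (not part of the original file): the two inheritance
 * passes above mirror the pinned/flexible split that user space controls via
 * attr.pinned. A pinned group leader is kept on the PMU at all times or put
 * into error state, while flexible events are multiplexed by rotation. A
 * hedged fragment requesting a pinned counter:
 */
#if 0	/* example only; needs <unistd.h>, <sys/syscall.h>, <linux/perf_event.h> */
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.size	= sizeof(attr),
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.pinned	= 1,	/* only meaningful for group leaders */
	};
	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
#endif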
/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret) {
			perf_event_free_task(child);
			return ret;
		}
	}

	return 0;
}

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
	}
}

static void perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = true;
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void __perf_event_exit_context(void *__info)
{
	struct remove_event re = { .detach_group = true };
	struct perf_event_context *ctx = __info;

	rcu_read_lock();
	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
		__perf_remove_from_context(&re);
	rcu_read_unlock();
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	perf_event_exit_cpu_context(cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = false;
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* Do not patch the jump label more than once per second. */
	jump_label_rate_limit(&perf_sched_events, HZ);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location. IOW, validate that we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}
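
/*
 * Illustrative sketch (not part of the original file): the BUILD_BUG_ON above
 * pins data_head at offset 1024 in the mmap'ed control page, which is what
 * user space relies on when it polls the ring buffer head. A hedged
 * user-space fragment, assuming fd is a perf event fd opened elsewhere and a
 * 4 KiB page size:
 */
#if 0	/* example only; needs <sys/mman.h>, <linux/perf_event.h> */
	struct perf_event_mmap_page *meta;
	__u64 head;

	/* 1 control page + 2^n data pages, here n = 3. */
	meta = mmap(NULL, (1 + 8) * 4096, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);

	head = meta->data_head;		/* written by the kernel */
	__sync_synchronize();		/* read barrier, as perf_event.h advises */

	/* records in [meta->data_tail, head) are ready to be consumed */
#endif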
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	if (pmu_attr->event_str)
		return sprintf(page, "%s\n", pmu_attr->event_str);

	return 0;
}
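
/*
 * Illustrative sketch (not part of the original file): a PMU driver exports
 * named events through sysfs by embedding a struct perf_pmu_events_attr and
 * pointing the show routine at perf_event_sysfs_show(). The event and driver
 * names below are hypothetical.
 */
#if 0	/* example only, inside a hypothetical PMU driver */
static struct perf_pmu_events_attr my_pmu_event_widgets = {
	.attr		= __ATTR(widgets, 0444, perf_event_sysfs_show, NULL),
	.event_str	= "event=0x2a",	/* parsed by the perf tool */
};

static struct attribute *my_pmu_events_attrs[] = {
	&my_pmu_event_widgets.attr.attr,
	NULL,
};

static struct attribute_group my_pmu_events_group = {
	.name	= "events",
	.attrs	= my_pmu_events_attrs,
};
#endif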
static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;

	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

static void perf_cgroup_attach(struct cgroup_subsys_state *css,
			       struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset)
		task_function_call(task, __perf_cgroup_move, task);
}

static void perf_cgroup_exit(struct cgroup_subsys_state *css,
			     struct cgroup_subsys_state *old_css,
			     struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet, this avoids
	 * trying to poke a half freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.exit		= perf_cgroup_exit,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */
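
/*
 * Illustrative sketch (not part of the original file): with CONFIG_CGROUP_PERF
 * the pid argument of perf_event_open() can name a cgroup instead of a task.
 * User space passes PERF_FLAG_PID_CGROUP together with an fd open on the
 * cgroup directory, and cgroup events must also be per-CPU (cpu >= 0). The
 * cgroup path below is hypothetical.
 */
#if 0	/* example only; needs <fcntl.h>, <sys/syscall.h>, <linux/perf_event.h> */
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.size	= sizeof(attr),
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	int cgrp_fd, fd;

	cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);

	/* pid == cgroup fd, cpu 0, no group leader, cgroup mode */
	fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, 0, -1,
		     PERF_FLAG_PID_CGROUP);
#endif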