1 /* 2 * Performance events core code: 3 * 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 8 * 9 * For licensing details see kernel-base/COPYING 10 */ 11 12 #include <linux/fs.h> 13 #include <linux/mm.h> 14 #include <linux/cpu.h> 15 #include <linux/smp.h> 16 #include <linux/idr.h> 17 #include <linux/file.h> 18 #include <linux/poll.h> 19 #include <linux/slab.h> 20 #include <linux/hash.h> 21 #include <linux/tick.h> 22 #include <linux/sysfs.h> 23 #include <linux/dcache.h> 24 #include <linux/percpu.h> 25 #include <linux/ptrace.h> 26 #include <linux/reboot.h> 27 #include <linux/vmstat.h> 28 #include <linux/device.h> 29 #include <linux/export.h> 30 #include <linux/vmalloc.h> 31 #include <linux/hardirq.h> 32 #include <linux/rculist.h> 33 #include <linux/uaccess.h> 34 #include <linux/syscalls.h> 35 #include <linux/anon_inodes.h> 36 #include <linux/kernel_stat.h> 37 #include <linux/cgroup.h> 38 #include <linux/perf_event.h> 39 #include <linux/trace_events.h> 40 #include <linux/hw_breakpoint.h> 41 #include <linux/mm_types.h> 42 #include <linux/module.h> 43 #include <linux/mman.h> 44 #include <linux/compat.h> 45 #include <linux/bpf.h> 46 #include <linux/filter.h> 47 48 #include "internal.h" 49 50 #include <asm/irq_regs.h> 51 52 static struct workqueue_struct *perf_wq; 53 54 typedef int (*remote_function_f)(void *); 55 56 struct remote_function_call { 57 struct task_struct *p; 58 remote_function_f func; 59 void *info; 60 int ret; 61 }; 62 63 static void remote_function(void *data) 64 { 65 struct remote_function_call *tfc = data; 66 struct task_struct *p = tfc->p; 67 68 if (p) { 69 tfc->ret = -EAGAIN; 70 if (task_cpu(p) != smp_processor_id() || !task_curr(p)) 71 return; 72 } 73 74 tfc->ret = tfc->func(tfc->info); 75 } 76 77 /** 78 * task_function_call - call a function on the cpu on which a task runs 79 * @p: the task to evaluate 80 * @func: the function to be called 81 * @info: the function call argument 82 * 83 * Calls the function @func when the task is currently running. This might 84 * be on the current CPU, which just calls the function directly 85 * 86 * returns: @func return value, or 87 * -ESRCH - when the process isn't running 88 * -EAGAIN - when the process moved away 89 */ 90 static int 91 task_function_call(struct task_struct *p, remote_function_f func, void *info) 92 { 93 struct remote_function_call data = { 94 .p = p, 95 .func = func, 96 .info = info, 97 .ret = -ESRCH, /* No such (running) process */ 98 }; 99 100 if (task_curr(p)) 101 smp_call_function_single(task_cpu(p), remote_function, &data, 1); 102 103 return data.ret; 104 } 105 106 /** 107 * cpu_function_call - call a function on the cpu 108 * @func: the function to be called 109 * @info: the function call argument 110 * 111 * Calls the function @func on the remote cpu. 
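 *
 * Illustrative usage (added; these exact calls appear later in this file):
 *
 *	task_function_call(task, __perf_event_disable, event);
 *	cpu_function_call(event->cpu, __perf_event_disable, event);
 *
 * where the callback runs on the target CPU, with the return-value
 * conventions described below.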
112 * 113 * returns: @func return value or -ENXIO when the cpu is offline 114 */ 115 static int cpu_function_call(int cpu, remote_function_f func, void *info) 116 { 117 struct remote_function_call data = { 118 .p = NULL, 119 .func = func, 120 .info = info, 121 .ret = -ENXIO, /* No such CPU */ 122 }; 123 124 smp_call_function_single(cpu, remote_function, &data, 1); 125 126 return data.ret; 127 } 128 129 #define EVENT_OWNER_KERNEL ((void *) -1) 130 131 static bool is_kernel_event(struct perf_event *event) 132 { 133 return event->owner == EVENT_OWNER_KERNEL; 134 } 135 136 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ 137 PERF_FLAG_FD_OUTPUT |\ 138 PERF_FLAG_PID_CGROUP |\ 139 PERF_FLAG_FD_CLOEXEC) 140 141 /* 142 * branch priv levels that need permission checks 143 */ 144 #define PERF_SAMPLE_BRANCH_PERM_PLM \ 145 (PERF_SAMPLE_BRANCH_KERNEL |\ 146 PERF_SAMPLE_BRANCH_HV) 147 148 enum event_type_t { 149 EVENT_FLEXIBLE = 0x1, 150 EVENT_PINNED = 0x2, 151 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, 152 }; 153 154 /* 155 * perf_sched_events : >0 events exist 156 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu 157 */ 158 struct static_key_deferred perf_sched_events __read_mostly; 159 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); 160 static DEFINE_PER_CPU(int, perf_sched_cb_usages); 161 162 static atomic_t nr_mmap_events __read_mostly; 163 static atomic_t nr_comm_events __read_mostly; 164 static atomic_t nr_task_events __read_mostly; 165 static atomic_t nr_freq_events __read_mostly; 166 static atomic_t nr_switch_events __read_mostly; 167 168 static LIST_HEAD(pmus); 169 static DEFINE_MUTEX(pmus_lock); 170 static struct srcu_struct pmus_srcu; 171 172 /* 173 * perf event paranoia level: 174 * -1 - not paranoid at all 175 * 0 - disallow raw tracepoint access for unpriv 176 * 1 - disallow cpu events for unpriv 177 * 2 - disallow kernel profiling for unpriv 178 */ 179 int sysctl_perf_event_paranoid __read_mostly = 1; 180 181 /* Minimum for 512 kiB + 1 user control page */ 182 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ 183 184 /* 185 * max perf event sample rate 186 */ 187 #define DEFAULT_MAX_SAMPLE_RATE 100000 188 #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE) 189 #define DEFAULT_CPU_TIME_MAX_PERCENT 25 190 191 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; 192 193 static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); 194 static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS; 195 196 static int perf_sample_allowed_ns __read_mostly = 197 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100; 198 199 static void update_perf_cpu_limits(void) 200 { 201 u64 tmp = perf_sample_period_ns; 202 203 tmp *= sysctl_perf_cpu_time_max_percent; 204 do_div(tmp, 100); 205 ACCESS_ONCE(perf_sample_allowed_ns) = tmp; 206 } 207 208 static int perf_rotate_context(struct perf_cpu_context *cpuctx); 209 210 int perf_proc_update_handler(struct ctl_table *table, int write, 211 void __user *buffer, size_t *lenp, 212 loff_t *ppos) 213 { 214 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 215 216 if (ret || !write) 217 return ret; 218 219 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); 220 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; 221 update_perf_cpu_limits(); 222 223 return 0; 224 } 225 226 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT; 227 
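/*
 * Worked example (added for illustration, using the defaults above): with
 * sysctl_perf_event_sample_rate = 100000 the sample period is
 * NSEC_PER_SEC / 100000 = 10000 ns, and with
 * sysctl_perf_cpu_time_max_percent = 25 update_perf_cpu_limits() yields
 *
 *	perf_sample_allowed_ns = 10000 * 25 / 100 = 2500 ns
 *
 * i.e. an average sample may burn at most 2.5us of CPU time before
 * perf_sample_event_took() starts lowering the sample rate.
 */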
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	update_perf_cpu_limits();

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done. This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static void perf_duration_warn(struct irq_work *w)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	local_samples_len = __this_cpu_read(running_sample_length);
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	printk_ratelimited(KERN_WARNING
			"perf interrupt took too long (%lld > %lld), lowering "
			"kernel.perf_event_max_sample_rate to %d\n",
			avg_local_sample_len, allowed_ns >> 1,
			sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	if (allowed_ns == 0)
		return;

	/* decay the counter by 1 average sample */
	local_samples_len = __this_cpu_read(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__this_cpu_write(running_sample_length, local_samples_len);

	/*
	 * note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
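	 *
	 * Illustrative numbers (added): with NR_ACCUMULATED_SAMPLES == 128
	 * and a steady stream of 3000 ns samples the accumulator converges
	 * towards 128 * 3000 ns, so the average computed below converges
	 * towards 3000 ns -- an exponential moving average with a 1/128
	 * decay per sample, maintained without a separate sample count.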
288 */ 289 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; 290 291 if (avg_local_sample_len <= allowed_ns) 292 return; 293 294 if (max_samples_per_tick <= 1) 295 return; 296 297 max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2); 298 sysctl_perf_event_sample_rate = max_samples_per_tick * HZ; 299 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; 300 301 update_perf_cpu_limits(); 302 303 if (!irq_work_queue(&perf_duration_work)) { 304 early_printk("perf interrupt took too long (%lld > %lld), lowering " 305 "kernel.perf_event_max_sample_rate to %d\n", 306 avg_local_sample_len, allowed_ns >> 1, 307 sysctl_perf_event_sample_rate); 308 } 309 } 310 311 static atomic64_t perf_event_id; 312 313 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 314 enum event_type_t event_type); 315 316 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 317 enum event_type_t event_type, 318 struct task_struct *task); 319 320 static void update_context_time(struct perf_event_context *ctx); 321 static u64 perf_event_time(struct perf_event *event); 322 323 void __weak perf_event_print_debug(void) { } 324 325 extern __weak const char *perf_pmu_name(void) 326 { 327 return "pmu"; 328 } 329 330 static inline u64 perf_clock(void) 331 { 332 return local_clock(); 333 } 334 335 static inline u64 perf_event_clock(struct perf_event *event) 336 { 337 return event->clock(); 338 } 339 340 static inline struct perf_cpu_context * 341 __get_cpu_context(struct perf_event_context *ctx) 342 { 343 return this_cpu_ptr(ctx->pmu->pmu_cpu_context); 344 } 345 346 static void perf_ctx_lock(struct perf_cpu_context *cpuctx, 347 struct perf_event_context *ctx) 348 { 349 raw_spin_lock(&cpuctx->ctx.lock); 350 if (ctx) 351 raw_spin_lock(&ctx->lock); 352 } 353 354 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, 355 struct perf_event_context *ctx) 356 { 357 if (ctx) 358 raw_spin_unlock(&ctx->lock); 359 raw_spin_unlock(&cpuctx->ctx.lock); 360 } 361 362 #ifdef CONFIG_CGROUP_PERF 363 364 static inline bool 365 perf_cgroup_match(struct perf_event *event) 366 { 367 struct perf_event_context *ctx = event->ctx; 368 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 369 370 /* @event doesn't care about cgroup */ 371 if (!event->cgrp) 372 return true; 373 374 /* wants specific cgroup scope but @cpuctx isn't associated with any */ 375 if (!cpuctx->cgrp) 376 return false; 377 378 /* 379 * Cgroup scoping is recursive. An event enabled for a cgroup is 380 * also enabled for all its descendant cgroups. If @cpuctx's 381 * cgroup is a descendant of @event's (the test covers identity 382 * case), it's a match. 
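	 *
	 * Illustrative example (added): an event opened on cgroup /A
	 * matches while the CPU runs a task in /A or /A/B, but not while
	 * it runs a task in /C:
	 *
	 *	cgroup_is_descendant(/A/B, /A) -> true  (event counts)
	 *	cgroup_is_descendant(/C,   /A) -> false (event is filtered out)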
383 */ 384 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, 385 event->cgrp->css.cgroup); 386 } 387 388 static inline void perf_detach_cgroup(struct perf_event *event) 389 { 390 css_put(&event->cgrp->css); 391 event->cgrp = NULL; 392 } 393 394 static inline int is_cgroup_event(struct perf_event *event) 395 { 396 return event->cgrp != NULL; 397 } 398 399 static inline u64 perf_cgroup_event_time(struct perf_event *event) 400 { 401 struct perf_cgroup_info *t; 402 403 t = per_cpu_ptr(event->cgrp->info, event->cpu); 404 return t->time; 405 } 406 407 static inline void __update_cgrp_time(struct perf_cgroup *cgrp) 408 { 409 struct perf_cgroup_info *info; 410 u64 now; 411 412 now = perf_clock(); 413 414 info = this_cpu_ptr(cgrp->info); 415 416 info->time += now - info->timestamp; 417 info->timestamp = now; 418 } 419 420 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) 421 { 422 struct perf_cgroup *cgrp_out = cpuctx->cgrp; 423 if (cgrp_out) 424 __update_cgrp_time(cgrp_out); 425 } 426 427 static inline void update_cgrp_time_from_event(struct perf_event *event) 428 { 429 struct perf_cgroup *cgrp; 430 431 /* 432 * ensure we access cgroup data only when needed and 433 * when we know the cgroup is pinned (css_get) 434 */ 435 if (!is_cgroup_event(event)) 436 return; 437 438 cgrp = perf_cgroup_from_task(current); 439 /* 440 * Do not update time when cgroup is not active 441 */ 442 if (cgrp == event->cgrp) 443 __update_cgrp_time(event->cgrp); 444 } 445 446 static inline void 447 perf_cgroup_set_timestamp(struct task_struct *task, 448 struct perf_event_context *ctx) 449 { 450 struct perf_cgroup *cgrp; 451 struct perf_cgroup_info *info; 452 453 /* 454 * ctx->lock held by caller 455 * ensure we do not access cgroup data 456 * unless we have the cgroup pinned (css_get) 457 */ 458 if (!task || !ctx->nr_cgroups) 459 return; 460 461 cgrp = perf_cgroup_from_task(task); 462 info = this_cpu_ptr(cgrp->info); 463 info->timestamp = ctx->timestamp; 464 } 465 466 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */ 467 #define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */ 468 469 /* 470 * reschedule events based on the cgroup constraint of task. 471 * 472 * mode SWOUT : schedule out everything 473 * mode SWIN : schedule in based on cgroup for next 474 */ 475 static void perf_cgroup_switch(struct task_struct *task, int mode) 476 { 477 struct perf_cpu_context *cpuctx; 478 struct pmu *pmu; 479 unsigned long flags; 480 481 /* 482 * disable interrupts to avoid geting nr_cgroup 483 * changes via __perf_event_disable(). Also 484 * avoids preemption. 485 */ 486 local_irq_save(flags); 487 488 /* 489 * we reschedule only in the presence of cgroup 490 * constrained events. 491 */ 492 rcu_read_lock(); 493 494 list_for_each_entry_rcu(pmu, &pmus, entry) { 495 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 496 if (cpuctx->unique_pmu != pmu) 497 continue; /* ensure we process each cpuctx once */ 498 499 /* 500 * perf_cgroup_events says at least one 501 * context on this CPU has cgroup events. 502 * 503 * ctx->nr_cgroups reports the number of cgroup 504 * events for a context. 
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to pass
				 * task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out during the earlier context switch if that was not the case.
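	 *
	 * Illustrative cases (added):
	 *
	 *	prev in /A, task in /A  ->  cgrp1 == cgrp2, nothing to do
	 *	prev in /A, task in /B  ->  perf_cgroup_switch(task, PERF_CGROUP_SWIN)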
584 */ 585 if (cgrp1 != cgrp2) 586 perf_cgroup_switch(task, PERF_CGROUP_SWIN); 587 } 588 589 static inline int perf_cgroup_connect(int fd, struct perf_event *event, 590 struct perf_event_attr *attr, 591 struct perf_event *group_leader) 592 { 593 struct perf_cgroup *cgrp; 594 struct cgroup_subsys_state *css; 595 struct fd f = fdget(fd); 596 int ret = 0; 597 598 if (!f.file) 599 return -EBADF; 600 601 css = css_tryget_online_from_dir(f.file->f_path.dentry, 602 &perf_event_cgrp_subsys); 603 if (IS_ERR(css)) { 604 ret = PTR_ERR(css); 605 goto out; 606 } 607 608 cgrp = container_of(css, struct perf_cgroup, css); 609 event->cgrp = cgrp; 610 611 /* 612 * all events in a group must monitor 613 * the same cgroup because a task belongs 614 * to only one perf cgroup at a time 615 */ 616 if (group_leader && group_leader->cgrp != cgrp) { 617 perf_detach_cgroup(event); 618 ret = -EINVAL; 619 } 620 out: 621 fdput(f); 622 return ret; 623 } 624 625 static inline void 626 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) 627 { 628 struct perf_cgroup_info *t; 629 t = per_cpu_ptr(event->cgrp->info, event->cpu); 630 event->shadow_ctx_time = now - t->timestamp; 631 } 632 633 static inline void 634 perf_cgroup_defer_enabled(struct perf_event *event) 635 { 636 /* 637 * when the current task's perf cgroup does not match 638 * the event's, we need to remember to call the 639 * perf_mark_enable() function the first time a task with 640 * a matching perf cgroup is scheduled in. 641 */ 642 if (is_cgroup_event(event) && !perf_cgroup_match(event)) 643 event->cgrp_defer_enabled = 1; 644 } 645 646 static inline void 647 perf_cgroup_mark_enabled(struct perf_event *event, 648 struct perf_event_context *ctx) 649 { 650 struct perf_event *sub; 651 u64 tstamp = perf_event_time(event); 652 653 if (!event->cgrp_defer_enabled) 654 return; 655 656 event->cgrp_defer_enabled = 0; 657 658 event->tstamp_enabled = tstamp - event->total_time_enabled; 659 list_for_each_entry(sub, &event->sibling_list, group_entry) { 660 if (sub->state >= PERF_EVENT_STATE_INACTIVE) { 661 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 662 sub->cgrp_defer_enabled = 0; 663 } 664 } 665 } 666 #else /* !CONFIG_CGROUP_PERF */ 667 668 static inline bool 669 perf_cgroup_match(struct perf_event *event) 670 { 671 return true; 672 } 673 674 static inline void perf_detach_cgroup(struct perf_event *event) 675 {} 676 677 static inline int is_cgroup_event(struct perf_event *event) 678 { 679 return 0; 680 } 681 682 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) 683 { 684 return 0; 685 } 686 687 static inline void update_cgrp_time_from_event(struct perf_event *event) 688 { 689 } 690 691 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) 692 { 693 } 694 695 static inline void perf_cgroup_sched_out(struct task_struct *task, 696 struct task_struct *next) 697 { 698 } 699 700 static inline void perf_cgroup_sched_in(struct task_struct *prev, 701 struct task_struct *task) 702 { 703 } 704 705 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, 706 struct perf_event_attr *attr, 707 struct perf_event *group_leader) 708 { 709 return -EINVAL; 710 } 711 712 static inline void 713 perf_cgroup_set_timestamp(struct task_struct *task, 714 struct perf_event_context *ctx) 715 { 716 } 717 718 void 719 perf_cgroup_switch(struct task_struct *task, struct task_struct *next) 720 { 721 } 722 723 static inline void 724 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) 725 { 726 } 727 728 
static inline u64 perf_cgroup_event_time(struct perf_event *event) 729 { 730 return 0; 731 } 732 733 static inline void 734 perf_cgroup_defer_enabled(struct perf_event *event) 735 { 736 } 737 738 static inline void 739 perf_cgroup_mark_enabled(struct perf_event *event, 740 struct perf_event_context *ctx) 741 { 742 } 743 #endif 744 745 /* 746 * set default to be dependent on timer tick just 747 * like original code 748 */ 749 #define PERF_CPU_HRTIMER (1000 / HZ) 750 /* 751 * function must be called with interrupts disbled 752 */ 753 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) 754 { 755 struct perf_cpu_context *cpuctx; 756 int rotations = 0; 757 758 WARN_ON(!irqs_disabled()); 759 760 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); 761 rotations = perf_rotate_context(cpuctx); 762 763 raw_spin_lock(&cpuctx->hrtimer_lock); 764 if (rotations) 765 hrtimer_forward_now(hr, cpuctx->hrtimer_interval); 766 else 767 cpuctx->hrtimer_active = 0; 768 raw_spin_unlock(&cpuctx->hrtimer_lock); 769 770 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART; 771 } 772 773 static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) 774 { 775 struct hrtimer *timer = &cpuctx->hrtimer; 776 struct pmu *pmu = cpuctx->ctx.pmu; 777 u64 interval; 778 779 /* no multiplexing needed for SW PMU */ 780 if (pmu->task_ctx_nr == perf_sw_context) 781 return; 782 783 /* 784 * check default is sane, if not set then force to 785 * default interval (1/tick) 786 */ 787 interval = pmu->hrtimer_interval_ms; 788 if (interval < 1) 789 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; 790 791 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); 792 793 raw_spin_lock_init(&cpuctx->hrtimer_lock); 794 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 795 timer->function = perf_mux_hrtimer_handler; 796 } 797 798 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) 799 { 800 struct hrtimer *timer = &cpuctx->hrtimer; 801 struct pmu *pmu = cpuctx->ctx.pmu; 802 unsigned long flags; 803 804 /* not for SW PMU */ 805 if (pmu->task_ctx_nr == perf_sw_context) 806 return 0; 807 808 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags); 809 if (!cpuctx->hrtimer_active) { 810 cpuctx->hrtimer_active = 1; 811 hrtimer_forward_now(timer, cpuctx->hrtimer_interval); 812 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); 813 } 814 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags); 815 816 return 0; 817 } 818 819 void perf_pmu_disable(struct pmu *pmu) 820 { 821 int *count = this_cpu_ptr(pmu->pmu_disable_count); 822 if (!(*count)++) 823 pmu->pmu_disable(pmu); 824 } 825 826 void perf_pmu_enable(struct pmu *pmu) 827 { 828 int *count = this_cpu_ptr(pmu->pmu_disable_count); 829 if (!--(*count)) 830 pmu->pmu_enable(pmu); 831 } 832 833 static DEFINE_PER_CPU(struct list_head, active_ctx_list); 834 835 /* 836 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and 837 * perf_event_task_tick() are fully serialized because they're strictly cpu 838 * affine and perf_event_ctx{activate,deactivate} are called with IRQs 839 * disabled, while perf_event_task_tick is called from IRQ context. 
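 *
 * Illustrative call site (added; see event_sched_in() further down):
 *
 *	if (!ctx->nr_active++)
 *		perf_event_ctx_activate(ctx);
 *
 * which runs with ctx->lock held and interrupts disabled, matching the
 * WARN_ON(!irqs_disabled()) checks below.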
840 */ 841 static void perf_event_ctx_activate(struct perf_event_context *ctx) 842 { 843 struct list_head *head = this_cpu_ptr(&active_ctx_list); 844 845 WARN_ON(!irqs_disabled()); 846 847 WARN_ON(!list_empty(&ctx->active_ctx_list)); 848 849 list_add(&ctx->active_ctx_list, head); 850 } 851 852 static void perf_event_ctx_deactivate(struct perf_event_context *ctx) 853 { 854 WARN_ON(!irqs_disabled()); 855 856 WARN_ON(list_empty(&ctx->active_ctx_list)); 857 858 list_del_init(&ctx->active_ctx_list); 859 } 860 861 static void get_ctx(struct perf_event_context *ctx) 862 { 863 WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); 864 } 865 866 static void free_ctx(struct rcu_head *head) 867 { 868 struct perf_event_context *ctx; 869 870 ctx = container_of(head, struct perf_event_context, rcu_head); 871 kfree(ctx->task_ctx_data); 872 kfree(ctx); 873 } 874 875 static void put_ctx(struct perf_event_context *ctx) 876 { 877 if (atomic_dec_and_test(&ctx->refcount)) { 878 if (ctx->parent_ctx) 879 put_ctx(ctx->parent_ctx); 880 if (ctx->task) 881 put_task_struct(ctx->task); 882 call_rcu(&ctx->rcu_head, free_ctx); 883 } 884 } 885 886 /* 887 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and 888 * perf_pmu_migrate_context() we need some magic. 889 * 890 * Those places that change perf_event::ctx will hold both 891 * perf_event_ctx::mutex of the 'old' and 'new' ctx value. 892 * 893 * Lock ordering is by mutex address. There are two other sites where 894 * perf_event_context::mutex nests and those are: 895 * 896 * - perf_event_exit_task_context() [ child , 0 ] 897 * __perf_event_exit_task() 898 * sync_child_event() 899 * put_event() [ parent, 1 ] 900 * 901 * - perf_event_init_context() [ parent, 0 ] 902 * inherit_task_group() 903 * inherit_group() 904 * inherit_event() 905 * perf_event_alloc() 906 * perf_init_event() 907 * perf_try_init_event() [ child , 1 ] 908 * 909 * While it appears there is an obvious deadlock here -- the parent and child 910 * nesting levels are inverted between the two. This is in fact safe because 911 * life-time rules separate them. That is an exiting task cannot fork, and a 912 * spawning task cannot (yet) exit. 913 * 914 * But remember that that these are parent<->child context relations, and 915 * migration does not affect children, therefore these two orderings should not 916 * interact. 917 * 918 * The change in perf_event::ctx does not affect children (as claimed above) 919 * because the sys_perf_event_open() case will install a new event and break 920 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only 921 * concerned with cpuctx and that doesn't have children. 922 * 923 * The places that change perf_event::ctx will issue: 924 * 925 * perf_remove_from_context(); 926 * synchronize_rcu(); 927 * perf_install_in_context(); 928 * 929 * to affect the change. The remove_from_context() + synchronize_rcu() should 930 * quiesce the event, after which we can install it in the new location. This 931 * means that only external vectors (perf_fops, prctl) can perturb the event 932 * while in transit. Therefore all such accessors should also acquire 933 * perf_event_context::mutex to serialize against this. 934 * 935 * However; because event->ctx can change while we're waiting to acquire 936 * ctx->mutex we must be careful and use the below perf_event_ctx_lock() 937 * function. 
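 *
 * For example (added; perf_event_disable() further down does exactly this):
 *
 *	struct perf_event_context *ctx = perf_event_ctx_lock(event);
 *	_perf_event_disable(event);
 *	perf_event_ctx_unlock(event, ctx);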
938 * 939 * Lock order: 940 * task_struct::perf_event_mutex 941 * perf_event_context::mutex 942 * perf_event_context::lock 943 * perf_event::child_mutex; 944 * perf_event::mmap_mutex 945 * mmap_sem 946 */ 947 static struct perf_event_context * 948 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) 949 { 950 struct perf_event_context *ctx; 951 952 again: 953 rcu_read_lock(); 954 ctx = ACCESS_ONCE(event->ctx); 955 if (!atomic_inc_not_zero(&ctx->refcount)) { 956 rcu_read_unlock(); 957 goto again; 958 } 959 rcu_read_unlock(); 960 961 mutex_lock_nested(&ctx->mutex, nesting); 962 if (event->ctx != ctx) { 963 mutex_unlock(&ctx->mutex); 964 put_ctx(ctx); 965 goto again; 966 } 967 968 return ctx; 969 } 970 971 static inline struct perf_event_context * 972 perf_event_ctx_lock(struct perf_event *event) 973 { 974 return perf_event_ctx_lock_nested(event, 0); 975 } 976 977 static void perf_event_ctx_unlock(struct perf_event *event, 978 struct perf_event_context *ctx) 979 { 980 mutex_unlock(&ctx->mutex); 981 put_ctx(ctx); 982 } 983 984 /* 985 * This must be done under the ctx->lock, such as to serialize against 986 * context_equiv(), therefore we cannot call put_ctx() since that might end up 987 * calling scheduler related locks and ctx->lock nests inside those. 988 */ 989 static __must_check struct perf_event_context * 990 unclone_ctx(struct perf_event_context *ctx) 991 { 992 struct perf_event_context *parent_ctx = ctx->parent_ctx; 993 994 lockdep_assert_held(&ctx->lock); 995 996 if (parent_ctx) 997 ctx->parent_ctx = NULL; 998 ctx->generation++; 999 1000 return parent_ctx; 1001 } 1002 1003 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) 1004 { 1005 /* 1006 * only top level events have the pid namespace they were created in 1007 */ 1008 if (event->parent) 1009 event = event->parent; 1010 1011 return task_tgid_nr_ns(p, event->ns); 1012 } 1013 1014 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) 1015 { 1016 /* 1017 * only top level events have the pid namespace they were created in 1018 */ 1019 if (event->parent) 1020 event = event->parent; 1021 1022 return task_pid_nr_ns(p, event->ns); 1023 } 1024 1025 /* 1026 * If we inherit events we want to return the parent event id 1027 * to userspace. 1028 */ 1029 static u64 primary_event_id(struct perf_event *event) 1030 { 1031 u64 id = event->id; 1032 1033 if (event->parent) 1034 id = event->parent->id; 1035 1036 return id; 1037 } 1038 1039 /* 1040 * Get the perf_event_context for a task and lock it. 1041 * This has to cope with with the fact that until it is locked, 1042 * the context could get moved to another task. 1043 */ 1044 static struct perf_event_context * 1045 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) 1046 { 1047 struct perf_event_context *ctx; 1048 1049 retry: 1050 /* 1051 * One of the few rules of preemptible RCU is that one cannot do 1052 * rcu_read_unlock() while holding a scheduler (or nested) lock when 1053 * part of the read side critical section was preemptible -- see 1054 * rcu_read_unlock_special(). 1055 * 1056 * Since ctx->lock nests under rq->lock we must ensure the entire read 1057 * side critical section is non-preemptible. 
1058 */ 1059 preempt_disable(); 1060 rcu_read_lock(); 1061 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); 1062 if (ctx) { 1063 /* 1064 * If this context is a clone of another, it might 1065 * get swapped for another underneath us by 1066 * perf_event_task_sched_out, though the 1067 * rcu_read_lock() protects us from any context 1068 * getting freed. Lock the context and check if it 1069 * got swapped before we could get the lock, and retry 1070 * if so. If we locked the right context, then it 1071 * can't get swapped on us any more. 1072 */ 1073 raw_spin_lock_irqsave(&ctx->lock, *flags); 1074 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { 1075 raw_spin_unlock_irqrestore(&ctx->lock, *flags); 1076 rcu_read_unlock(); 1077 preempt_enable(); 1078 goto retry; 1079 } 1080 1081 if (!atomic_inc_not_zero(&ctx->refcount)) { 1082 raw_spin_unlock_irqrestore(&ctx->lock, *flags); 1083 ctx = NULL; 1084 } 1085 } 1086 rcu_read_unlock(); 1087 preempt_enable(); 1088 return ctx; 1089 } 1090 1091 /* 1092 * Get the context for a task and increment its pin_count so it 1093 * can't get swapped to another task. This also increments its 1094 * reference count so that the context can't get freed. 1095 */ 1096 static struct perf_event_context * 1097 perf_pin_task_context(struct task_struct *task, int ctxn) 1098 { 1099 struct perf_event_context *ctx; 1100 unsigned long flags; 1101 1102 ctx = perf_lock_task_context(task, ctxn, &flags); 1103 if (ctx) { 1104 ++ctx->pin_count; 1105 raw_spin_unlock_irqrestore(&ctx->lock, flags); 1106 } 1107 return ctx; 1108 } 1109 1110 static void perf_unpin_context(struct perf_event_context *ctx) 1111 { 1112 unsigned long flags; 1113 1114 raw_spin_lock_irqsave(&ctx->lock, flags); 1115 --ctx->pin_count; 1116 raw_spin_unlock_irqrestore(&ctx->lock, flags); 1117 } 1118 1119 /* 1120 * Update the record of the current time in a context. 1121 */ 1122 static void update_context_time(struct perf_event_context *ctx) 1123 { 1124 u64 now = perf_clock(); 1125 1126 ctx->time += now - ctx->timestamp; 1127 ctx->timestamp = now; 1128 } 1129 1130 static u64 perf_event_time(struct perf_event *event) 1131 { 1132 struct perf_event_context *ctx = event->ctx; 1133 1134 if (is_cgroup_event(event)) 1135 return perf_cgroup_event_time(event); 1136 1137 return ctx ? ctx->time : 0; 1138 } 1139 1140 /* 1141 * Update the total_time_enabled and total_time_running fields for a event. 1142 * The caller of this function needs to hold the ctx->lock. 1143 */ 1144 static void update_event_times(struct perf_event *event) 1145 { 1146 struct perf_event_context *ctx = event->ctx; 1147 u64 run_end; 1148 1149 if (event->state < PERF_EVENT_STATE_INACTIVE || 1150 event->group_leader->state < PERF_EVENT_STATE_INACTIVE) 1151 return; 1152 /* 1153 * in cgroup mode, time_enabled represents 1154 * the time the event was enabled AND active 1155 * tasks were in the monitored cgroup. This is 1156 * independent of the activity of the context as 1157 * there may be a mix of cgroup and non-cgroup events. 1158 * 1159 * That is why we treat cgroup events differently 1160 * here. 
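	 *
	 * Worked example (added, non-cgroup case, single run): an event
	 * enabled at ctx->time = 100us, scheduled in at 120us, scheduled
	 * out at 160us and examined while the context is active at
	 * ctx->time = 180us ends up with
	 *
	 *	total_time_enabled = 180us - 100us = 80us
	 *	total_time_running = 160us - 120us = 40us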
1161 */ 1162 if (is_cgroup_event(event)) 1163 run_end = perf_cgroup_event_time(event); 1164 else if (ctx->is_active) 1165 run_end = ctx->time; 1166 else 1167 run_end = event->tstamp_stopped; 1168 1169 event->total_time_enabled = run_end - event->tstamp_enabled; 1170 1171 if (event->state == PERF_EVENT_STATE_INACTIVE) 1172 run_end = event->tstamp_stopped; 1173 else 1174 run_end = perf_event_time(event); 1175 1176 event->total_time_running = run_end - event->tstamp_running; 1177 1178 } 1179 1180 /* 1181 * Update total_time_enabled and total_time_running for all events in a group. 1182 */ 1183 static void update_group_times(struct perf_event *leader) 1184 { 1185 struct perf_event *event; 1186 1187 update_event_times(leader); 1188 list_for_each_entry(event, &leader->sibling_list, group_entry) 1189 update_event_times(event); 1190 } 1191 1192 static struct list_head * 1193 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) 1194 { 1195 if (event->attr.pinned) 1196 return &ctx->pinned_groups; 1197 else 1198 return &ctx->flexible_groups; 1199 } 1200 1201 /* 1202 * Add a event from the lists for its context. 1203 * Must be called with ctx->mutex and ctx->lock held. 1204 */ 1205 static void 1206 list_add_event(struct perf_event *event, struct perf_event_context *ctx) 1207 { 1208 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); 1209 event->attach_state |= PERF_ATTACH_CONTEXT; 1210 1211 /* 1212 * If we're a stand alone event or group leader, we go to the context 1213 * list, group events are kept attached to the group so that 1214 * perf_group_detach can, at all times, locate all siblings. 1215 */ 1216 if (event->group_leader == event) { 1217 struct list_head *list; 1218 1219 if (is_software_event(event)) 1220 event->group_flags |= PERF_GROUP_SOFTWARE; 1221 1222 list = ctx_group_list(event, ctx); 1223 list_add_tail(&event->group_entry, list); 1224 } 1225 1226 if (is_cgroup_event(event)) 1227 ctx->nr_cgroups++; 1228 1229 list_add_rcu(&event->event_entry, &ctx->event_list); 1230 ctx->nr_events++; 1231 if (event->attr.inherit_stat) 1232 ctx->nr_stat++; 1233 1234 ctx->generation++; 1235 } 1236 1237 /* 1238 * Initialize event state based on the perf_event_attr::disabled. 1239 */ 1240 static inline void perf_event__state_init(struct perf_event *event) 1241 { 1242 event->state = event->attr.disabled ? 
PERF_EVENT_STATE_OFF : 1243 PERF_EVENT_STATE_INACTIVE; 1244 } 1245 1246 static void __perf_event_read_size(struct perf_event *event, int nr_siblings) 1247 { 1248 int entry = sizeof(u64); /* value */ 1249 int size = 0; 1250 int nr = 1; 1251 1252 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1253 size += sizeof(u64); 1254 1255 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1256 size += sizeof(u64); 1257 1258 if (event->attr.read_format & PERF_FORMAT_ID) 1259 entry += sizeof(u64); 1260 1261 if (event->attr.read_format & PERF_FORMAT_GROUP) { 1262 nr += nr_siblings; 1263 size += sizeof(u64); 1264 } 1265 1266 size += entry * nr; 1267 event->read_size = size; 1268 } 1269 1270 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) 1271 { 1272 struct perf_sample_data *data; 1273 u16 size = 0; 1274 1275 if (sample_type & PERF_SAMPLE_IP) 1276 size += sizeof(data->ip); 1277 1278 if (sample_type & PERF_SAMPLE_ADDR) 1279 size += sizeof(data->addr); 1280 1281 if (sample_type & PERF_SAMPLE_PERIOD) 1282 size += sizeof(data->period); 1283 1284 if (sample_type & PERF_SAMPLE_WEIGHT) 1285 size += sizeof(data->weight); 1286 1287 if (sample_type & PERF_SAMPLE_READ) 1288 size += event->read_size; 1289 1290 if (sample_type & PERF_SAMPLE_DATA_SRC) 1291 size += sizeof(data->data_src.val); 1292 1293 if (sample_type & PERF_SAMPLE_TRANSACTION) 1294 size += sizeof(data->txn); 1295 1296 event->header_size = size; 1297 } 1298 1299 /* 1300 * Called at perf_event creation and when events are attached/detached from a 1301 * group. 1302 */ 1303 static void perf_event__header_size(struct perf_event *event) 1304 { 1305 __perf_event_read_size(event, 1306 event->group_leader->nr_siblings); 1307 __perf_event_header_size(event, event->attr.sample_type); 1308 } 1309 1310 static void perf_event__id_header_size(struct perf_event *event) 1311 { 1312 struct perf_sample_data *data; 1313 u64 sample_type = event->attr.sample_type; 1314 u16 size = 0; 1315 1316 if (sample_type & PERF_SAMPLE_TID) 1317 size += sizeof(data->tid_entry); 1318 1319 if (sample_type & PERF_SAMPLE_TIME) 1320 size += sizeof(data->time); 1321 1322 if (sample_type & PERF_SAMPLE_IDENTIFIER) 1323 size += sizeof(data->id); 1324 1325 if (sample_type & PERF_SAMPLE_ID) 1326 size += sizeof(data->id); 1327 1328 if (sample_type & PERF_SAMPLE_STREAM_ID) 1329 size += sizeof(data->stream_id); 1330 1331 if (sample_type & PERF_SAMPLE_CPU) 1332 size += sizeof(data->cpu_entry); 1333 1334 event->id_header_size = size; 1335 } 1336 1337 static bool perf_event_validate_size(struct perf_event *event) 1338 { 1339 /* 1340 * The values computed here will be over-written when we actually 1341 * attach the event. 1342 */ 1343 __perf_event_read_size(event, event->group_leader->nr_siblings + 1); 1344 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); 1345 perf_event__id_header_size(event); 1346 1347 /* 1348 * Sum the lot; should not exceed the 64k limit we have on records. 1349 * Conservative limit to allow for callchains and other variable fields. 1350 */ 1351 if (event->read_size + event->header_size + 1352 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) 1353 return false; 1354 1355 return true; 1356 } 1357 1358 static void perf_group_attach(struct perf_event *event) 1359 { 1360 struct perf_event *group_leader = event->group_leader, *pos; 1361 1362 /* 1363 * We can have double attach due to group movement in perf_event_open. 
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	WARN_ON_ONCE(group_leader->ctx != event->ctx);

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
	    !is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event.
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	ctx->generation++;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
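	 *
	 * Illustrative before/after (added): closing the leader L of the
	 * group {L, S1, S2} moves S1 and S2 onto the list L was on
	 * (pinned or flexible) as independent singleton events, each with
	 * sibling->group_leader == sibling.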
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;

		WARN_ON_ONCE(sibling->ctx != event->ctx);
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

/*
 * User event without the task.
 */
static bool is_orphaned_event(struct perf_event *event)
{
	return event && !is_kernel_event(event) && !event->owner;
}

/*
 * Event has a parent but the parent's task finished and it's
 * alive only because of children holding a reference.
 */
static bool is_orphaned_child(struct perf_event *event)
{
	return is_orphaned_event(event->parent);
}

static void orphans_remove_work(struct work_struct *work);

static void schedule_orphans_remove(struct perf_event_context *ctx)
{
	if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
		return;

	if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
		get_ctx(ctx);
		ctx->orphans_remove_sched = true;
	}
}

static int __init perf_workqueue_init(void)
{
	perf_wq = create_singlethread_workqueue("perf");
	WARN(!perf_wq, "failed to create perf workqueue\n");
	return perf_wq ? 0 : -1;
}

core_initcall(perf_workqueue_init);

static inline int pmu_filter_match(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;
	return pmu->filter_match ?
pmu->filter_match(event) : 1; 1535 } 1536 1537 static inline int 1538 event_filter_match(struct perf_event *event) 1539 { 1540 return (event->cpu == -1 || event->cpu == smp_processor_id()) 1541 && perf_cgroup_match(event) && pmu_filter_match(event); 1542 } 1543 1544 static void 1545 event_sched_out(struct perf_event *event, 1546 struct perf_cpu_context *cpuctx, 1547 struct perf_event_context *ctx) 1548 { 1549 u64 tstamp = perf_event_time(event); 1550 u64 delta; 1551 1552 WARN_ON_ONCE(event->ctx != ctx); 1553 lockdep_assert_held(&ctx->lock); 1554 1555 /* 1556 * An event which could not be activated because of 1557 * filter mismatch still needs to have its timings 1558 * maintained, otherwise bogus information is return 1559 * via read() for time_enabled, time_running: 1560 */ 1561 if (event->state == PERF_EVENT_STATE_INACTIVE 1562 && !event_filter_match(event)) { 1563 delta = tstamp - event->tstamp_stopped; 1564 event->tstamp_running += delta; 1565 event->tstamp_stopped = tstamp; 1566 } 1567 1568 if (event->state != PERF_EVENT_STATE_ACTIVE) 1569 return; 1570 1571 perf_pmu_disable(event->pmu); 1572 1573 event->state = PERF_EVENT_STATE_INACTIVE; 1574 if (event->pending_disable) { 1575 event->pending_disable = 0; 1576 event->state = PERF_EVENT_STATE_OFF; 1577 } 1578 event->tstamp_stopped = tstamp; 1579 event->pmu->del(event, 0); 1580 event->oncpu = -1; 1581 1582 if (!is_software_event(event)) 1583 cpuctx->active_oncpu--; 1584 if (!--ctx->nr_active) 1585 perf_event_ctx_deactivate(ctx); 1586 if (event->attr.freq && event->attr.sample_freq) 1587 ctx->nr_freq--; 1588 if (event->attr.exclusive || !cpuctx->active_oncpu) 1589 cpuctx->exclusive = 0; 1590 1591 if (is_orphaned_child(event)) 1592 schedule_orphans_remove(ctx); 1593 1594 perf_pmu_enable(event->pmu); 1595 } 1596 1597 static void 1598 group_sched_out(struct perf_event *group_event, 1599 struct perf_cpu_context *cpuctx, 1600 struct perf_event_context *ctx) 1601 { 1602 struct perf_event *event; 1603 int state = group_event->state; 1604 1605 event_sched_out(group_event, cpuctx, ctx); 1606 1607 /* 1608 * Schedule out siblings (if any): 1609 */ 1610 list_for_each_entry(event, &group_event->sibling_list, group_entry) 1611 event_sched_out(event, cpuctx, ctx); 1612 1613 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) 1614 cpuctx->exclusive = 0; 1615 } 1616 1617 struct remove_event { 1618 struct perf_event *event; 1619 bool detach_group; 1620 }; 1621 1622 /* 1623 * Cross CPU call to remove a performance event 1624 * 1625 * We disable the event on the hardware level first. After that we 1626 * remove it from the context list. 1627 */ 1628 static int __perf_remove_from_context(void *info) 1629 { 1630 struct remove_event *re = info; 1631 struct perf_event *event = re->event; 1632 struct perf_event_context *ctx = event->ctx; 1633 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1634 1635 raw_spin_lock(&ctx->lock); 1636 event_sched_out(event, cpuctx, ctx); 1637 if (re->detach_group) 1638 perf_group_detach(event); 1639 list_del_event(event, ctx); 1640 if (!ctx->nr_events && cpuctx->task_ctx == ctx) { 1641 ctx->is_active = 0; 1642 cpuctx->task_ctx = NULL; 1643 } 1644 raw_spin_unlock(&ctx->lock); 1645 1646 return 0; 1647 } 1648 1649 1650 /* 1651 * Remove the event from a task's (or a CPU's) list of events. 1652 * 1653 * CPU events are removed with a smp call. For task events we only 1654 * call when the task is on a CPU. 
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event, bool detach_group)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;
	struct remove_event re = {
		.event = event,
		.detach_group = detach_group,
	};

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call. The removal can
		 * fail if the CPU is currently offline, but in that case we
		 * already called __perf_remove_from_context from
		 * perf_event_exit_cpu.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, &re))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event; our
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	if (detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
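 *
 * One concrete route here (added), matching the callers listed above:
 *
 *	ioctl(event_fd, PERF_EVENT_IOC_DISABLE, 0);
 *
 * reaches _perf_event_disable() via perf_ioctl() and
 * perf_event_for_each_child().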
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
static void _perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Strictly speaking kernel users cannot create groups and therefore this
 * interface does not need the perf_event_ctx_lock() magic.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx;

	ctx = perf_event_ctx_lock(event);
	_perf_event_disable(event);
	perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_disable);

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *	tstamp - ctx->timestamp
	 * is equivalent to
	 *	tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no schedule out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the same time source all along. We believe it
	 * is cleaner and simpler to understand.
1852 */ 1853 if (is_cgroup_event(event)) 1854 perf_cgroup_set_shadow_time(event, tstamp); 1855 else 1856 event->shadow_ctx_time = tstamp - ctx->timestamp; 1857 } 1858 1859 #define MAX_INTERRUPTS (~0ULL) 1860 1861 static void perf_log_throttle(struct perf_event *event, int enable); 1862 static void perf_log_itrace_start(struct perf_event *event); 1863 1864 static int 1865 event_sched_in(struct perf_event *event, 1866 struct perf_cpu_context *cpuctx, 1867 struct perf_event_context *ctx) 1868 { 1869 u64 tstamp = perf_event_time(event); 1870 int ret = 0; 1871 1872 lockdep_assert_held(&ctx->lock); 1873 1874 if (event->state <= PERF_EVENT_STATE_OFF) 1875 return 0; 1876 1877 event->state = PERF_EVENT_STATE_ACTIVE; 1878 event->oncpu = smp_processor_id(); 1879 1880 /* 1881 * Unthrottle events, since we scheduled we might have missed several 1882 * ticks already, also for a heavily scheduling task there is little 1883 * guarantee it'll get a tick in a timely manner. 1884 */ 1885 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { 1886 perf_log_throttle(event, 1); 1887 event->hw.interrupts = 0; 1888 } 1889 1890 /* 1891 * The new state must be visible before we turn it on in the hardware: 1892 */ 1893 smp_wmb(); 1894 1895 perf_pmu_disable(event->pmu); 1896 1897 perf_set_shadow_time(event, ctx, tstamp); 1898 1899 perf_log_itrace_start(event); 1900 1901 if (event->pmu->add(event, PERF_EF_START)) { 1902 event->state = PERF_EVENT_STATE_INACTIVE; 1903 event->oncpu = -1; 1904 ret = -EAGAIN; 1905 goto out; 1906 } 1907 1908 event->tstamp_running += tstamp - event->tstamp_stopped; 1909 1910 if (!is_software_event(event)) 1911 cpuctx->active_oncpu++; 1912 if (!ctx->nr_active++) 1913 perf_event_ctx_activate(ctx); 1914 if (event->attr.freq && event->attr.sample_freq) 1915 ctx->nr_freq++; 1916 1917 if (event->attr.exclusive) 1918 cpuctx->exclusive = 1; 1919 1920 if (is_orphaned_child(event)) 1921 schedule_orphans_remove(ctx); 1922 1923 out: 1924 perf_pmu_enable(event->pmu); 1925 1926 return ret; 1927 } 1928 1929 static int 1930 group_sched_in(struct perf_event *group_event, 1931 struct perf_cpu_context *cpuctx, 1932 struct perf_event_context *ctx) 1933 { 1934 struct perf_event *event, *partial_group = NULL; 1935 struct pmu *pmu = ctx->pmu; 1936 u64 now = ctx->time; 1937 bool simulate = false; 1938 1939 if (group_event->state == PERF_EVENT_STATE_OFF) 1940 return 0; 1941 1942 pmu->start_txn(pmu, PERF_PMU_TXN_ADD); 1943 1944 if (event_sched_in(group_event, cpuctx, ctx)) { 1945 pmu->cancel_txn(pmu); 1946 perf_mux_hrtimer_restart(cpuctx); 1947 return -EAGAIN; 1948 } 1949 1950 /* 1951 * Schedule in siblings as one group (if any): 1952 */ 1953 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 1954 if (event_sched_in(event, cpuctx, ctx)) { 1955 partial_group = event; 1956 goto group_error; 1957 } 1958 } 1959 1960 if (!pmu->commit_txn(pmu)) 1961 return 0; 1962 1963 group_error: 1964 /* 1965 * Groups can be scheduled in as one unit only, so undo any 1966 * partial group before returning: 1967 * The events up to the failed event are scheduled out normally, 1968 * tstamp_stopped will be updated. 1969 * 1970 * The failed events and the remaining siblings need to have 1971 * their timings updated as if they had gone thru event_sched_in() 1972 * and event_sched_out(). This is required to get consistent timings 1973 * across the group. 
This also takes care of the case where the group 1974 * could never be scheduled by ensuring tstamp_stopped is set to mark 1975 * the time the event was actually stopped, such that time delta 1976 * calculation in update_event_times() is correct. 1977 */ 1978 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 1979 if (event == partial_group) 1980 simulate = true; 1981 1982 if (simulate) { 1983 event->tstamp_running += now - event->tstamp_stopped; 1984 event->tstamp_stopped = now; 1985 } else { 1986 event_sched_out(event, cpuctx, ctx); 1987 } 1988 } 1989 event_sched_out(group_event, cpuctx, ctx); 1990 1991 pmu->cancel_txn(pmu); 1992 1993 perf_mux_hrtimer_restart(cpuctx); 1994 1995 return -EAGAIN; 1996 } 1997 1998 /* 1999 * Work out whether we can put this event group on the CPU now. 2000 */ 2001 static int group_can_go_on(struct perf_event *event, 2002 struct perf_cpu_context *cpuctx, 2003 int can_add_hw) 2004 { 2005 /* 2006 * Groups consisting entirely of software events can always go on. 2007 */ 2008 if (event->group_flags & PERF_GROUP_SOFTWARE) 2009 return 1; 2010 /* 2011 * If an exclusive group is already on, no other hardware 2012 * events can go on. 2013 */ 2014 if (cpuctx->exclusive) 2015 return 0; 2016 /* 2017 * If this group is exclusive and there are already 2018 * events on the CPU, it can't go on. 2019 */ 2020 if (event->attr.exclusive && cpuctx->active_oncpu) 2021 return 0; 2022 /* 2023 * Otherwise, try to add it if all previous groups were able 2024 * to go on. 2025 */ 2026 return can_add_hw; 2027 } 2028 2029 static void add_event_to_ctx(struct perf_event *event, 2030 struct perf_event_context *ctx) 2031 { 2032 u64 tstamp = perf_event_time(event); 2033 2034 list_add_event(event, ctx); 2035 perf_group_attach(event); 2036 event->tstamp_enabled = tstamp; 2037 event->tstamp_running = tstamp; 2038 event->tstamp_stopped = tstamp; 2039 } 2040 2041 static void task_ctx_sched_out(struct perf_event_context *ctx); 2042 static void 2043 ctx_sched_in(struct perf_event_context *ctx, 2044 struct perf_cpu_context *cpuctx, 2045 enum event_type_t event_type, 2046 struct task_struct *task); 2047 2048 static void perf_event_sched_in(struct perf_cpu_context *cpuctx, 2049 struct perf_event_context *ctx, 2050 struct task_struct *task) 2051 { 2052 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); 2053 if (ctx) 2054 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); 2055 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); 2056 if (ctx) 2057 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); 2058 } 2059 2060 /* 2061 * Cross CPU call to install and enable a performance event 2062 * 2063 * Must be called with ctx->mutex held 2064 */ 2065 static int __perf_install_in_context(void *info) 2066 { 2067 struct perf_event *event = info; 2068 struct perf_event_context *ctx = event->ctx; 2069 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2070 struct perf_event_context *task_ctx = cpuctx->task_ctx; 2071 struct task_struct *task = current; 2072 2073 perf_ctx_lock(cpuctx, task_ctx); 2074 perf_pmu_disable(cpuctx->ctx.pmu); 2075 2076 /* 2077 * If there was an active task_ctx schedule it out. 2078 */ 2079 if (task_ctx) 2080 task_ctx_sched_out(task_ctx); 2081 2082 /* 2083 * If the context we're installing events in is not the 2084 * active task_ctx, flip them. 
2085 */ 2086 if (ctx->task && task_ctx != ctx) { 2087 if (task_ctx) 2088 raw_spin_unlock(&task_ctx->lock); 2089 raw_spin_lock(&ctx->lock); 2090 task_ctx = ctx; 2091 } 2092 2093 if (task_ctx) { 2094 cpuctx->task_ctx = task_ctx; 2095 task = task_ctx->task; 2096 } 2097 2098 cpu_ctx_sched_out(cpuctx, EVENT_ALL); 2099 2100 update_context_time(ctx); 2101 /* 2102 * update cgrp time only if current cgrp 2103 * matches event->cgrp. Must be done before 2104 * calling add_event_to_ctx() 2105 */ 2106 update_cgrp_time_from_event(event); 2107 2108 add_event_to_ctx(event, ctx); 2109 2110 /* 2111 * Schedule everything back in 2112 */ 2113 perf_event_sched_in(cpuctx, task_ctx, task); 2114 2115 perf_pmu_enable(cpuctx->ctx.pmu); 2116 perf_ctx_unlock(cpuctx, task_ctx); 2117 2118 return 0; 2119 } 2120 2121 /* 2122 * Attach a performance event to a context 2123 * 2124 * First we add the event to the list with the hardware enable bit 2125 * in event->hw_config cleared. 2126 * 2127 * If the event is attached to a task which is on a CPU we use a smp 2128 * call to enable it in the task context. The task might have been 2129 * scheduled away, but we check this in the smp call again. 2130 */ 2131 static void 2132 perf_install_in_context(struct perf_event_context *ctx, 2133 struct perf_event *event, 2134 int cpu) 2135 { 2136 struct task_struct *task = ctx->task; 2137 2138 lockdep_assert_held(&ctx->mutex); 2139 2140 event->ctx = ctx; 2141 if (event->cpu != -1) 2142 event->cpu = cpu; 2143 2144 if (!task) { 2145 /* 2146 * Per cpu events are installed via an smp call and 2147 * the install is always successful. 2148 */ 2149 cpu_function_call(cpu, __perf_install_in_context, event); 2150 return; 2151 } 2152 2153 retry: 2154 if (!task_function_call(task, __perf_install_in_context, event)) 2155 return; 2156 2157 raw_spin_lock_irq(&ctx->lock); 2158 /* 2159 * If we failed to find a running task, but find the context active now 2160 * that we've acquired the ctx->lock, retry. 2161 */ 2162 if (ctx->is_active) { 2163 raw_spin_unlock_irq(&ctx->lock); 2164 /* 2165 * Reload the task pointer, it might have been changed by 2166 * a concurrent perf_event_context_sched_out(). 2167 */ 2168 task = ctx->task; 2169 goto retry; 2170 } 2171 2172 /* 2173 * Since the task isn't running, its safe to add the event, us holding 2174 * the ctx->lock ensures the task won't get scheduled in. 2175 */ 2176 add_event_to_ctx(event, ctx); 2177 raw_spin_unlock_irq(&ctx->lock); 2178 } 2179 2180 /* 2181 * Put a event into inactive state and update time fields. 2182 * Enabling the leader of a group effectively enables all 2183 * the group members that aren't explicitly disabled, so we 2184 * have to update their ->tstamp_enabled also. 2185 * Note: this works for group members as well as group leaders 2186 * since the non-leader members' sibling_lists will be empty. 
2187 */ 2188 static void __perf_event_mark_enabled(struct perf_event *event) 2189 { 2190 struct perf_event *sub; 2191 u64 tstamp = perf_event_time(event); 2192 2193 event->state = PERF_EVENT_STATE_INACTIVE; 2194 event->tstamp_enabled = tstamp - event->total_time_enabled; 2195 list_for_each_entry(sub, &event->sibling_list, group_entry) { 2196 if (sub->state >= PERF_EVENT_STATE_INACTIVE) 2197 sub->tstamp_enabled = tstamp - sub->total_time_enabled; 2198 } 2199 } 2200 2201 /* 2202 * Cross CPU call to enable a performance event 2203 */ 2204 static int __perf_event_enable(void *info) 2205 { 2206 struct perf_event *event = info; 2207 struct perf_event_context *ctx = event->ctx; 2208 struct perf_event *leader = event->group_leader; 2209 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2210 int err; 2211 2212 /* 2213 * There's a time window between 'ctx->is_active' check 2214 * in perf_event_enable function and this place having: 2215 * - IRQs on 2216 * - ctx->lock unlocked 2217 * 2218 * where the task could be killed and 'ctx' deactivated 2219 * by perf_event_exit_task. 2220 */ 2221 if (!ctx->is_active) 2222 return -EINVAL; 2223 2224 raw_spin_lock(&ctx->lock); 2225 update_context_time(ctx); 2226 2227 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2228 goto unlock; 2229 2230 /* 2231 * set current task's cgroup time reference point 2232 */ 2233 perf_cgroup_set_timestamp(current, ctx); 2234 2235 __perf_event_mark_enabled(event); 2236 2237 if (!event_filter_match(event)) { 2238 if (is_cgroup_event(event)) 2239 perf_cgroup_defer_enabled(event); 2240 goto unlock; 2241 } 2242 2243 /* 2244 * If the event is in a group and isn't the group leader, 2245 * then don't put it on unless the group is on. 2246 */ 2247 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 2248 goto unlock; 2249 2250 if (!group_can_go_on(event, cpuctx, 1)) { 2251 err = -EEXIST; 2252 } else { 2253 if (event == leader) 2254 err = group_sched_in(event, cpuctx, ctx); 2255 else 2256 err = event_sched_in(event, cpuctx, ctx); 2257 } 2258 2259 if (err) { 2260 /* 2261 * If this event can't go on and it's part of a 2262 * group, then the whole group has to come off. 2263 */ 2264 if (leader != event) { 2265 group_sched_out(leader, cpuctx, ctx); 2266 perf_mux_hrtimer_restart(cpuctx); 2267 } 2268 if (leader->attr.pinned) { 2269 update_group_times(leader); 2270 leader->state = PERF_EVENT_STATE_ERROR; 2271 } 2272 } 2273 2274 unlock: 2275 raw_spin_unlock(&ctx->lock); 2276 2277 return 0; 2278 } 2279 2280 /* 2281 * Enable a event. 2282 * 2283 * If event->ctx is a cloned context, callers must make sure that 2284 * every task struct that event->ctx->task could possibly point to 2285 * remains valid. This condition is satisfied when called through 2286 * perf_event_for_each_child or perf_event_for_each as described 2287 * for perf_event_disable. 2288 */ 2289 static void _perf_event_enable(struct perf_event *event) 2290 { 2291 struct perf_event_context *ctx = event->ctx; 2292 struct task_struct *task = ctx->task; 2293 2294 if (!task) { 2295 /* 2296 * Enable the event on the cpu that it's on 2297 */ 2298 cpu_function_call(event->cpu, __perf_event_enable, event); 2299 return; 2300 } 2301 2302 raw_spin_lock_irq(&ctx->lock); 2303 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2304 goto out; 2305 2306 /* 2307 * If the event is in error state, clear that first. 
2308 * That way, if we see the event in error state below, we 2309 * know that it has gone back into error state, as distinct 2310 * from the task having been scheduled away before the 2311 * cross-call arrived. 2312 */ 2313 if (event->state == PERF_EVENT_STATE_ERROR) 2314 event->state = PERF_EVENT_STATE_OFF; 2315 2316 retry: 2317 if (!ctx->is_active) { 2318 __perf_event_mark_enabled(event); 2319 goto out; 2320 } 2321 2322 raw_spin_unlock_irq(&ctx->lock); 2323 2324 if (!task_function_call(task, __perf_event_enable, event)) 2325 return; 2326 2327 raw_spin_lock_irq(&ctx->lock); 2328 2329 /* 2330 * If the context is active and the event is still off, 2331 * we need to retry the cross-call. 2332 */ 2333 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { 2334 /* 2335 * task could have been flipped by a concurrent 2336 * perf_event_context_sched_out() 2337 */ 2338 task = ctx->task; 2339 goto retry; 2340 } 2341 2342 out: 2343 raw_spin_unlock_irq(&ctx->lock); 2344 } 2345 2346 /* 2347 * See perf_event_disable(); 2348 */ 2349 void perf_event_enable(struct perf_event *event) 2350 { 2351 struct perf_event_context *ctx; 2352 2353 ctx = perf_event_ctx_lock(event); 2354 _perf_event_enable(event); 2355 perf_event_ctx_unlock(event, ctx); 2356 } 2357 EXPORT_SYMBOL_GPL(perf_event_enable); 2358 2359 static int _perf_event_refresh(struct perf_event *event, int refresh) 2360 { 2361 /* 2362 * not supported on inherited events 2363 */ 2364 if (event->attr.inherit || !is_sampling_event(event)) 2365 return -EINVAL; 2366 2367 atomic_add(refresh, &event->event_limit); 2368 _perf_event_enable(event); 2369 2370 return 0; 2371 } 2372 2373 /* 2374 * See perf_event_disable() 2375 */ 2376 int perf_event_refresh(struct perf_event *event, int refresh) 2377 { 2378 struct perf_event_context *ctx; 2379 int ret; 2380 2381 ctx = perf_event_ctx_lock(event); 2382 ret = _perf_event_refresh(event, refresh); 2383 perf_event_ctx_unlock(event, ctx); 2384 2385 return ret; 2386 } 2387 EXPORT_SYMBOL_GPL(perf_event_refresh); 2388 2389 static void ctx_sched_out(struct perf_event_context *ctx, 2390 struct perf_cpu_context *cpuctx, 2391 enum event_type_t event_type) 2392 { 2393 struct perf_event *event; 2394 int is_active = ctx->is_active; 2395 2396 ctx->is_active &= ~event_type; 2397 if (likely(!ctx->nr_events)) 2398 return; 2399 2400 update_context_time(ctx); 2401 update_cgrp_time_from_cpuctx(cpuctx); 2402 if (!ctx->nr_active) 2403 return; 2404 2405 perf_pmu_disable(ctx->pmu); 2406 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { 2407 list_for_each_entry(event, &ctx->pinned_groups, group_entry) 2408 group_sched_out(event, cpuctx, ctx); 2409 } 2410 2411 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { 2412 list_for_each_entry(event, &ctx->flexible_groups, group_entry) 2413 group_sched_out(event, cpuctx, ctx); 2414 } 2415 perf_pmu_enable(ctx->pmu); 2416 } 2417 2418 /* 2419 * Test whether two contexts are equivalent, i.e. whether they have both been 2420 * cloned from the same version of the same context. 2421 * 2422 * Equivalence is measured using a generation number in the context that is 2423 * incremented on each modification to it; see unclone_ctx(), list_add_event() 2424 * and list_del_event(). 
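 * For example, a cloned context records its parent's generation in
 * ->parent_gen, and the clone-swap optimization in
 * perf_event_context_sched_out() stays valid only while that snapshot is
 * still current (illustrative numbers):
 *
 *	parent->generation == 5, child->parent_gen == 5  -> equivalent
 *	an event is added to the parent, bumping its generation to 6
 *	parent->generation == 6, child->parent_gen == 5  -> not equivalent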
2425 */ 2426 static int context_equiv(struct perf_event_context *ctx1, 2427 struct perf_event_context *ctx2) 2428 { 2429 lockdep_assert_held(&ctx1->lock); 2430 lockdep_assert_held(&ctx2->lock); 2431 2432 /* Pinning disables the swap optimization */ 2433 if (ctx1->pin_count || ctx2->pin_count) 2434 return 0; 2435 2436 /* If ctx1 is the parent of ctx2 */ 2437 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) 2438 return 1; 2439 2440 /* If ctx2 is the parent of ctx1 */ 2441 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) 2442 return 1; 2443 2444 /* 2445 * If ctx1 and ctx2 have the same parent; we flatten the parent 2446 * hierarchy, see perf_event_init_context(). 2447 */ 2448 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && 2449 ctx1->parent_gen == ctx2->parent_gen) 2450 return 1; 2451 2452 /* Unmatched */ 2453 return 0; 2454 } 2455 2456 static void __perf_event_sync_stat(struct perf_event *event, 2457 struct perf_event *next_event) 2458 { 2459 u64 value; 2460 2461 if (!event->attr.inherit_stat) 2462 return; 2463 2464 /* 2465 * Update the event value, we cannot use perf_event_read() 2466 * because we're in the middle of a context switch and have IRQs 2467 * disabled, which upsets smp_call_function_single(), however 2468 * we know the event must be on the current CPU, therefore we 2469 * don't need to use it. 2470 */ 2471 switch (event->state) { 2472 case PERF_EVENT_STATE_ACTIVE: 2473 event->pmu->read(event); 2474 /* fall-through */ 2475 2476 case PERF_EVENT_STATE_INACTIVE: 2477 update_event_times(event); 2478 break; 2479 2480 default: 2481 break; 2482 } 2483 2484 /* 2485 * In order to keep per-task stats reliable we need to flip the event 2486 * values when we flip the contexts. 2487 */ 2488 value = local64_read(&next_event->count); 2489 value = local64_xchg(&event->count, value); 2490 local64_set(&next_event->count, value); 2491 2492 swap(event->total_time_enabled, next_event->total_time_enabled); 2493 swap(event->total_time_running, next_event->total_time_running); 2494 2495 /* 2496 * Since we swizzled the values, update the user visible data too. 
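 * ('User visible data' being the mmap()ed perf_event_mmap_page in the
 * ring buffer's user page; perf_event_update_userpage() refreshes the
 * values that self-monitoring user space reads from it.)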
2497 */ 2498 perf_event_update_userpage(event); 2499 perf_event_update_userpage(next_event); 2500 } 2501 2502 static void perf_event_sync_stat(struct perf_event_context *ctx, 2503 struct perf_event_context *next_ctx) 2504 { 2505 struct perf_event *event, *next_event; 2506 2507 if (!ctx->nr_stat) 2508 return; 2509 2510 update_context_time(ctx); 2511 2512 event = list_first_entry(&ctx->event_list, 2513 struct perf_event, event_entry); 2514 2515 next_event = list_first_entry(&next_ctx->event_list, 2516 struct perf_event, event_entry); 2517 2518 while (&event->event_entry != &ctx->event_list && 2519 &next_event->event_entry != &next_ctx->event_list) { 2520 2521 __perf_event_sync_stat(event, next_event); 2522 2523 event = list_next_entry(event, event_entry); 2524 next_event = list_next_entry(next_event, event_entry); 2525 } 2526 } 2527 2528 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, 2529 struct task_struct *next) 2530 { 2531 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; 2532 struct perf_event_context *next_ctx; 2533 struct perf_event_context *parent, *next_parent; 2534 struct perf_cpu_context *cpuctx; 2535 int do_switch = 1; 2536 2537 if (likely(!ctx)) 2538 return; 2539 2540 cpuctx = __get_cpu_context(ctx); 2541 if (!cpuctx->task_ctx) 2542 return; 2543 2544 rcu_read_lock(); 2545 next_ctx = next->perf_event_ctxp[ctxn]; 2546 if (!next_ctx) 2547 goto unlock; 2548 2549 parent = rcu_dereference(ctx->parent_ctx); 2550 next_parent = rcu_dereference(next_ctx->parent_ctx); 2551 2552 /* If neither context have a parent context; they cannot be clones. */ 2553 if (!parent && !next_parent) 2554 goto unlock; 2555 2556 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 2557 /* 2558 * Looks like the two contexts are clones, so we might be 2559 * able to optimize the context switch. We lock both 2560 * contexts and check that they are clones under the 2561 * lock (including re-checking that neither has been 2562 * uncloned in the meantime). It doesn't matter which 2563 * order we take the locks because no other cpu could 2564 * be trying to lock both of these tasks. 2565 */ 2566 raw_spin_lock(&ctx->lock); 2567 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 2568 if (context_equiv(ctx, next_ctx)) { 2569 /* 2570 * XXX do we need a memory barrier of sorts 2571 * wrt to rcu_dereference() of perf_event_ctxp 2572 */ 2573 task->perf_event_ctxp[ctxn] = next_ctx; 2574 next->perf_event_ctxp[ctxn] = ctx; 2575 ctx->task = next; 2576 next_ctx->task = task; 2577 2578 swap(ctx->task_ctx_data, next_ctx->task_ctx_data); 2579 2580 do_switch = 0; 2581 2582 perf_event_sync_stat(ctx, next_ctx); 2583 } 2584 raw_spin_unlock(&next_ctx->lock); 2585 raw_spin_unlock(&ctx->lock); 2586 } 2587 unlock: 2588 rcu_read_unlock(); 2589 2590 if (do_switch) { 2591 raw_spin_lock(&ctx->lock); 2592 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2593 cpuctx->task_ctx = NULL; 2594 raw_spin_unlock(&ctx->lock); 2595 } 2596 } 2597 2598 void perf_sched_cb_dec(struct pmu *pmu) 2599 { 2600 this_cpu_dec(perf_sched_cb_usages); 2601 } 2602 2603 void perf_sched_cb_inc(struct pmu *pmu) 2604 { 2605 this_cpu_inc(perf_sched_cb_usages); 2606 } 2607 2608 /* 2609 * This function provides the context switch callback to the lower code 2610 * layer. It is invoked ONLY when the context switch callback is enabled. 
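 * A PMU that needs per-task state saved and restored across context
 * switches would, roughly (an illustrative sketch, the names below are
 * made up rather than taken from an existing driver):
 *
 *	static void example_sched_task(struct perf_event_context *ctx,
 *				       bool sched_in)
 *	{
 *		if (sched_in)
 *			restore_hw_state();
 *		else
 *			save_hw_state();
 *	}
 *
 * set .sched_task = example_sched_task in its struct pmu, and bracket the
 * lifetime of events needing the callback with perf_sched_cb_inc(pmu) and
 * perf_sched_cb_dec(pmu) above; the callback only runs while this CPU's
 * perf_sched_cb_usages count is non-zero.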
2611 */ 2612 static void perf_pmu_sched_task(struct task_struct *prev, 2613 struct task_struct *next, 2614 bool sched_in) 2615 { 2616 struct perf_cpu_context *cpuctx; 2617 struct pmu *pmu; 2618 unsigned long flags; 2619 2620 if (prev == next) 2621 return; 2622 2623 local_irq_save(flags); 2624 2625 rcu_read_lock(); 2626 2627 list_for_each_entry_rcu(pmu, &pmus, entry) { 2628 if (pmu->sched_task) { 2629 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 2630 2631 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2632 2633 perf_pmu_disable(pmu); 2634 2635 pmu->sched_task(cpuctx->task_ctx, sched_in); 2636 2637 perf_pmu_enable(pmu); 2638 2639 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2640 } 2641 } 2642 2643 rcu_read_unlock(); 2644 2645 local_irq_restore(flags); 2646 } 2647 2648 static void perf_event_switch(struct task_struct *task, 2649 struct task_struct *next_prev, bool sched_in); 2650 2651 #define for_each_task_context_nr(ctxn) \ 2652 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) 2653 2654 /* 2655 * Called from scheduler to remove the events of the current task, 2656 * with interrupts disabled. 2657 * 2658 * We stop each event and update the event value in event->count. 2659 * 2660 * This does not protect us against NMI, but disable() 2661 * sets the disabled bit in the control field of event _before_ 2662 * accessing the event control register. If a NMI hits, then it will 2663 * not restart the event. 2664 */ 2665 void __perf_event_task_sched_out(struct task_struct *task, 2666 struct task_struct *next) 2667 { 2668 int ctxn; 2669 2670 if (__this_cpu_read(perf_sched_cb_usages)) 2671 perf_pmu_sched_task(task, next, false); 2672 2673 if (atomic_read(&nr_switch_events)) 2674 perf_event_switch(task, next, false); 2675 2676 for_each_task_context_nr(ctxn) 2677 perf_event_context_sched_out(task, ctxn, next); 2678 2679 /* 2680 * if cgroup events exist on this CPU, then we need 2681 * to check if we have to switch out PMU state. 2682 * cgroup event are system-wide mode only 2683 */ 2684 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2685 perf_cgroup_sched_out(task, next); 2686 } 2687 2688 static void task_ctx_sched_out(struct perf_event_context *ctx) 2689 { 2690 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2691 2692 if (!cpuctx->task_ctx) 2693 return; 2694 2695 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 2696 return; 2697 2698 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2699 cpuctx->task_ctx = NULL; 2700 } 2701 2702 /* 2703 * Called with IRQs disabled 2704 */ 2705 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 2706 enum event_type_t event_type) 2707 { 2708 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); 2709 } 2710 2711 static void 2712 ctx_pinned_sched_in(struct perf_event_context *ctx, 2713 struct perf_cpu_context *cpuctx) 2714 { 2715 struct perf_event *event; 2716 2717 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { 2718 if (event->state <= PERF_EVENT_STATE_OFF) 2719 continue; 2720 if (!event_filter_match(event)) 2721 continue; 2722 2723 /* may need to reset tstamp_enabled */ 2724 if (is_cgroup_event(event)) 2725 perf_cgroup_mark_enabled(event, ctx); 2726 2727 if (group_can_go_on(event, cpuctx, 1)) 2728 group_sched_in(event, cpuctx, ctx); 2729 2730 /* 2731 * If this pinned group hasn't been scheduled, 2732 * put it in error state. 
2733 */ 2734 if (event->state == PERF_EVENT_STATE_INACTIVE) { 2735 update_group_times(event); 2736 event->state = PERF_EVENT_STATE_ERROR; 2737 } 2738 } 2739 } 2740 2741 static void 2742 ctx_flexible_sched_in(struct perf_event_context *ctx, 2743 struct perf_cpu_context *cpuctx) 2744 { 2745 struct perf_event *event; 2746 int can_add_hw = 1; 2747 2748 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { 2749 /* Ignore events in OFF or ERROR state */ 2750 if (event->state <= PERF_EVENT_STATE_OFF) 2751 continue; 2752 /* 2753 * Listen to the 'cpu' scheduling filter constraint 2754 * of events: 2755 */ 2756 if (!event_filter_match(event)) 2757 continue; 2758 2759 /* may need to reset tstamp_enabled */ 2760 if (is_cgroup_event(event)) 2761 perf_cgroup_mark_enabled(event, ctx); 2762 2763 if (group_can_go_on(event, cpuctx, can_add_hw)) { 2764 if (group_sched_in(event, cpuctx, ctx)) 2765 can_add_hw = 0; 2766 } 2767 } 2768 } 2769 2770 static void 2771 ctx_sched_in(struct perf_event_context *ctx, 2772 struct perf_cpu_context *cpuctx, 2773 enum event_type_t event_type, 2774 struct task_struct *task) 2775 { 2776 u64 now; 2777 int is_active = ctx->is_active; 2778 2779 ctx->is_active |= event_type; 2780 if (likely(!ctx->nr_events)) 2781 return; 2782 2783 now = perf_clock(); 2784 ctx->timestamp = now; 2785 perf_cgroup_set_timestamp(task, ctx); 2786 /* 2787 * First go through the list and put on any pinned groups 2788 * in order to give them the best chance of going on. 2789 */ 2790 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) 2791 ctx_pinned_sched_in(ctx, cpuctx); 2792 2793 /* Then walk through the lower prio flexible groups */ 2794 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) 2795 ctx_flexible_sched_in(ctx, cpuctx); 2796 } 2797 2798 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 2799 enum event_type_t event_type, 2800 struct task_struct *task) 2801 { 2802 struct perf_event_context *ctx = &cpuctx->ctx; 2803 2804 ctx_sched_in(ctx, cpuctx, event_type, task); 2805 } 2806 2807 static void perf_event_context_sched_in(struct perf_event_context *ctx, 2808 struct task_struct *task) 2809 { 2810 struct perf_cpu_context *cpuctx; 2811 2812 cpuctx = __get_cpu_context(ctx); 2813 if (cpuctx->task_ctx == ctx) 2814 return; 2815 2816 perf_ctx_lock(cpuctx, ctx); 2817 perf_pmu_disable(ctx->pmu); 2818 /* 2819 * We want to keep the following priority order: 2820 * cpu pinned (that don't need to move), task pinned, 2821 * cpu flexible, task flexible. 2822 */ 2823 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2824 2825 if (ctx->nr_events) 2826 cpuctx->task_ctx = ctx; 2827 2828 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); 2829 2830 perf_pmu_enable(ctx->pmu); 2831 perf_ctx_unlock(cpuctx, ctx); 2832 } 2833 2834 /* 2835 * Called from scheduler to add the events of the current task 2836 * with interrupts disabled. 2837 * 2838 * We restore the event value and then enable it. 2839 * 2840 * This does not protect us against NMI, but enable() 2841 * sets the enabled bit in the control field of event _before_ 2842 * accessing the event control register. If a NMI hits, then it will 2843 * keep the event running. 
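 * Note that the ordering below deliberately mirrors
 * __perf_event_task_sched_out(): on sched-out the PMU sched_task()
 * callbacks run before the task contexts are scheduled out, while here
 * the contexts are scheduled in first and the callbacks run last.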
2844 */ 2845 void __perf_event_task_sched_in(struct task_struct *prev, 2846 struct task_struct *task) 2847 { 2848 struct perf_event_context *ctx; 2849 int ctxn; 2850 2851 for_each_task_context_nr(ctxn) { 2852 ctx = task->perf_event_ctxp[ctxn]; 2853 if (likely(!ctx)) 2854 continue; 2855 2856 perf_event_context_sched_in(ctx, task); 2857 } 2858 /* 2859 * if cgroup events exist on this CPU, then we need 2860 * to check if we have to switch in PMU state. 2861 * cgroup event are system-wide mode only 2862 */ 2863 if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) 2864 perf_cgroup_sched_in(prev, task); 2865 2866 if (atomic_read(&nr_switch_events)) 2867 perf_event_switch(task, prev, true); 2868 2869 if (__this_cpu_read(perf_sched_cb_usages)) 2870 perf_pmu_sched_task(prev, task, true); 2871 } 2872 2873 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2874 { 2875 u64 frequency = event->attr.sample_freq; 2876 u64 sec = NSEC_PER_SEC; 2877 u64 divisor, dividend; 2878 2879 int count_fls, nsec_fls, frequency_fls, sec_fls; 2880 2881 count_fls = fls64(count); 2882 nsec_fls = fls64(nsec); 2883 frequency_fls = fls64(frequency); 2884 sec_fls = 30; 2885 2886 /* 2887 * We got @count in @nsec, with a target of sample_freq HZ 2888 * the target period becomes: 2889 * 2890 * @count * 10^9 2891 * period = ------------------- 2892 * @nsec * sample_freq 2893 * 2894 */ 2895 2896 /* 2897 * Reduce accuracy by one bit such that @a and @b converge 2898 * to a similar magnitude. 2899 */ 2900 #define REDUCE_FLS(a, b) \ 2901 do { \ 2902 if (a##_fls > b##_fls) { \ 2903 a >>= 1; \ 2904 a##_fls--; \ 2905 } else { \ 2906 b >>= 1; \ 2907 b##_fls--; \ 2908 } \ 2909 } while (0) 2910 2911 /* 2912 * Reduce accuracy until either term fits in a u64, then proceed with 2913 * the other, so that finally we can do a u64/u64 division. 2914 */ 2915 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { 2916 REDUCE_FLS(nsec, frequency); 2917 REDUCE_FLS(sec, count); 2918 } 2919 2920 if (count_fls + sec_fls > 64) { 2921 divisor = nsec * frequency; 2922 2923 while (count_fls + sec_fls > 64) { 2924 REDUCE_FLS(count, sec); 2925 divisor >>= 1; 2926 } 2927 2928 dividend = count * sec; 2929 } else { 2930 dividend = count * sec; 2931 2932 while (nsec_fls + frequency_fls > 64) { 2933 REDUCE_FLS(nsec, frequency); 2934 dividend >>= 1; 2935 } 2936 2937 divisor = nsec * frequency; 2938 } 2939 2940 if (!divisor) 2941 return dividend; 2942 2943 return div64_u64(dividend, divisor); 2944 } 2945 2946 static DEFINE_PER_CPU(int, perf_throttled_count); 2947 static DEFINE_PER_CPU(u64, perf_throttled_seq); 2948 2949 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) 2950 { 2951 struct hw_perf_event *hwc = &event->hw; 2952 s64 period, sample_period; 2953 s64 delta; 2954 2955 period = perf_calculate_period(event, nsec, count); 2956 2957 delta = (s64)(period - hwc->sample_period); 2958 delta = (delta + 7) / 8; /* low pass filter */ 2959 2960 sample_period = hwc->sample_period + delta; 2961 2962 if (!sample_period) 2963 sample_period = 1; 2964 2965 hwc->sample_period = sample_period; 2966 2967 if (local64_read(&hwc->period_left) > 8*sample_period) { 2968 if (disable) 2969 event->pmu->stop(event, PERF_EF_UPDATE); 2970 2971 local64_set(&hwc->period_left, 0); 2972 2973 if (disable) 2974 event->pmu->start(event, PERF_EF_RELOAD); 2975 } 2976 } 2977 2978 /* 2979 * combine freq adjustment with unthrottling to avoid two passes over the 2980 * events. 
At the same time, make sure, having freq events does not change 2981 * the rate of unthrottling as that would introduce bias. 2982 */ 2983 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, 2984 int needs_unthr) 2985 { 2986 struct perf_event *event; 2987 struct hw_perf_event *hwc; 2988 u64 now, period = TICK_NSEC; 2989 s64 delta; 2990 2991 /* 2992 * only need to iterate over all events iff: 2993 * - context have events in frequency mode (needs freq adjust) 2994 * - there are events to unthrottle on this cpu 2995 */ 2996 if (!(ctx->nr_freq || needs_unthr)) 2997 return; 2998 2999 raw_spin_lock(&ctx->lock); 3000 perf_pmu_disable(ctx->pmu); 3001 3002 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 3003 if (event->state != PERF_EVENT_STATE_ACTIVE) 3004 continue; 3005 3006 if (!event_filter_match(event)) 3007 continue; 3008 3009 perf_pmu_disable(event->pmu); 3010 3011 hwc = &event->hw; 3012 3013 if (hwc->interrupts == MAX_INTERRUPTS) { 3014 hwc->interrupts = 0; 3015 perf_log_throttle(event, 1); 3016 event->pmu->start(event, 0); 3017 } 3018 3019 if (!event->attr.freq || !event->attr.sample_freq) 3020 goto next; 3021 3022 /* 3023 * stop the event and update event->count 3024 */ 3025 event->pmu->stop(event, PERF_EF_UPDATE); 3026 3027 now = local64_read(&event->count); 3028 delta = now - hwc->freq_count_stamp; 3029 hwc->freq_count_stamp = now; 3030 3031 /* 3032 * restart the event 3033 * reload only if value has changed 3034 * we have stopped the event so tell that 3035 * to perf_adjust_period() to avoid stopping it 3036 * twice. 3037 */ 3038 if (delta > 0) 3039 perf_adjust_period(event, period, delta, false); 3040 3041 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 3042 next: 3043 perf_pmu_enable(event->pmu); 3044 } 3045 3046 perf_pmu_enable(ctx->pmu); 3047 raw_spin_unlock(&ctx->lock); 3048 } 3049 3050 /* 3051 * Round-robin a context's events: 3052 */ 3053 static void rotate_ctx(struct perf_event_context *ctx) 3054 { 3055 /* 3056 * Rotate the first entry last of non-pinned groups. Rotation might be 3057 * disabled by the inheritance code. 
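 * list_rotate_left() moves the first flexible group to the tail, e.g. a
 * list A, B, C becomes B, C, A, so over successive rotations every
 * flexible group periodically gets first pick of the hardware counters.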
3058 */ 3059 if (!ctx->rotate_disable) 3060 list_rotate_left(&ctx->flexible_groups); 3061 } 3062 3063 static int perf_rotate_context(struct perf_cpu_context *cpuctx) 3064 { 3065 struct perf_event_context *ctx = NULL; 3066 int rotate = 0; 3067 3068 if (cpuctx->ctx.nr_events) { 3069 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 3070 rotate = 1; 3071 } 3072 3073 ctx = cpuctx->task_ctx; 3074 if (ctx && ctx->nr_events) { 3075 if (ctx->nr_events != ctx->nr_active) 3076 rotate = 1; 3077 } 3078 3079 if (!rotate) 3080 goto done; 3081 3082 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 3083 perf_pmu_disable(cpuctx->ctx.pmu); 3084 3085 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 3086 if (ctx) 3087 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 3088 3089 rotate_ctx(&cpuctx->ctx); 3090 if (ctx) 3091 rotate_ctx(ctx); 3092 3093 perf_event_sched_in(cpuctx, ctx, current); 3094 3095 perf_pmu_enable(cpuctx->ctx.pmu); 3096 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 3097 done: 3098 3099 return rotate; 3100 } 3101 3102 #ifdef CONFIG_NO_HZ_FULL 3103 bool perf_event_can_stop_tick(void) 3104 { 3105 if (atomic_read(&nr_freq_events) || 3106 __this_cpu_read(perf_throttled_count)) 3107 return false; 3108 else 3109 return true; 3110 } 3111 #endif 3112 3113 void perf_event_task_tick(void) 3114 { 3115 struct list_head *head = this_cpu_ptr(&active_ctx_list); 3116 struct perf_event_context *ctx, *tmp; 3117 int throttled; 3118 3119 WARN_ON(!irqs_disabled()); 3120 3121 __this_cpu_inc(perf_throttled_seq); 3122 throttled = __this_cpu_xchg(perf_throttled_count, 0); 3123 3124 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) 3125 perf_adjust_freq_unthr_context(ctx, throttled); 3126 } 3127 3128 static int event_enable_on_exec(struct perf_event *event, 3129 struct perf_event_context *ctx) 3130 { 3131 if (!event->attr.enable_on_exec) 3132 return 0; 3133 3134 event->attr.enable_on_exec = 0; 3135 if (event->state >= PERF_EVENT_STATE_INACTIVE) 3136 return 0; 3137 3138 __perf_event_mark_enabled(event); 3139 3140 return 1; 3141 } 3142 3143 /* 3144 * Enable all of a task's events that have been marked enable-on-exec. 3145 * This expects task == current. 3146 */ 3147 static void perf_event_enable_on_exec(struct perf_event_context *ctx) 3148 { 3149 struct perf_event_context *clone_ctx = NULL; 3150 struct perf_event *event; 3151 unsigned long flags; 3152 int enabled = 0; 3153 int ret; 3154 3155 local_irq_save(flags); 3156 if (!ctx || !ctx->nr_events) 3157 goto out; 3158 3159 /* 3160 * We must ctxsw out cgroup events to avoid conflict 3161 * when invoking perf_task_event_sched_in() later on 3162 * in this function. Otherwise we end up trying to 3163 * ctxswin cgroup events which are already scheduled 3164 * in. 3165 */ 3166 perf_cgroup_sched_out(current, NULL); 3167 3168 raw_spin_lock(&ctx->lock); 3169 task_ctx_sched_out(ctx); 3170 3171 list_for_each_entry(event, &ctx->event_list, event_entry) { 3172 ret = event_enable_on_exec(event, ctx); 3173 if (ret) 3174 enabled = 1; 3175 } 3176 3177 /* 3178 * Unclone this context if we enabled any event. 
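 * (Severing the link to the parent context is a precaution: once events
 * here have been enabled independently of the parent, the clone-swap
 * optimization in perf_event_context_sched_out() should no longer treat
 * the two contexts as interchangeable; see context_equiv().)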
3179 */ 3180 if (enabled) 3181 clone_ctx = unclone_ctx(ctx); 3182 3183 raw_spin_unlock(&ctx->lock); 3184 3185 /* 3186 * Also calls ctxswin for cgroup events, if any: 3187 */ 3188 perf_event_context_sched_in(ctx, ctx->task); 3189 out: 3190 local_irq_restore(flags); 3191 3192 if (clone_ctx) 3193 put_ctx(clone_ctx); 3194 } 3195 3196 void perf_event_exec(void) 3197 { 3198 struct perf_event_context *ctx; 3199 int ctxn; 3200 3201 rcu_read_lock(); 3202 for_each_task_context_nr(ctxn) { 3203 ctx = current->perf_event_ctxp[ctxn]; 3204 if (!ctx) 3205 continue; 3206 3207 perf_event_enable_on_exec(ctx); 3208 } 3209 rcu_read_unlock(); 3210 } 3211 3212 struct perf_read_data { 3213 struct perf_event *event; 3214 bool group; 3215 int ret; 3216 }; 3217 3218 /* 3219 * Cross CPU call to read the hardware event 3220 */ 3221 static void __perf_event_read(void *info) 3222 { 3223 struct perf_read_data *data = info; 3224 struct perf_event *sub, *event = data->event; 3225 struct perf_event_context *ctx = event->ctx; 3226 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 3227 struct pmu *pmu = event->pmu; 3228 3229 /* 3230 * If this is a task context, we need to check whether it is 3231 * the current task context of this cpu. If not it has been 3232 * scheduled out before the smp call arrived. In that case 3233 * event->count would have been updated to a recent sample 3234 * when the event was scheduled out. 3235 */ 3236 if (ctx->task && cpuctx->task_ctx != ctx) 3237 return; 3238 3239 raw_spin_lock(&ctx->lock); 3240 if (ctx->is_active) { 3241 update_context_time(ctx); 3242 update_cgrp_time_from_event(event); 3243 } 3244 3245 update_event_times(event); 3246 if (event->state != PERF_EVENT_STATE_ACTIVE) 3247 goto unlock; 3248 3249 if (!data->group) { 3250 pmu->read(event); 3251 data->ret = 0; 3252 goto unlock; 3253 } 3254 3255 pmu->start_txn(pmu, PERF_PMU_TXN_READ); 3256 3257 pmu->read(event); 3258 3259 list_for_each_entry(sub, &event->sibling_list, group_entry) { 3260 update_event_times(sub); 3261 if (sub->state == PERF_EVENT_STATE_ACTIVE) { 3262 /* 3263 * Use sibling's PMU rather than @event's since 3264 * sibling could be on different (eg: software) PMU. 3265 */ 3266 sub->pmu->read(sub); 3267 } 3268 } 3269 3270 data->ret = pmu->commit_txn(pmu); 3271 3272 unlock: 3273 raw_spin_unlock(&ctx->lock); 3274 } 3275 3276 static inline u64 perf_event_count(struct perf_event *event) 3277 { 3278 if (event->pmu->count) 3279 return event->pmu->count(event); 3280 3281 return __perf_event_count(event); 3282 } 3283 3284 /* 3285 * NMI-safe method to read a local event, that is an event that 3286 * is: 3287 * - either for the current task, or for this CPU 3288 * - does not have inherit set, for inherited task events 3289 * will not be local and we cannot read them atomically 3290 * - must not have a pmu::count method 3291 */ 3292 u64 perf_event_read_local(struct perf_event *event) 3293 { 3294 unsigned long flags; 3295 u64 val; 3296 3297 /* 3298 * Disabling interrupts avoids all counter scheduling (context 3299 * switches, timer based rotation and IPIs). 
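 * With counter scheduling excluded on this CPU, the event cannot be
 * scheduled in or out underneath us, so the read below needs no locks;
 * that is what makes this helper usable from restricted contexts (the
 * eBPF counter-read helper, for example) where taking locks or sending
 * IPIs is not an option.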
3300 */ 3301 local_irq_save(flags); 3302 3303 /* If this is a per-task event, it must be for current */ 3304 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) && 3305 event->hw.target != current); 3306 3307 /* If this is a per-CPU event, it must be for this CPU */ 3308 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) && 3309 event->cpu != smp_processor_id()); 3310 3311 /* 3312 * It must not be an event with inherit set, we cannot read 3313 * all child counters from atomic context. 3314 */ 3315 WARN_ON_ONCE(event->attr.inherit); 3316 3317 /* 3318 * It must not have a pmu::count method, those are not 3319 * NMI safe. 3320 */ 3321 WARN_ON_ONCE(event->pmu->count); 3322 3323 /* 3324 * If the event is currently on this CPU, its either a per-task event, 3325 * or local to this CPU. Furthermore it means its ACTIVE (otherwise 3326 * oncpu == -1). 3327 */ 3328 if (event->oncpu == smp_processor_id()) 3329 event->pmu->read(event); 3330 3331 val = local64_read(&event->count); 3332 local_irq_restore(flags); 3333 3334 return val; 3335 } 3336 3337 static int perf_event_read(struct perf_event *event, bool group) 3338 { 3339 int ret = 0; 3340 3341 /* 3342 * If event is enabled and currently active on a CPU, update the 3343 * value in the event structure: 3344 */ 3345 if (event->state == PERF_EVENT_STATE_ACTIVE) { 3346 struct perf_read_data data = { 3347 .event = event, 3348 .group = group, 3349 .ret = 0, 3350 }; 3351 smp_call_function_single(event->oncpu, 3352 __perf_event_read, &data, 1); 3353 ret = data.ret; 3354 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 3355 struct perf_event_context *ctx = event->ctx; 3356 unsigned long flags; 3357 3358 raw_spin_lock_irqsave(&ctx->lock, flags); 3359 /* 3360 * may read while context is not active 3361 * (e.g., thread is blocked), in that case 3362 * we cannot update context time 3363 */ 3364 if (ctx->is_active) { 3365 update_context_time(ctx); 3366 update_cgrp_time_from_event(event); 3367 } 3368 if (group) 3369 update_group_times(event); 3370 else 3371 update_event_times(event); 3372 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3373 } 3374 3375 return ret; 3376 } 3377 3378 /* 3379 * Initialize the perf_event context in a task_struct: 3380 */ 3381 static void __perf_event_init_context(struct perf_event_context *ctx) 3382 { 3383 raw_spin_lock_init(&ctx->lock); 3384 mutex_init(&ctx->mutex); 3385 INIT_LIST_HEAD(&ctx->active_ctx_list); 3386 INIT_LIST_HEAD(&ctx->pinned_groups); 3387 INIT_LIST_HEAD(&ctx->flexible_groups); 3388 INIT_LIST_HEAD(&ctx->event_list); 3389 atomic_set(&ctx->refcount, 1); 3390 INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work); 3391 } 3392 3393 static struct perf_event_context * 3394 alloc_perf_context(struct pmu *pmu, struct task_struct *task) 3395 { 3396 struct perf_event_context *ctx; 3397 3398 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); 3399 if (!ctx) 3400 return NULL; 3401 3402 __perf_event_init_context(ctx); 3403 if (task) { 3404 ctx->task = task; 3405 get_task_struct(task); 3406 } 3407 ctx->pmu = pmu; 3408 3409 return ctx; 3410 } 3411 3412 static struct task_struct * 3413 find_lively_task_by_vpid(pid_t vpid) 3414 { 3415 struct task_struct *task; 3416 int err; 3417 3418 rcu_read_lock(); 3419 if (!vpid) 3420 task = current; 3421 else 3422 task = find_task_by_vpid(vpid); 3423 if (task) 3424 get_task_struct(task); 3425 rcu_read_unlock(); 3426 3427 if (!task) 3428 return ERR_PTR(-ESRCH); 3429 3430 /* Reuse ptrace permission checks for now. 
*/ 3431 err = -EACCES; 3432 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 3433 goto errout; 3434 3435 return task; 3436 errout: 3437 put_task_struct(task); 3438 return ERR_PTR(err); 3439 3440 } 3441 3442 /* 3443 * Returns a matching context with refcount and pincount. 3444 */ 3445 static struct perf_event_context * 3446 find_get_context(struct pmu *pmu, struct task_struct *task, 3447 struct perf_event *event) 3448 { 3449 struct perf_event_context *ctx, *clone_ctx = NULL; 3450 struct perf_cpu_context *cpuctx; 3451 void *task_ctx_data = NULL; 3452 unsigned long flags; 3453 int ctxn, err; 3454 int cpu = event->cpu; 3455 3456 if (!task) { 3457 /* Must be root to operate on a CPU event: */ 3458 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 3459 return ERR_PTR(-EACCES); 3460 3461 /* 3462 * We could be clever and allow to attach a event to an 3463 * offline CPU and activate it when the CPU comes up, but 3464 * that's for later. 3465 */ 3466 if (!cpu_online(cpu)) 3467 return ERR_PTR(-ENODEV); 3468 3469 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 3470 ctx = &cpuctx->ctx; 3471 get_ctx(ctx); 3472 ++ctx->pin_count; 3473 3474 return ctx; 3475 } 3476 3477 err = -EINVAL; 3478 ctxn = pmu->task_ctx_nr; 3479 if (ctxn < 0) 3480 goto errout; 3481 3482 if (event->attach_state & PERF_ATTACH_TASK_DATA) { 3483 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL); 3484 if (!task_ctx_data) { 3485 err = -ENOMEM; 3486 goto errout; 3487 } 3488 } 3489 3490 retry: 3491 ctx = perf_lock_task_context(task, ctxn, &flags); 3492 if (ctx) { 3493 clone_ctx = unclone_ctx(ctx); 3494 ++ctx->pin_count; 3495 3496 if (task_ctx_data && !ctx->task_ctx_data) { 3497 ctx->task_ctx_data = task_ctx_data; 3498 task_ctx_data = NULL; 3499 } 3500 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3501 3502 if (clone_ctx) 3503 put_ctx(clone_ctx); 3504 } else { 3505 ctx = alloc_perf_context(pmu, task); 3506 err = -ENOMEM; 3507 if (!ctx) 3508 goto errout; 3509 3510 if (task_ctx_data) { 3511 ctx->task_ctx_data = task_ctx_data; 3512 task_ctx_data = NULL; 3513 } 3514 3515 err = 0; 3516 mutex_lock(&task->perf_event_mutex); 3517 /* 3518 * If it has already passed perf_event_exit_task(). 3519 * we must see PF_EXITING, it takes this mutex too. 
3520 */ 3521 if (task->flags & PF_EXITING) 3522 err = -ESRCH; 3523 else if (task->perf_event_ctxp[ctxn]) 3524 err = -EAGAIN; 3525 else { 3526 get_ctx(ctx); 3527 ++ctx->pin_count; 3528 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); 3529 } 3530 mutex_unlock(&task->perf_event_mutex); 3531 3532 if (unlikely(err)) { 3533 put_ctx(ctx); 3534 3535 if (err == -EAGAIN) 3536 goto retry; 3537 goto errout; 3538 } 3539 } 3540 3541 kfree(task_ctx_data); 3542 return ctx; 3543 3544 errout: 3545 kfree(task_ctx_data); 3546 return ERR_PTR(err); 3547 } 3548 3549 static void perf_event_free_filter(struct perf_event *event); 3550 static void perf_event_free_bpf_prog(struct perf_event *event); 3551 3552 static void free_event_rcu(struct rcu_head *head) 3553 { 3554 struct perf_event *event; 3555 3556 event = container_of(head, struct perf_event, rcu_head); 3557 if (event->ns) 3558 put_pid_ns(event->ns); 3559 perf_event_free_filter(event); 3560 kfree(event); 3561 } 3562 3563 static void ring_buffer_attach(struct perf_event *event, 3564 struct ring_buffer *rb); 3565 3566 static void unaccount_event_cpu(struct perf_event *event, int cpu) 3567 { 3568 if (event->parent) 3569 return; 3570 3571 if (is_cgroup_event(event)) 3572 atomic_dec(&per_cpu(perf_cgroup_events, cpu)); 3573 } 3574 3575 static void unaccount_event(struct perf_event *event) 3576 { 3577 if (event->parent) 3578 return; 3579 3580 if (event->attach_state & PERF_ATTACH_TASK) 3581 static_key_slow_dec_deferred(&perf_sched_events); 3582 if (event->attr.mmap || event->attr.mmap_data) 3583 atomic_dec(&nr_mmap_events); 3584 if (event->attr.comm) 3585 atomic_dec(&nr_comm_events); 3586 if (event->attr.task) 3587 atomic_dec(&nr_task_events); 3588 if (event->attr.freq) 3589 atomic_dec(&nr_freq_events); 3590 if (event->attr.context_switch) { 3591 static_key_slow_dec_deferred(&perf_sched_events); 3592 atomic_dec(&nr_switch_events); 3593 } 3594 if (is_cgroup_event(event)) 3595 static_key_slow_dec_deferred(&perf_sched_events); 3596 if (has_branch_stack(event)) 3597 static_key_slow_dec_deferred(&perf_sched_events); 3598 3599 unaccount_event_cpu(event, event->cpu); 3600 } 3601 3602 /* 3603 * The following implement mutual exclusion of events on "exclusive" pmus 3604 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled 3605 * at a time, so we disallow creating events that might conflict, namely: 3606 * 3607 * 1) cpu-wide events in the presence of per-task events, 3608 * 2) per-task events in the presence of cpu-wide events, 3609 * 3) two matching events on the same context. 3610 * 3611 * The former two cases are handled in the allocation path (perf_event_alloc(), 3612 * __free_event()), the latter -- before the first perf_install_in_context(). 3613 */ 3614 static int exclusive_event_init(struct perf_event *event) 3615 { 3616 struct pmu *pmu = event->pmu; 3617 3618 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3619 return 0; 3620 3621 /* 3622 * Prevent co-existence of per-task and cpu-wide events on the 3623 * same exclusive pmu. 3624 * 3625 * Negative pmu::exclusive_cnt means there are cpu-wide 3626 * events on this "exclusive" pmu, positive means there are 3627 * per-task events. 3628 * 3629 * Since this is called in perf_event_alloc() path, event::ctx 3630 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK 3631 * to mean "per-task event", because unlike other attach states it 3632 * never gets cleared. 
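 * For example (illustrative numbers): two cpu-wide events on such a pmu
 * leave exclusive_cnt at -2, so creating a per-task event then fails the
 * atomic_inc_unless_negative() below and is rejected with -EBUSY.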
3633 */ 3634 if (event->attach_state & PERF_ATTACH_TASK) { 3635 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) 3636 return -EBUSY; 3637 } else { 3638 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) 3639 return -EBUSY; 3640 } 3641 3642 return 0; 3643 } 3644 3645 static void exclusive_event_destroy(struct perf_event *event) 3646 { 3647 struct pmu *pmu = event->pmu; 3648 3649 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3650 return; 3651 3652 /* see comment in exclusive_event_init() */ 3653 if (event->attach_state & PERF_ATTACH_TASK) 3654 atomic_dec(&pmu->exclusive_cnt); 3655 else 3656 atomic_inc(&pmu->exclusive_cnt); 3657 } 3658 3659 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) 3660 { 3661 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && 3662 (e1->cpu == e2->cpu || 3663 e1->cpu == -1 || 3664 e2->cpu == -1)) 3665 return true; 3666 return false; 3667 } 3668 3669 /* Called under the same ctx::mutex as perf_install_in_context() */ 3670 static bool exclusive_event_installable(struct perf_event *event, 3671 struct perf_event_context *ctx) 3672 { 3673 struct perf_event *iter_event; 3674 struct pmu *pmu = event->pmu; 3675 3676 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) 3677 return true; 3678 3679 list_for_each_entry(iter_event, &ctx->event_list, event_entry) { 3680 if (exclusive_event_match(iter_event, event)) 3681 return false; 3682 } 3683 3684 return true; 3685 } 3686 3687 static void __free_event(struct perf_event *event) 3688 { 3689 if (!event->parent) { 3690 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 3691 put_callchain_buffers(); 3692 } 3693 3694 perf_event_free_bpf_prog(event); 3695 3696 if (event->destroy) 3697 event->destroy(event); 3698 3699 if (event->ctx) 3700 put_ctx(event->ctx); 3701 3702 if (event->pmu) { 3703 exclusive_event_destroy(event); 3704 module_put(event->pmu->module); 3705 } 3706 3707 call_rcu(&event->rcu_head, free_event_rcu); 3708 } 3709 3710 static void _free_event(struct perf_event *event) 3711 { 3712 irq_work_sync(&event->pending); 3713 3714 unaccount_event(event); 3715 3716 if (event->rb) { 3717 /* 3718 * Can happen when we close an event with re-directed output. 3719 * 3720 * Since we have a 0 refcount, perf_mmap_close() will skip 3721 * over us; possibly making our ring_buffer_put() the last. 3722 */ 3723 mutex_lock(&event->mmap_mutex); 3724 ring_buffer_attach(event, NULL); 3725 mutex_unlock(&event->mmap_mutex); 3726 } 3727 3728 if (is_cgroup_event(event)) 3729 perf_detach_cgroup(event); 3730 3731 __free_event(event); 3732 } 3733 3734 /* 3735 * Used to free events which have a known refcount of 1, such as in error paths 3736 * where the event isn't exposed yet and inherited events. 3737 */ 3738 static void free_event(struct perf_event *event) 3739 { 3740 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, 3741 "unexpected event refcount: %ld; ptr=%p\n", 3742 atomic_long_read(&event->refcount), event)) { 3743 /* leak to avoid use-after-free */ 3744 return; 3745 } 3746 3747 _free_event(event); 3748 } 3749 3750 /* 3751 * Remove user event from the owner task. 3752 */ 3753 static void perf_remove_from_owner(struct perf_event *event) 3754 { 3755 struct task_struct *owner; 3756 3757 rcu_read_lock(); 3758 owner = ACCESS_ONCE(event->owner); 3759 /* 3760 * Matches the smp_wmb() in perf_event_exit_task(). If we observe 3761 * !owner it means the list deletion is complete and we can indeed 3762 * free this event, otherwise we need to serialize on 3763 * owner->perf_event_mutex. 
3764 */ 3765 smp_read_barrier_depends(); 3766 if (owner) { 3767 /* 3768 * Since delayed_put_task_struct() also drops the last 3769 * task reference we can safely take a new reference 3770 * while holding the rcu_read_lock(). 3771 */ 3772 get_task_struct(owner); 3773 } 3774 rcu_read_unlock(); 3775 3776 if (owner) { 3777 /* 3778 * If we're here through perf_event_exit_task() we're already 3779 * holding ctx->mutex which would be an inversion wrt. the 3780 * normal lock order. 3781 * 3782 * However we can safely take this lock because it's the child 3783 * ctx->mutex. 3784 */ 3785 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); 3786 3787 /* 3788 * We have to re-check the event->owner field, if it is cleared 3789 * we raced with perf_event_exit_task(), acquiring the mutex 3790 * ensured they're done, and we can proceed with freeing the 3791 * event. 3792 */ 3793 if (event->owner) 3794 list_del_init(&event->owner_entry); 3795 mutex_unlock(&owner->perf_event_mutex); 3796 put_task_struct(owner); 3797 } 3798 } 3799 3800 static void put_event(struct perf_event *event) 3801 { 3802 struct perf_event_context *ctx; 3803 3804 if (!atomic_long_dec_and_test(&event->refcount)) 3805 return; 3806 3807 if (!is_kernel_event(event)) 3808 perf_remove_from_owner(event); 3809 3810 /* 3811 * There are two ways this annotation is useful: 3812 * 3813 * 1) there is a lock recursion from perf_event_exit_task, 3814 * see the comment there. 3815 * 3816 * 2) there is a lock-inversion with mmap_sem through 3817 * perf_read_group(), which takes faults while 3818 * holding ctx->mutex, however this is called after 3819 * the last filedesc died, so there is no possibility 3820 * to trigger the AB-BA case. 3821 */ 3822 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); 3823 WARN_ON_ONCE(ctx->parent_ctx); 3824 perf_remove_from_context(event, true); 3825 perf_event_ctx_unlock(event, ctx); 3826 3827 _free_event(event); 3828 } 3829 3830 int perf_event_release_kernel(struct perf_event *event) 3831 { 3832 put_event(event); 3833 return 0; 3834 } 3835 EXPORT_SYMBOL_GPL(perf_event_release_kernel); 3836 3837 /* 3838 * Called when the last reference to the file is gone. 3839 */ 3840 static int perf_release(struct inode *inode, struct file *file) 3841 { 3842 put_event(file->private_data); 3843 return 0; 3844 } 3845 3846 /* 3847 * Remove all orphaned events from the context.
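 * (An orphaned child is, roughly, an inherited event whose parent has
 * already been released; event_sched_in() notices such events via
 * is_orphaned_child() and queues this work through
 * schedule_orphans_remove().)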
3848 */ 3849 static void orphans_remove_work(struct work_struct *work) 3850 { 3851 struct perf_event_context *ctx; 3852 struct perf_event *event, *tmp; 3853 3854 ctx = container_of(work, struct perf_event_context, 3855 orphans_remove.work); 3856 3857 mutex_lock(&ctx->mutex); 3858 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { 3859 struct perf_event *parent_event = event->parent; 3860 3861 if (!is_orphaned_child(event)) 3862 continue; 3863 3864 perf_remove_from_context(event, true); 3865 3866 mutex_lock(&parent_event->child_mutex); 3867 list_del_init(&event->child_list); 3868 mutex_unlock(&parent_event->child_mutex); 3869 3870 free_event(event); 3871 put_event(parent_event); 3872 } 3873 3874 raw_spin_lock_irq(&ctx->lock); 3875 ctx->orphans_remove_sched = false; 3876 raw_spin_unlock_irq(&ctx->lock); 3877 mutex_unlock(&ctx->mutex); 3878 3879 put_ctx(ctx); 3880 } 3881 3882 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) 3883 { 3884 struct perf_event *child; 3885 u64 total = 0; 3886 3887 *enabled = 0; 3888 *running = 0; 3889 3890 mutex_lock(&event->child_mutex); 3891 3892 (void)perf_event_read(event, false); 3893 total += perf_event_count(event); 3894 3895 *enabled += event->total_time_enabled + 3896 atomic64_read(&event->child_total_time_enabled); 3897 *running += event->total_time_running + 3898 atomic64_read(&event->child_total_time_running); 3899 3900 list_for_each_entry(child, &event->child_list, child_list) { 3901 (void)perf_event_read(child, false); 3902 total += perf_event_count(child); 3903 *enabled += child->total_time_enabled; 3904 *running += child->total_time_running; 3905 } 3906 mutex_unlock(&event->child_mutex); 3907 3908 return total; 3909 } 3910 EXPORT_SYMBOL_GPL(perf_event_read_value); 3911 3912 static int __perf_read_group_add(struct perf_event *leader, 3913 u64 read_format, u64 *values) 3914 { 3915 struct perf_event *sub; 3916 int n = 1; /* skip @nr */ 3917 int ret; 3918 3919 ret = perf_event_read(leader, true); 3920 if (ret) 3921 return ret; 3922 3923 /* 3924 * Since we co-schedule groups, {enabled,running} times of siblings 3925 * will be identical to those of the leader, so we only publish one 3926 * set. 3927 */ 3928 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 3929 values[n++] += leader->total_time_enabled + 3930 atomic64_read(&leader->child_total_time_enabled); 3931 } 3932 3933 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 3934 values[n++] += leader->total_time_running + 3935 atomic64_read(&leader->child_total_time_running); 3936 } 3937 3938 /* 3939 * Write {count,id} tuples for every sibling. 
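 * Together with the header filled in by perf_read_group(), the buffer
 * handed back to user space for a read_format of PERF_FORMAT_GROUP |
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID therefore looks like:
 *
 *	{ nr, time_enabled, time_running,
 *	  { value, id } leader,
 *	  { value, id } sibling_1, ... { value, id } sibling_n }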
3940 */ 3941 values[n++] += perf_event_count(leader); 3942 if (read_format & PERF_FORMAT_ID) 3943 values[n++] = primary_event_id(leader); 3944 3945 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 3946 values[n++] += perf_event_count(sub); 3947 if (read_format & PERF_FORMAT_ID) 3948 values[n++] = primary_event_id(sub); 3949 } 3950 3951 return 0; 3952 } 3953 3954 static int perf_read_group(struct perf_event *event, 3955 u64 read_format, char __user *buf) 3956 { 3957 struct perf_event *leader = event->group_leader, *child; 3958 struct perf_event_context *ctx = leader->ctx; 3959 int ret; 3960 u64 *values; 3961 3962 lockdep_assert_held(&ctx->mutex); 3963 3964 values = kzalloc(event->read_size, GFP_KERNEL); 3965 if (!values) 3966 return -ENOMEM; 3967 3968 values[0] = 1 + leader->nr_siblings; 3969 3970 /* 3971 * By locking the child_mutex of the leader we effectively 3972 * lock the child list of all siblings.. XXX explain how. 3973 */ 3974 mutex_lock(&leader->child_mutex); 3975 3976 ret = __perf_read_group_add(leader, read_format, values); 3977 if (ret) 3978 goto unlock; 3979 3980 list_for_each_entry(child, &leader->child_list, child_list) { 3981 ret = __perf_read_group_add(child, read_format, values); 3982 if (ret) 3983 goto unlock; 3984 } 3985 3986 mutex_unlock(&leader->child_mutex); 3987 3988 ret = event->read_size; 3989 if (copy_to_user(buf, values, event->read_size)) 3990 ret = -EFAULT; 3991 goto out; 3992 3993 unlock: 3994 mutex_unlock(&leader->child_mutex); 3995 out: 3996 kfree(values); 3997 return ret; 3998 } 3999 4000 static int perf_read_one(struct perf_event *event, 4001 u64 read_format, char __user *buf) 4002 { 4003 u64 enabled, running; 4004 u64 values[4]; 4005 int n = 0; 4006 4007 values[n++] = perf_event_read_value(event, &enabled, &running); 4008 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 4009 values[n++] = enabled; 4010 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 4011 values[n++] = running; 4012 if (read_format & PERF_FORMAT_ID) 4013 values[n++] = primary_event_id(event); 4014 4015 if (copy_to_user(buf, values, n * sizeof(u64))) 4016 return -EFAULT; 4017 4018 return n * sizeof(u64); 4019 } 4020 4021 static bool is_event_hup(struct perf_event *event) 4022 { 4023 bool no_children; 4024 4025 if (event->state != PERF_EVENT_STATE_EXIT) 4026 return false; 4027 4028 mutex_lock(&event->child_mutex); 4029 no_children = list_empty(&event->child_list); 4030 mutex_unlock(&event->child_mutex); 4031 return no_children; 4032 } 4033 4034 /* 4035 * Read the performance event - simple non blocking version for now 4036 */ 4037 static ssize_t 4038 __perf_read(struct perf_event *event, char __user *buf, size_t count) 4039 { 4040 u64 read_format = event->attr.read_format; 4041 int ret; 4042 4043 /* 4044 * Return end-of-file for a read on a event that is in 4045 * error state (i.e. because it was pinned but it couldn't be 4046 * scheduled on to the CPU at some point). 
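 * (Pinned events are put into that state by ctx_pinned_sched_in() or
 * __perf_event_enable(), so a 0-byte read is the user-visible signal
 * that a pinned counter could not be kept on the hardware.)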
4047 */ 4048 if (event->state == PERF_EVENT_STATE_ERROR) 4049 return 0; 4050 4051 if (count < event->read_size) 4052 return -ENOSPC; 4053 4054 WARN_ON_ONCE(event->ctx->parent_ctx); 4055 if (read_format & PERF_FORMAT_GROUP) 4056 ret = perf_read_group(event, read_format, buf); 4057 else 4058 ret = perf_read_one(event, read_format, buf); 4059 4060 return ret; 4061 } 4062 4063 static ssize_t 4064 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 4065 { 4066 struct perf_event *event = file->private_data; 4067 struct perf_event_context *ctx; 4068 int ret; 4069 4070 ctx = perf_event_ctx_lock(event); 4071 ret = __perf_read(event, buf, count); 4072 perf_event_ctx_unlock(event, ctx); 4073 4074 return ret; 4075 } 4076 4077 static unsigned int perf_poll(struct file *file, poll_table *wait) 4078 { 4079 struct perf_event *event = file->private_data; 4080 struct ring_buffer *rb; 4081 unsigned int events = POLLHUP; 4082 4083 poll_wait(file, &event->waitq, wait); 4084 4085 if (is_event_hup(event)) 4086 return events; 4087 4088 /* 4089 * Pin the event->rb by taking event->mmap_mutex; otherwise 4090 * perf_event_set_output() can swizzle our rb and make us miss wakeups. 4091 */ 4092 mutex_lock(&event->mmap_mutex); 4093 rb = event->rb; 4094 if (rb) 4095 events = atomic_xchg(&rb->poll, 0); 4096 mutex_unlock(&event->mmap_mutex); 4097 return events; 4098 } 4099 4100 static void _perf_event_reset(struct perf_event *event) 4101 { 4102 (void)perf_event_read(event, false); 4103 local64_set(&event->count, 0); 4104 perf_event_update_userpage(event); 4105 } 4106 4107 /* 4108 * Holding the top-level event's child_mutex means that any 4109 * descendant process that has inherited this event will block 4110 * in sync_child_event if it goes to exit, thus satisfying the 4111 * task existence requirements of perf_event_enable/disable. 
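 * perf_event_for_each() below additionally walks the group leader's
 * sibling list; _perf_ioctl() picks between the two based on
 * PERF_IOC_FLAG_GROUP, so from user space, e.g.
 *
 *	ioctl(group_leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *
 * enables every member of the group (and their inherited children),
 * while a flag of 0 acts on just the one event and its children.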
4112 */ 4113 static void perf_event_for_each_child(struct perf_event *event, 4114 void (*func)(struct perf_event *)) 4115 { 4116 struct perf_event *child; 4117 4118 WARN_ON_ONCE(event->ctx->parent_ctx); 4119 4120 mutex_lock(&event->child_mutex); 4121 func(event); 4122 list_for_each_entry(child, &event->child_list, child_list) 4123 func(child); 4124 mutex_unlock(&event->child_mutex); 4125 } 4126 4127 static void perf_event_for_each(struct perf_event *event, 4128 void (*func)(struct perf_event *)) 4129 { 4130 struct perf_event_context *ctx = event->ctx; 4131 struct perf_event *sibling; 4132 4133 lockdep_assert_held(&ctx->mutex); 4134 4135 event = event->group_leader; 4136 4137 perf_event_for_each_child(event, func); 4138 list_for_each_entry(sibling, &event->sibling_list, group_entry) 4139 perf_event_for_each_child(sibling, func); 4140 } 4141 4142 struct period_event { 4143 struct perf_event *event; 4144 u64 value; 4145 }; 4146 4147 static int __perf_event_period(void *info) 4148 { 4149 struct period_event *pe = info; 4150 struct perf_event *event = pe->event; 4151 struct perf_event_context *ctx = event->ctx; 4152 u64 value = pe->value; 4153 bool active; 4154 4155 raw_spin_lock(&ctx->lock); 4156 if (event->attr.freq) { 4157 event->attr.sample_freq = value; 4158 } else { 4159 event->attr.sample_period = value; 4160 event->hw.sample_period = value; 4161 } 4162 4163 active = (event->state == PERF_EVENT_STATE_ACTIVE); 4164 if (active) { 4165 perf_pmu_disable(ctx->pmu); 4166 event->pmu->stop(event, PERF_EF_UPDATE); 4167 } 4168 4169 local64_set(&event->hw.period_left, 0); 4170 4171 if (active) { 4172 event->pmu->start(event, PERF_EF_RELOAD); 4173 perf_pmu_enable(ctx->pmu); 4174 } 4175 raw_spin_unlock(&ctx->lock); 4176 4177 return 0; 4178 } 4179 4180 static int perf_event_period(struct perf_event *event, u64 __user *arg) 4181 { 4182 struct period_event pe = { .event = event, }; 4183 struct perf_event_context *ctx = event->ctx; 4184 struct task_struct *task; 4185 u64 value; 4186 4187 if (!is_sampling_event(event)) 4188 return -EINVAL; 4189 4190 if (copy_from_user(&value, arg, sizeof(value))) 4191 return -EFAULT; 4192 4193 if (!value) 4194 return -EINVAL; 4195 4196 if (event->attr.freq && value > sysctl_perf_event_sample_rate) 4197 return -EINVAL; 4198 4199 task = ctx->task; 4200 pe.value = value; 4201 4202 if (!task) { 4203 cpu_function_call(event->cpu, __perf_event_period, &pe); 4204 return 0; 4205 } 4206 4207 retry: 4208 if (!task_function_call(task, __perf_event_period, &pe)) 4209 return 0; 4210 4211 raw_spin_lock_irq(&ctx->lock); 4212 if (ctx->is_active) { 4213 raw_spin_unlock_irq(&ctx->lock); 4214 task = ctx->task; 4215 goto retry; 4216 } 4217 4218 __perf_event_period(&pe); 4219 raw_spin_unlock_irq(&ctx->lock); 4220 4221 return 0; 4222 } 4223 4224 static const struct file_operations perf_fops; 4225 4226 static inline int perf_fget_light(int fd, struct fd *p) 4227 { 4228 struct fd f = fdget(fd); 4229 if (!f.file) 4230 return -EBADF; 4231 4232 if (f.file->f_op != &perf_fops) { 4233 fdput(f); 4234 return -EBADF; 4235 } 4236 *p = f; 4237 return 0; 4238 } 4239 4240 static int perf_event_set_output(struct perf_event *event, 4241 struct perf_event *output_event); 4242 static int perf_event_set_filter(struct perf_event *event, void __user *arg); 4243 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd); 4244 4245 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) 4246 { 4247 void (*func)(struct perf_event *); 4248 u32 flags = arg; 4249 4250 switch 
(cmd) {
4251 case PERF_EVENT_IOC_ENABLE:
4252 func = _perf_event_enable;
4253 break;
4254 case PERF_EVENT_IOC_DISABLE:
4255 func = _perf_event_disable;
4256 break;
4257 case PERF_EVENT_IOC_RESET:
4258 func = _perf_event_reset;
4259 break;
4260
4261 case PERF_EVENT_IOC_REFRESH:
4262 return _perf_event_refresh(event, arg);
4263
4264 case PERF_EVENT_IOC_PERIOD:
4265 return perf_event_period(event, (u64 __user *)arg);
4266
4267 case PERF_EVENT_IOC_ID:
4268 {
4269 u64 id = primary_event_id(event);
4270
4271 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4272 return -EFAULT;
4273 return 0;
4274 }
4275
4276 case PERF_EVENT_IOC_SET_OUTPUT:
4277 {
4278 int ret;
4279 if (arg != -1) {
4280 struct perf_event *output_event;
4281 struct fd output;
4282 ret = perf_fget_light(arg, &output);
4283 if (ret)
4284 return ret;
4285 output_event = output.file->private_data;
4286 ret = perf_event_set_output(event, output_event);
4287 fdput(output);
4288 } else {
4289 ret = perf_event_set_output(event, NULL);
4290 }
4291 return ret;
4292 }
4293
4294 case PERF_EVENT_IOC_SET_FILTER:
4295 return perf_event_set_filter(event, (void __user *)arg);
4296
4297 case PERF_EVENT_IOC_SET_BPF:
4298 return perf_event_set_bpf_prog(event, arg);
4299
4300 default:
4301 return -ENOTTY;
4302 }
4303
4304 if (flags & PERF_IOC_FLAG_GROUP)
4305 perf_event_for_each(event, func);
4306 else
4307 perf_event_for_each_child(event, func);
4308
4309 return 0;
4310 }
4311
4312 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4313 {
4314 struct perf_event *event = file->private_data;
4315 struct perf_event_context *ctx;
4316 long ret;
4317
4318 ctx = perf_event_ctx_lock(event);
4319 ret = _perf_ioctl(event, cmd, arg);
4320 perf_event_ctx_unlock(event, ctx);
4321
4322 return ret;
4323 }
4324
4325 #ifdef CONFIG_COMPAT
4326 static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4327 unsigned long arg)
4328 {
4329 switch (_IOC_NR(cmd)) {
4330 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4331 case _IOC_NR(PERF_EVENT_IOC_ID):
4332 /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
4333 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4334 cmd &= ~IOCSIZE_MASK;
4335 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4336 }
4337 break;
4338 }
4339 return perf_ioctl(file, cmd, arg);
4340 }
4341 #else
4342 # define perf_compat_ioctl NULL
4343 #endif
4344
4345 int perf_event_task_enable(void)
4346 {
4347 struct perf_event_context *ctx;
4348 struct perf_event *event;
4349
4350 mutex_lock(&current->perf_event_mutex);
4351 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4352 ctx = perf_event_ctx_lock(event);
4353 perf_event_for_each_child(event, _perf_event_enable);
4354 perf_event_ctx_unlock(event, ctx);
4355 }
4356 mutex_unlock(&current->perf_event_mutex);
4357
4358 return 0;
4359 }
4360
4361 int perf_event_task_disable(void)
4362 {
4363 struct perf_event_context *ctx;
4364 struct perf_event *event;
4365
4366 mutex_lock(&current->perf_event_mutex);
4367 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4368 ctx = perf_event_ctx_lock(event);
4369 perf_event_for_each_child(event, _perf_event_disable);
4370 perf_event_ctx_unlock(event, ctx);
4371 }
4372 mutex_unlock(&current->perf_event_mutex);
4373
4374 return 0;
4375 }
4376
4377 static int perf_event_index(struct perf_event *event)
4378 {
4379 if (event->hw.state & PERF_HES_STOPPED)
4380 return 0;
4381
4382 if (event->state != PERF_EVENT_STATE_ACTIVE)
4383 return 0;
4384
4385 return event->pmu->event_idx(event);
4386 }
4387
4388 static void
calc_timer_values(struct perf_event *event,
4389 u64 *now,
4390 u64 *enabled,
4391 u64 *running)
4392 {
4393 u64 ctx_time;
4394
4395 *now = perf_clock();
4396 ctx_time = event->shadow_ctx_time + *now;
4397 *enabled = ctx_time - event->tstamp_enabled;
4398 *running = ctx_time - event->tstamp_running;
4399 }
4400
4401 static void perf_event_init_userpage(struct perf_event *event)
4402 {
4403 struct perf_event_mmap_page *userpg;
4404 struct ring_buffer *rb;
4405
4406 rcu_read_lock();
4407 rb = rcu_dereference(event->rb);
4408 if (!rb)
4409 goto unlock;
4410
4411 userpg = rb->user_page;
4412
4413 /* Allow new userspace to detect that bit 0 is deprecated */
4414 userpg->cap_bit0_is_deprecated = 1;
4415 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
4416 userpg->data_offset = PAGE_SIZE;
4417 userpg->data_size = perf_data_size(rb);
4418
4419 unlock:
4420 rcu_read_unlock();
4421 }
4422
4423 void __weak arch_perf_update_userpage(
4424 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
4425 {
4426 }
4427
4428 /*
4429 * Callers need to ensure there can be no nesting of this function, otherwise
4430 * the seqlock logic goes bad. We cannot serialize this because the arch
4431 * code calls this from NMI context.
4432 */
4433 void perf_event_update_userpage(struct perf_event *event)
4434 {
4435 struct perf_event_mmap_page *userpg;
4436 struct ring_buffer *rb;
4437 u64 enabled, running, now;
4438
4439 rcu_read_lock();
4440 rb = rcu_dereference(event->rb);
4441 if (!rb)
4442 goto unlock;
4443
4444 /*
4445 * Compute total_time_enabled and total_time_running
4446 * based on the snapshot values taken when the event
4447 * was last scheduled in.
4448 *
4449 * We cannot simply call update_context_time()
4450 * because of locking issues, as we can be called in
4451 * NMI context.
4452 */
4453 calc_timer_values(event, &now, &enabled, &running);
4454
4455 userpg = rb->user_page;
4456 /*
4457 * Disable preemption so as to not let the corresponding user-space
4458 * spin too long if we get preempted.
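 *
 * Purely as an illustrative sketch (not part of this file), a user-space
 * reader of the mmap()ed control page is expected to pair with the
 * ->lock increments below, retrying while the sequence word changed or
 * is odd:
 *
 *	struct perf_event_mmap_page *pc = mapped_page;
 *	u32 seq;
 *	u64 index, offset, enabled, running;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		index   = pc->index;
 *		offset  = pc->offset;
 *		enabled = pc->time_enabled;
 *		running = pc->time_running;
 *		barrier();
 *	} while (pc->lock != seq || (seq & 1));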
4459 */ 4460 preempt_disable(); 4461 ++userpg->lock; 4462 barrier(); 4463 userpg->index = perf_event_index(event); 4464 userpg->offset = perf_event_count(event); 4465 if (userpg->index) 4466 userpg->offset -= local64_read(&event->hw.prev_count); 4467 4468 userpg->time_enabled = enabled + 4469 atomic64_read(&event->child_total_time_enabled); 4470 4471 userpg->time_running = running + 4472 atomic64_read(&event->child_total_time_running); 4473 4474 arch_perf_update_userpage(event, userpg, now); 4475 4476 barrier(); 4477 ++userpg->lock; 4478 preempt_enable(); 4479 unlock: 4480 rcu_read_unlock(); 4481 } 4482 4483 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 4484 { 4485 struct perf_event *event = vma->vm_file->private_data; 4486 struct ring_buffer *rb; 4487 int ret = VM_FAULT_SIGBUS; 4488 4489 if (vmf->flags & FAULT_FLAG_MKWRITE) { 4490 if (vmf->pgoff == 0) 4491 ret = 0; 4492 return ret; 4493 } 4494 4495 rcu_read_lock(); 4496 rb = rcu_dereference(event->rb); 4497 if (!rb) 4498 goto unlock; 4499 4500 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) 4501 goto unlock; 4502 4503 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); 4504 if (!vmf->page) 4505 goto unlock; 4506 4507 get_page(vmf->page); 4508 vmf->page->mapping = vma->vm_file->f_mapping; 4509 vmf->page->index = vmf->pgoff; 4510 4511 ret = 0; 4512 unlock: 4513 rcu_read_unlock(); 4514 4515 return ret; 4516 } 4517 4518 static void ring_buffer_attach(struct perf_event *event, 4519 struct ring_buffer *rb) 4520 { 4521 struct ring_buffer *old_rb = NULL; 4522 unsigned long flags; 4523 4524 if (event->rb) { 4525 /* 4526 * Should be impossible, we set this when removing 4527 * event->rb_entry and wait/clear when adding event->rb_entry. 4528 */ 4529 WARN_ON_ONCE(event->rcu_pending); 4530 4531 old_rb = event->rb; 4532 spin_lock_irqsave(&old_rb->event_lock, flags); 4533 list_del_rcu(&event->rb_entry); 4534 spin_unlock_irqrestore(&old_rb->event_lock, flags); 4535 4536 event->rcu_batches = get_state_synchronize_rcu(); 4537 event->rcu_pending = 1; 4538 } 4539 4540 if (rb) { 4541 if (event->rcu_pending) { 4542 cond_synchronize_rcu(event->rcu_batches); 4543 event->rcu_pending = 0; 4544 } 4545 4546 spin_lock_irqsave(&rb->event_lock, flags); 4547 list_add_rcu(&event->rb_entry, &rb->event_list); 4548 spin_unlock_irqrestore(&rb->event_lock, flags); 4549 } 4550 4551 rcu_assign_pointer(event->rb, rb); 4552 4553 if (old_rb) { 4554 ring_buffer_put(old_rb); 4555 /* 4556 * Since we detached before setting the new rb, so that we 4557 * could attach the new rb, we could have missed a wakeup. 4558 * Provide it now. 
4559 */ 4560 wake_up_all(&event->waitq); 4561 } 4562 } 4563 4564 static void ring_buffer_wakeup(struct perf_event *event) 4565 { 4566 struct ring_buffer *rb; 4567 4568 rcu_read_lock(); 4569 rb = rcu_dereference(event->rb); 4570 if (rb) { 4571 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 4572 wake_up_all(&event->waitq); 4573 } 4574 rcu_read_unlock(); 4575 } 4576 4577 struct ring_buffer *ring_buffer_get(struct perf_event *event) 4578 { 4579 struct ring_buffer *rb; 4580 4581 rcu_read_lock(); 4582 rb = rcu_dereference(event->rb); 4583 if (rb) { 4584 if (!atomic_inc_not_zero(&rb->refcount)) 4585 rb = NULL; 4586 } 4587 rcu_read_unlock(); 4588 4589 return rb; 4590 } 4591 4592 void ring_buffer_put(struct ring_buffer *rb) 4593 { 4594 if (!atomic_dec_and_test(&rb->refcount)) 4595 return; 4596 4597 WARN_ON_ONCE(!list_empty(&rb->event_list)); 4598 4599 call_rcu(&rb->rcu_head, rb_free_rcu); 4600 } 4601 4602 static void perf_mmap_open(struct vm_area_struct *vma) 4603 { 4604 struct perf_event *event = vma->vm_file->private_data; 4605 4606 atomic_inc(&event->mmap_count); 4607 atomic_inc(&event->rb->mmap_count); 4608 4609 if (vma->vm_pgoff) 4610 atomic_inc(&event->rb->aux_mmap_count); 4611 4612 if (event->pmu->event_mapped) 4613 event->pmu->event_mapped(event); 4614 } 4615 4616 /* 4617 * A buffer can be mmap()ed multiple times; either directly through the same 4618 * event, or through other events by use of perf_event_set_output(). 4619 * 4620 * In order to undo the VM accounting done by perf_mmap() we need to destroy 4621 * the buffer here, where we still have a VM context. This means we need 4622 * to detach all events redirecting to us. 4623 */ 4624 static void perf_mmap_close(struct vm_area_struct *vma) 4625 { 4626 struct perf_event *event = vma->vm_file->private_data; 4627 4628 struct ring_buffer *rb = ring_buffer_get(event); 4629 struct user_struct *mmap_user = rb->mmap_user; 4630 int mmap_locked = rb->mmap_locked; 4631 unsigned long size = perf_data_size(rb); 4632 4633 if (event->pmu->event_unmapped) 4634 event->pmu->event_unmapped(event); 4635 4636 /* 4637 * rb->aux_mmap_count will always drop before rb->mmap_count and 4638 * event->mmap_count, so it is ok to use event->mmap_mutex to 4639 * serialize with perf_mmap here. 4640 */ 4641 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && 4642 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { 4643 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); 4644 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; 4645 4646 rb_free_aux(rb); 4647 mutex_unlock(&event->mmap_mutex); 4648 } 4649 4650 atomic_dec(&rb->mmap_count); 4651 4652 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 4653 goto out_put; 4654 4655 ring_buffer_attach(event, NULL); 4656 mutex_unlock(&event->mmap_mutex); 4657 4658 /* If there's still other mmap()s of this buffer, we're done. */ 4659 if (atomic_read(&rb->mmap_count)) 4660 goto out_put; 4661 4662 /* 4663 * No other mmap()s, detach from all other events that might redirect 4664 * into the now unreachable buffer. Somewhat complicated by the 4665 * fact that rb::event_lock otherwise nests inside mmap_mutex. 4666 */ 4667 again: 4668 rcu_read_lock(); 4669 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 4670 if (!atomic_long_inc_not_zero(&event->refcount)) { 4671 /* 4672 * This event is en-route to free_event() which will 4673 * detach it and remove it from the list. 
4674 */ 4675 continue; 4676 } 4677 rcu_read_unlock(); 4678 4679 mutex_lock(&event->mmap_mutex); 4680 /* 4681 * Check we didn't race with perf_event_set_output() which can 4682 * swizzle the rb from under us while we were waiting to 4683 * acquire mmap_mutex. 4684 * 4685 * If we find a different rb; ignore this event, a next 4686 * iteration will no longer find it on the list. We have to 4687 * still restart the iteration to make sure we're not now 4688 * iterating the wrong list. 4689 */ 4690 if (event->rb == rb) 4691 ring_buffer_attach(event, NULL); 4692 4693 mutex_unlock(&event->mmap_mutex); 4694 put_event(event); 4695 4696 /* 4697 * Restart the iteration; either we're on the wrong list or 4698 * destroyed its integrity by doing a deletion. 4699 */ 4700 goto again; 4701 } 4702 rcu_read_unlock(); 4703 4704 /* 4705 * It could be there's still a few 0-ref events on the list; they'll 4706 * get cleaned up by free_event() -- they'll also still have their 4707 * ref on the rb and will free it whenever they are done with it. 4708 * 4709 * Aside from that, this buffer is 'fully' detached and unmapped, 4710 * undo the VM accounting. 4711 */ 4712 4713 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); 4714 vma->vm_mm->pinned_vm -= mmap_locked; 4715 free_uid(mmap_user); 4716 4717 out_put: 4718 ring_buffer_put(rb); /* could be last */ 4719 } 4720 4721 static const struct vm_operations_struct perf_mmap_vmops = { 4722 .open = perf_mmap_open, 4723 .close = perf_mmap_close, /* non mergable */ 4724 .fault = perf_mmap_fault, 4725 .page_mkwrite = perf_mmap_fault, 4726 }; 4727 4728 static int perf_mmap(struct file *file, struct vm_area_struct *vma) 4729 { 4730 struct perf_event *event = file->private_data; 4731 unsigned long user_locked, user_lock_limit; 4732 struct user_struct *user = current_user(); 4733 unsigned long locked, lock_limit; 4734 struct ring_buffer *rb = NULL; 4735 unsigned long vma_size; 4736 unsigned long nr_pages; 4737 long user_extra = 0, extra = 0; 4738 int ret = 0, flags = 0; 4739 4740 /* 4741 * Don't allow mmap() of inherited per-task counters. This would 4742 * create a performance issue due to all children writing to the 4743 * same rb. 4744 */ 4745 if (event->cpu == -1 && event->attr.inherit) 4746 return -EINVAL; 4747 4748 if (!(vma->vm_flags & VM_SHARED)) 4749 return -EINVAL; 4750 4751 vma_size = vma->vm_end - vma->vm_start; 4752 4753 if (vma->vm_pgoff == 0) { 4754 nr_pages = (vma_size / PAGE_SIZE) - 1; 4755 } else { 4756 /* 4757 * AUX area mapping: if rb->aux_nr_pages != 0, it's already 4758 * mapped, all subsequent mappings should have the same size 4759 * and offset. Must be above the normal perf buffer. 
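 *
 * As a rough user-space sketch (illustrative only; 'fd' is a
 * perf_event_open() file descriptor and the page counts are example
 * values: the data mapping is 1 control page plus a power-of-two
 * number of data pages, the AUX mapping a power-of-two number of
 * pages placed above it):
 *
 *	base = mmap(NULL, (1 + 8) * page_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *	pc = (struct perf_event_mmap_page *)base;
 *	pc->aux_offset = (1 + 8) * page_size;
 *	pc->aux_size   = 16 * page_size;
 *	aux = mmap(NULL, pc->aux_size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, pc->aux_offset);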
4760 */ 4761 u64 aux_offset, aux_size; 4762 4763 if (!event->rb) 4764 return -EINVAL; 4765 4766 nr_pages = vma_size / PAGE_SIZE; 4767 4768 mutex_lock(&event->mmap_mutex); 4769 ret = -EINVAL; 4770 4771 rb = event->rb; 4772 if (!rb) 4773 goto aux_unlock; 4774 4775 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); 4776 aux_size = ACCESS_ONCE(rb->user_page->aux_size); 4777 4778 if (aux_offset < perf_data_size(rb) + PAGE_SIZE) 4779 goto aux_unlock; 4780 4781 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) 4782 goto aux_unlock; 4783 4784 /* already mapped with a different offset */ 4785 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) 4786 goto aux_unlock; 4787 4788 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE) 4789 goto aux_unlock; 4790 4791 /* already mapped with a different size */ 4792 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) 4793 goto aux_unlock; 4794 4795 if (!is_power_of_2(nr_pages)) 4796 goto aux_unlock; 4797 4798 if (!atomic_inc_not_zero(&rb->mmap_count)) 4799 goto aux_unlock; 4800 4801 if (rb_has_aux(rb)) { 4802 atomic_inc(&rb->aux_mmap_count); 4803 ret = 0; 4804 goto unlock; 4805 } 4806 4807 atomic_set(&rb->aux_mmap_count, 1); 4808 user_extra = nr_pages; 4809 4810 goto accounting; 4811 } 4812 4813 /* 4814 * If we have rb pages ensure they're a power-of-two number, so we 4815 * can do bitmasks instead of modulo. 4816 */ 4817 if (nr_pages != 0 && !is_power_of_2(nr_pages)) 4818 return -EINVAL; 4819 4820 if (vma_size != PAGE_SIZE * (1 + nr_pages)) 4821 return -EINVAL; 4822 4823 WARN_ON_ONCE(event->ctx->parent_ctx); 4824 again: 4825 mutex_lock(&event->mmap_mutex); 4826 if (event->rb) { 4827 if (event->rb->nr_pages != nr_pages) { 4828 ret = -EINVAL; 4829 goto unlock; 4830 } 4831 4832 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { 4833 /* 4834 * Raced against perf_mmap_close() through 4835 * perf_event_set_output(). Try again, hope for better 4836 * luck. 4837 */ 4838 mutex_unlock(&event->mmap_mutex); 4839 goto again; 4840 } 4841 4842 goto unlock; 4843 } 4844 4845 user_extra = nr_pages + 1; 4846 4847 accounting: 4848 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); 4849 4850 /* 4851 * Increase the limit linearly with more CPUs: 4852 */ 4853 user_lock_limit *= num_online_cpus(); 4854 4855 user_locked = atomic_long_read(&user->locked_vm) + user_extra; 4856 4857 if (user_locked > user_lock_limit) 4858 extra = user_locked - user_lock_limit; 4859 4860 lock_limit = rlimit(RLIMIT_MEMLOCK); 4861 lock_limit >>= PAGE_SHIFT; 4862 locked = vma->vm_mm->pinned_vm + extra; 4863 4864 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && 4865 !capable(CAP_IPC_LOCK)) { 4866 ret = -EPERM; 4867 goto unlock; 4868 } 4869 4870 WARN_ON(!rb && event->rb); 4871 4872 if (vma->vm_flags & VM_WRITE) 4873 flags |= RING_BUFFER_WRITABLE; 4874 4875 if (!rb) { 4876 rb = rb_alloc(nr_pages, 4877 event->attr.watermark ? 
event->attr.wakeup_watermark : 0, 4878 event->cpu, flags); 4879 4880 if (!rb) { 4881 ret = -ENOMEM; 4882 goto unlock; 4883 } 4884 4885 atomic_set(&rb->mmap_count, 1); 4886 rb->mmap_user = get_current_user(); 4887 rb->mmap_locked = extra; 4888 4889 ring_buffer_attach(event, rb); 4890 4891 perf_event_init_userpage(event); 4892 perf_event_update_userpage(event); 4893 } else { 4894 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, 4895 event->attr.aux_watermark, flags); 4896 if (!ret) 4897 rb->aux_mmap_locked = extra; 4898 } 4899 4900 unlock: 4901 if (!ret) { 4902 atomic_long_add(user_extra, &user->locked_vm); 4903 vma->vm_mm->pinned_vm += extra; 4904 4905 atomic_inc(&event->mmap_count); 4906 } else if (rb) { 4907 atomic_dec(&rb->mmap_count); 4908 } 4909 aux_unlock: 4910 mutex_unlock(&event->mmap_mutex); 4911 4912 /* 4913 * Since pinned accounting is per vm we cannot allow fork() to copy our 4914 * vma. 4915 */ 4916 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; 4917 vma->vm_ops = &perf_mmap_vmops; 4918 4919 if (event->pmu->event_mapped) 4920 event->pmu->event_mapped(event); 4921 4922 return ret; 4923 } 4924 4925 static int perf_fasync(int fd, struct file *filp, int on) 4926 { 4927 struct inode *inode = file_inode(filp); 4928 struct perf_event *event = filp->private_data; 4929 int retval; 4930 4931 mutex_lock(&inode->i_mutex); 4932 retval = fasync_helper(fd, filp, on, &event->fasync); 4933 mutex_unlock(&inode->i_mutex); 4934 4935 if (retval < 0) 4936 return retval; 4937 4938 return 0; 4939 } 4940 4941 static const struct file_operations perf_fops = { 4942 .llseek = no_llseek, 4943 .release = perf_release, 4944 .read = perf_read, 4945 .poll = perf_poll, 4946 .unlocked_ioctl = perf_ioctl, 4947 .compat_ioctl = perf_compat_ioctl, 4948 .mmap = perf_mmap, 4949 .fasync = perf_fasync, 4950 }; 4951 4952 /* 4953 * Perf event wakeup 4954 * 4955 * If there's data, ensure we set the poll() state and publish everything 4956 * to user-space before waking everybody up. 4957 */ 4958 4959 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) 4960 { 4961 /* only the parent has fasync state */ 4962 if (event->parent) 4963 event = event->parent; 4964 return &event->fasync; 4965 } 4966 4967 void perf_event_wakeup(struct perf_event *event) 4968 { 4969 ring_buffer_wakeup(event); 4970 4971 if (event->pending_kill) { 4972 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); 4973 event->pending_kill = 0; 4974 } 4975 } 4976 4977 static void perf_pending_event(struct irq_work *entry) 4978 { 4979 struct perf_event *event = container_of(entry, 4980 struct perf_event, pending); 4981 int rctx; 4982 4983 rctx = perf_swevent_get_recursion_context(); 4984 /* 4985 * If we 'fail' here, that's OK, it means recursion is already disabled 4986 * and we won't recurse 'further'. 4987 */ 4988 4989 if (event->pending_disable) { 4990 event->pending_disable = 0; 4991 __perf_event_disable(event); 4992 } 4993 4994 if (event->pending_wakeup) { 4995 event->pending_wakeup = 0; 4996 perf_event_wakeup(event); 4997 } 4998 4999 if (rctx >= 0) 5000 perf_swevent_put_recursion_context(rctx); 5001 } 5002 5003 /* 5004 * We assume there is only KVM supporting the callbacks. 5005 * Later on, we might change it to a list if there is 5006 * another virtualization implementation supporting the callbacks. 
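 *
 * A minimal sketch of such a registration (the kvm_* helpers are
 * stand-ins for whatever the hypervisor implementation provides):
 *
 *	static struct perf_guest_info_callbacks kvm_guest_cbs = {
 *		.is_in_guest	= kvm_is_in_guest,
 *		.is_user_mode	= kvm_is_user_mode,
 *		.get_guest_ip	= kvm_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&kvm_guest_cbs);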
5007 */ 5008 struct perf_guest_info_callbacks *perf_guest_cbs; 5009 5010 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 5011 { 5012 perf_guest_cbs = cbs; 5013 return 0; 5014 } 5015 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); 5016 5017 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 5018 { 5019 perf_guest_cbs = NULL; 5020 return 0; 5021 } 5022 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); 5023 5024 static void 5025 perf_output_sample_regs(struct perf_output_handle *handle, 5026 struct pt_regs *regs, u64 mask) 5027 { 5028 int bit; 5029 5030 for_each_set_bit(bit, (const unsigned long *) &mask, 5031 sizeof(mask) * BITS_PER_BYTE) { 5032 u64 val; 5033 5034 val = perf_reg_value(regs, bit); 5035 perf_output_put(handle, val); 5036 } 5037 } 5038 5039 static void perf_sample_regs_user(struct perf_regs *regs_user, 5040 struct pt_regs *regs, 5041 struct pt_regs *regs_user_copy) 5042 { 5043 if (user_mode(regs)) { 5044 regs_user->abi = perf_reg_abi(current); 5045 regs_user->regs = regs; 5046 } else if (current->mm) { 5047 perf_get_regs_user(regs_user, regs, regs_user_copy); 5048 } else { 5049 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; 5050 regs_user->regs = NULL; 5051 } 5052 } 5053 5054 static void perf_sample_regs_intr(struct perf_regs *regs_intr, 5055 struct pt_regs *regs) 5056 { 5057 regs_intr->regs = regs; 5058 regs_intr->abi = perf_reg_abi(current); 5059 } 5060 5061 5062 /* 5063 * Get remaining task size from user stack pointer. 5064 * 5065 * It'd be better to take stack vma map and limit this more 5066 * precisly, but there's no way to get it safely under interrupt, 5067 * so using TASK_SIZE as limit. 5068 */ 5069 static u64 perf_ustack_task_size(struct pt_regs *regs) 5070 { 5071 unsigned long addr = perf_user_stack_pointer(regs); 5072 5073 if (!addr || addr >= TASK_SIZE) 5074 return 0; 5075 5076 return TASK_SIZE - addr; 5077 } 5078 5079 static u16 5080 perf_sample_ustack_size(u16 stack_size, u16 header_size, 5081 struct pt_regs *regs) 5082 { 5083 u64 task_size; 5084 5085 /* No regs, no stack pointer, no dump. */ 5086 if (!regs) 5087 return 0; 5088 5089 /* 5090 * Check if we fit in with the requested stack size into the: 5091 * - TASK_SIZE 5092 * If we don't, we limit the size to the TASK_SIZE. 5093 * 5094 * - remaining sample size 5095 * If we don't, we customize the stack size to 5096 * fit in to the remaining sample size. 5097 */ 5098 5099 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); 5100 stack_size = min(stack_size, (u16) task_size); 5101 5102 /* Current header size plus static size and dynamic size. */ 5103 header_size += 2 * sizeof(u64); 5104 5105 /* Do we fit in with the current stack dump size? */ 5106 if ((u16) (header_size + stack_size) < header_size) { 5107 /* 5108 * If we overflow the maximum size for the sample, 5109 * we customize the stack dump size to fit in. 
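 *
 * For example (illustrative numbers): with header_size == 0xffd0, a
 * requested stack_size of 0x0100 wraps the u16 sum checked above, so
 * we end up here and clamp stack_size to what still fits under
 * USHRT_MAX, keeping room for the trailing dynamic-size u64.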
5110 */ 5111 stack_size = USHRT_MAX - header_size - sizeof(u64); 5112 stack_size = round_up(stack_size, sizeof(u64)); 5113 } 5114 5115 return stack_size; 5116 } 5117 5118 static void 5119 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, 5120 struct pt_regs *regs) 5121 { 5122 /* Case of a kernel thread, nothing to dump */ 5123 if (!regs) { 5124 u64 size = 0; 5125 perf_output_put(handle, size); 5126 } else { 5127 unsigned long sp; 5128 unsigned int rem; 5129 u64 dyn_size; 5130 5131 /* 5132 * We dump: 5133 * static size 5134 * - the size requested by user or the best one we can fit 5135 * in to the sample max size 5136 * data 5137 * - user stack dump data 5138 * dynamic size 5139 * - the actual dumped size 5140 */ 5141 5142 /* Static size. */ 5143 perf_output_put(handle, dump_size); 5144 5145 /* Data. */ 5146 sp = perf_user_stack_pointer(regs); 5147 rem = __output_copy_user(handle, (void *) sp, dump_size); 5148 dyn_size = dump_size - rem; 5149 5150 perf_output_skip(handle, rem); 5151 5152 /* Dynamic size. */ 5153 perf_output_put(handle, dyn_size); 5154 } 5155 } 5156 5157 static void __perf_event_header__init_id(struct perf_event_header *header, 5158 struct perf_sample_data *data, 5159 struct perf_event *event) 5160 { 5161 u64 sample_type = event->attr.sample_type; 5162 5163 data->type = sample_type; 5164 header->size += event->id_header_size; 5165 5166 if (sample_type & PERF_SAMPLE_TID) { 5167 /* namespace issues */ 5168 data->tid_entry.pid = perf_event_pid(event, current); 5169 data->tid_entry.tid = perf_event_tid(event, current); 5170 } 5171 5172 if (sample_type & PERF_SAMPLE_TIME) 5173 data->time = perf_event_clock(event); 5174 5175 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 5176 data->id = primary_event_id(event); 5177 5178 if (sample_type & PERF_SAMPLE_STREAM_ID) 5179 data->stream_id = event->id; 5180 5181 if (sample_type & PERF_SAMPLE_CPU) { 5182 data->cpu_entry.cpu = raw_smp_processor_id(); 5183 data->cpu_entry.reserved = 0; 5184 } 5185 } 5186 5187 void perf_event_header__init_id(struct perf_event_header *header, 5188 struct perf_sample_data *data, 5189 struct perf_event *event) 5190 { 5191 if (event->attr.sample_id_all) 5192 __perf_event_header__init_id(header, data, event); 5193 } 5194 5195 static void __perf_event__output_id_sample(struct perf_output_handle *handle, 5196 struct perf_sample_data *data) 5197 { 5198 u64 sample_type = data->type; 5199 5200 if (sample_type & PERF_SAMPLE_TID) 5201 perf_output_put(handle, data->tid_entry); 5202 5203 if (sample_type & PERF_SAMPLE_TIME) 5204 perf_output_put(handle, data->time); 5205 5206 if (sample_type & PERF_SAMPLE_ID) 5207 perf_output_put(handle, data->id); 5208 5209 if (sample_type & PERF_SAMPLE_STREAM_ID) 5210 perf_output_put(handle, data->stream_id); 5211 5212 if (sample_type & PERF_SAMPLE_CPU) 5213 perf_output_put(handle, data->cpu_entry); 5214 5215 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5216 perf_output_put(handle, data->id); 5217 } 5218 5219 void perf_event__output_id_sample(struct perf_event *event, 5220 struct perf_output_handle *handle, 5221 struct perf_sample_data *sample) 5222 { 5223 if (event->attr.sample_id_all) 5224 __perf_event__output_id_sample(handle, sample); 5225 } 5226 5227 static void perf_output_read_one(struct perf_output_handle *handle, 5228 struct perf_event *event, 5229 u64 enabled, u64 running) 5230 { 5231 u64 read_format = event->attr.read_format; 5232 u64 values[4]; 5233 int n = 0; 5234 5235 values[n++] = perf_event_count(event); 5236 if (read_format & 
PERF_FORMAT_TOTAL_TIME_ENABLED) { 5237 values[n++] = enabled + 5238 atomic64_read(&event->child_total_time_enabled); 5239 } 5240 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 5241 values[n++] = running + 5242 atomic64_read(&event->child_total_time_running); 5243 } 5244 if (read_format & PERF_FORMAT_ID) 5245 values[n++] = primary_event_id(event); 5246 5247 __output_copy(handle, values, n * sizeof(u64)); 5248 } 5249 5250 /* 5251 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 5252 */ 5253 static void perf_output_read_group(struct perf_output_handle *handle, 5254 struct perf_event *event, 5255 u64 enabled, u64 running) 5256 { 5257 struct perf_event *leader = event->group_leader, *sub; 5258 u64 read_format = event->attr.read_format; 5259 u64 values[5]; 5260 int n = 0; 5261 5262 values[n++] = 1 + leader->nr_siblings; 5263 5264 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 5265 values[n++] = enabled; 5266 5267 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 5268 values[n++] = running; 5269 5270 if (leader != event) 5271 leader->pmu->read(leader); 5272 5273 values[n++] = perf_event_count(leader); 5274 if (read_format & PERF_FORMAT_ID) 5275 values[n++] = primary_event_id(leader); 5276 5277 __output_copy(handle, values, n * sizeof(u64)); 5278 5279 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 5280 n = 0; 5281 5282 if ((sub != event) && 5283 (sub->state == PERF_EVENT_STATE_ACTIVE)) 5284 sub->pmu->read(sub); 5285 5286 values[n++] = perf_event_count(sub); 5287 if (read_format & PERF_FORMAT_ID) 5288 values[n++] = primary_event_id(sub); 5289 5290 __output_copy(handle, values, n * sizeof(u64)); 5291 } 5292 } 5293 5294 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ 5295 PERF_FORMAT_TOTAL_TIME_RUNNING) 5296 5297 static void perf_output_read(struct perf_output_handle *handle, 5298 struct perf_event *event) 5299 { 5300 u64 enabled = 0, running = 0, now; 5301 u64 read_format = event->attr.read_format; 5302 5303 /* 5304 * compute total_time_enabled, total_time_running 5305 * based on snapshot values taken when the event 5306 * was last scheduled in. 
5307 * 5308 * we cannot simply called update_context_time() 5309 * because of locking issue as we are called in 5310 * NMI context 5311 */ 5312 if (read_format & PERF_FORMAT_TOTAL_TIMES) 5313 calc_timer_values(event, &now, &enabled, &running); 5314 5315 if (event->attr.read_format & PERF_FORMAT_GROUP) 5316 perf_output_read_group(handle, event, enabled, running); 5317 else 5318 perf_output_read_one(handle, event, enabled, running); 5319 } 5320 5321 void perf_output_sample(struct perf_output_handle *handle, 5322 struct perf_event_header *header, 5323 struct perf_sample_data *data, 5324 struct perf_event *event) 5325 { 5326 u64 sample_type = data->type; 5327 5328 perf_output_put(handle, *header); 5329 5330 if (sample_type & PERF_SAMPLE_IDENTIFIER) 5331 perf_output_put(handle, data->id); 5332 5333 if (sample_type & PERF_SAMPLE_IP) 5334 perf_output_put(handle, data->ip); 5335 5336 if (sample_type & PERF_SAMPLE_TID) 5337 perf_output_put(handle, data->tid_entry); 5338 5339 if (sample_type & PERF_SAMPLE_TIME) 5340 perf_output_put(handle, data->time); 5341 5342 if (sample_type & PERF_SAMPLE_ADDR) 5343 perf_output_put(handle, data->addr); 5344 5345 if (sample_type & PERF_SAMPLE_ID) 5346 perf_output_put(handle, data->id); 5347 5348 if (sample_type & PERF_SAMPLE_STREAM_ID) 5349 perf_output_put(handle, data->stream_id); 5350 5351 if (sample_type & PERF_SAMPLE_CPU) 5352 perf_output_put(handle, data->cpu_entry); 5353 5354 if (sample_type & PERF_SAMPLE_PERIOD) 5355 perf_output_put(handle, data->period); 5356 5357 if (sample_type & PERF_SAMPLE_READ) 5358 perf_output_read(handle, event); 5359 5360 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 5361 if (data->callchain) { 5362 int size = 1; 5363 5364 if (data->callchain) 5365 size += data->callchain->nr; 5366 5367 size *= sizeof(u64); 5368 5369 __output_copy(handle, data->callchain, size); 5370 } else { 5371 u64 nr = 0; 5372 perf_output_put(handle, nr); 5373 } 5374 } 5375 5376 if (sample_type & PERF_SAMPLE_RAW) { 5377 if (data->raw) { 5378 u32 raw_size = data->raw->size; 5379 u32 real_size = round_up(raw_size + sizeof(u32), 5380 sizeof(u64)) - sizeof(u32); 5381 u64 zero = 0; 5382 5383 perf_output_put(handle, real_size); 5384 __output_copy(handle, data->raw->data, raw_size); 5385 if (real_size - raw_size) 5386 __output_copy(handle, &zero, real_size - raw_size); 5387 } else { 5388 struct { 5389 u32 size; 5390 u32 data; 5391 } raw = { 5392 .size = sizeof(u32), 5393 .data = 0, 5394 }; 5395 perf_output_put(handle, raw); 5396 } 5397 } 5398 5399 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 5400 if (data->br_stack) { 5401 size_t size; 5402 5403 size = data->br_stack->nr 5404 * sizeof(struct perf_branch_entry); 5405 5406 perf_output_put(handle, data->br_stack->nr); 5407 perf_output_copy(handle, data->br_stack->entries, size); 5408 } else { 5409 /* 5410 * we always store at least the value of nr 5411 */ 5412 u64 nr = 0; 5413 perf_output_put(handle, nr); 5414 } 5415 } 5416 5417 if (sample_type & PERF_SAMPLE_REGS_USER) { 5418 u64 abi = data->regs_user.abi; 5419 5420 /* 5421 * If there are no regs to dump, notice it through 5422 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 
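 *
 * The fragment user space sees is thus simply "u64 abi;" followed by
 * one u64 per bit set in attr.sample_regs_user, and nothing but the
 * abi word when it is PERF_SAMPLE_REGS_ABI_NONE.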
5423 */ 5424 perf_output_put(handle, abi); 5425 5426 if (abi) { 5427 u64 mask = event->attr.sample_regs_user; 5428 perf_output_sample_regs(handle, 5429 data->regs_user.regs, 5430 mask); 5431 } 5432 } 5433 5434 if (sample_type & PERF_SAMPLE_STACK_USER) { 5435 perf_output_sample_ustack(handle, 5436 data->stack_user_size, 5437 data->regs_user.regs); 5438 } 5439 5440 if (sample_type & PERF_SAMPLE_WEIGHT) 5441 perf_output_put(handle, data->weight); 5442 5443 if (sample_type & PERF_SAMPLE_DATA_SRC) 5444 perf_output_put(handle, data->data_src.val); 5445 5446 if (sample_type & PERF_SAMPLE_TRANSACTION) 5447 perf_output_put(handle, data->txn); 5448 5449 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5450 u64 abi = data->regs_intr.abi; 5451 /* 5452 * If there are no regs to dump, notice it through 5453 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 5454 */ 5455 perf_output_put(handle, abi); 5456 5457 if (abi) { 5458 u64 mask = event->attr.sample_regs_intr; 5459 5460 perf_output_sample_regs(handle, 5461 data->regs_intr.regs, 5462 mask); 5463 } 5464 } 5465 5466 if (!event->attr.watermark) { 5467 int wakeup_events = event->attr.wakeup_events; 5468 5469 if (wakeup_events) { 5470 struct ring_buffer *rb = handle->rb; 5471 int events = local_inc_return(&rb->events); 5472 5473 if (events >= wakeup_events) { 5474 local_sub(wakeup_events, &rb->events); 5475 local_inc(&rb->wakeup); 5476 } 5477 } 5478 } 5479 } 5480 5481 void perf_prepare_sample(struct perf_event_header *header, 5482 struct perf_sample_data *data, 5483 struct perf_event *event, 5484 struct pt_regs *regs) 5485 { 5486 u64 sample_type = event->attr.sample_type; 5487 5488 header->type = PERF_RECORD_SAMPLE; 5489 header->size = sizeof(*header) + event->header_size; 5490 5491 header->misc = 0; 5492 header->misc |= perf_misc_flags(regs); 5493 5494 __perf_event_header__init_id(header, data, event); 5495 5496 if (sample_type & PERF_SAMPLE_IP) 5497 data->ip = perf_instruction_pointer(regs); 5498 5499 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 5500 int size = 1; 5501 5502 data->callchain = perf_callchain(event, regs); 5503 5504 if (data->callchain) 5505 size += data->callchain->nr; 5506 5507 header->size += size * sizeof(u64); 5508 } 5509 5510 if (sample_type & PERF_SAMPLE_RAW) { 5511 int size = sizeof(u32); 5512 5513 if (data->raw) 5514 size += data->raw->size; 5515 else 5516 size += sizeof(u32); 5517 5518 header->size += round_up(size, sizeof(u64)); 5519 } 5520 5521 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 5522 int size = sizeof(u64); /* nr */ 5523 if (data->br_stack) { 5524 size += data->br_stack->nr 5525 * sizeof(struct perf_branch_entry); 5526 } 5527 header->size += size; 5528 } 5529 5530 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) 5531 perf_sample_regs_user(&data->regs_user, regs, 5532 &data->regs_user_copy); 5533 5534 if (sample_type & PERF_SAMPLE_REGS_USER) { 5535 /* regs dump ABI info */ 5536 int size = sizeof(u64); 5537 5538 if (data->regs_user.regs) { 5539 u64 mask = event->attr.sample_regs_user; 5540 size += hweight64(mask) * sizeof(u64); 5541 } 5542 5543 header->size += size; 5544 } 5545 5546 if (sample_type & PERF_SAMPLE_STACK_USER) { 5547 /* 5548 * Either we need PERF_SAMPLE_STACK_USER bit to be allways 5549 * processed as the last one or have additional check added 5550 * in case new sample type is added, because we could eat 5551 * up the rest of the sample size. 
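 *
 * For reference, the fragment perf_output_sample_ustack() later emits
 * for this bit is laid out as a u64 'size', followed by 'size' bytes
 * of raw user stack, followed by a u64 'dyn_size' telling how much of
 * that was actually dumped.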
5552 */ 5553 u16 stack_size = event->attr.sample_stack_user; 5554 u16 size = sizeof(u64); 5555 5556 stack_size = perf_sample_ustack_size(stack_size, header->size, 5557 data->regs_user.regs); 5558 5559 /* 5560 * If there is something to dump, add space for the dump 5561 * itself and for the field that tells the dynamic size, 5562 * which is how many have been actually dumped. 5563 */ 5564 if (stack_size) 5565 size += sizeof(u64) + stack_size; 5566 5567 data->stack_user_size = stack_size; 5568 header->size += size; 5569 } 5570 5571 if (sample_type & PERF_SAMPLE_REGS_INTR) { 5572 /* regs dump ABI info */ 5573 int size = sizeof(u64); 5574 5575 perf_sample_regs_intr(&data->regs_intr, regs); 5576 5577 if (data->regs_intr.regs) { 5578 u64 mask = event->attr.sample_regs_intr; 5579 5580 size += hweight64(mask) * sizeof(u64); 5581 } 5582 5583 header->size += size; 5584 } 5585 } 5586 5587 void perf_event_output(struct perf_event *event, 5588 struct perf_sample_data *data, 5589 struct pt_regs *regs) 5590 { 5591 struct perf_output_handle handle; 5592 struct perf_event_header header; 5593 5594 /* protect the callchain buffers */ 5595 rcu_read_lock(); 5596 5597 perf_prepare_sample(&header, data, event, regs); 5598 5599 if (perf_output_begin(&handle, event, header.size)) 5600 goto exit; 5601 5602 perf_output_sample(&handle, &header, data, event); 5603 5604 perf_output_end(&handle); 5605 5606 exit: 5607 rcu_read_unlock(); 5608 } 5609 5610 /* 5611 * read event_id 5612 */ 5613 5614 struct perf_read_event { 5615 struct perf_event_header header; 5616 5617 u32 pid; 5618 u32 tid; 5619 }; 5620 5621 static void 5622 perf_event_read_event(struct perf_event *event, 5623 struct task_struct *task) 5624 { 5625 struct perf_output_handle handle; 5626 struct perf_sample_data sample; 5627 struct perf_read_event read_event = { 5628 .header = { 5629 .type = PERF_RECORD_READ, 5630 .misc = 0, 5631 .size = sizeof(read_event) + event->read_size, 5632 }, 5633 .pid = perf_event_pid(event, task), 5634 .tid = perf_event_tid(event, task), 5635 }; 5636 int ret; 5637 5638 perf_event_header__init_id(&read_event.header, &sample, event); 5639 ret = perf_output_begin(&handle, event, read_event.header.size); 5640 if (ret) 5641 return; 5642 5643 perf_output_put(&handle, read_event); 5644 perf_output_read(&handle, event); 5645 perf_event__output_id_sample(event, &handle, &sample); 5646 5647 perf_output_end(&handle); 5648 } 5649 5650 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); 5651 5652 static void 5653 perf_event_aux_ctx(struct perf_event_context *ctx, 5654 perf_event_aux_output_cb output, 5655 void *data) 5656 { 5657 struct perf_event *event; 5658 5659 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 5660 if (event->state < PERF_EVENT_STATE_INACTIVE) 5661 continue; 5662 if (!event_filter_match(event)) 5663 continue; 5664 output(event, data); 5665 } 5666 } 5667 5668 static void 5669 perf_event_aux(perf_event_aux_output_cb output, void *data, 5670 struct perf_event_context *task_ctx) 5671 { 5672 struct perf_cpu_context *cpuctx; 5673 struct perf_event_context *ctx; 5674 struct pmu *pmu; 5675 int ctxn; 5676 5677 rcu_read_lock(); 5678 list_for_each_entry_rcu(pmu, &pmus, entry) { 5679 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 5680 if (cpuctx->unique_pmu != pmu) 5681 goto next; 5682 perf_event_aux_ctx(&cpuctx->ctx, output, data); 5683 if (task_ctx) 5684 goto next; 5685 ctxn = pmu->task_ctx_nr; 5686 if (ctxn < 0) 5687 goto next; 5688 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 5689 if 
(ctx) 5690 perf_event_aux_ctx(ctx, output, data); 5691 next: 5692 put_cpu_ptr(pmu->pmu_cpu_context); 5693 } 5694 5695 if (task_ctx) { 5696 preempt_disable(); 5697 perf_event_aux_ctx(task_ctx, output, data); 5698 preempt_enable(); 5699 } 5700 rcu_read_unlock(); 5701 } 5702 5703 /* 5704 * task tracking -- fork/exit 5705 * 5706 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task 5707 */ 5708 5709 struct perf_task_event { 5710 struct task_struct *task; 5711 struct perf_event_context *task_ctx; 5712 5713 struct { 5714 struct perf_event_header header; 5715 5716 u32 pid; 5717 u32 ppid; 5718 u32 tid; 5719 u32 ptid; 5720 u64 time; 5721 } event_id; 5722 }; 5723 5724 static int perf_event_task_match(struct perf_event *event) 5725 { 5726 return event->attr.comm || event->attr.mmap || 5727 event->attr.mmap2 || event->attr.mmap_data || 5728 event->attr.task; 5729 } 5730 5731 static void perf_event_task_output(struct perf_event *event, 5732 void *data) 5733 { 5734 struct perf_task_event *task_event = data; 5735 struct perf_output_handle handle; 5736 struct perf_sample_data sample; 5737 struct task_struct *task = task_event->task; 5738 int ret, size = task_event->event_id.header.size; 5739 5740 if (!perf_event_task_match(event)) 5741 return; 5742 5743 perf_event_header__init_id(&task_event->event_id.header, &sample, event); 5744 5745 ret = perf_output_begin(&handle, event, 5746 task_event->event_id.header.size); 5747 if (ret) 5748 goto out; 5749 5750 task_event->event_id.pid = perf_event_pid(event, task); 5751 task_event->event_id.ppid = perf_event_pid(event, current); 5752 5753 task_event->event_id.tid = perf_event_tid(event, task); 5754 task_event->event_id.ptid = perf_event_tid(event, current); 5755 5756 task_event->event_id.time = perf_event_clock(event); 5757 5758 perf_output_put(&handle, task_event->event_id); 5759 5760 perf_event__output_id_sample(event, &handle, &sample); 5761 5762 perf_output_end(&handle); 5763 out: 5764 task_event->event_id.header.size = size; 5765 } 5766 5767 static void perf_event_task(struct task_struct *task, 5768 struct perf_event_context *task_ctx, 5769 int new) 5770 { 5771 struct perf_task_event task_event; 5772 5773 if (!atomic_read(&nr_comm_events) && 5774 !atomic_read(&nr_mmap_events) && 5775 !atomic_read(&nr_task_events)) 5776 return; 5777 5778 task_event = (struct perf_task_event){ 5779 .task = task, 5780 .task_ctx = task_ctx, 5781 .event_id = { 5782 .header = { 5783 .type = new ? 
PERF_RECORD_FORK : PERF_RECORD_EXIT, 5784 .misc = 0, 5785 .size = sizeof(task_event.event_id), 5786 }, 5787 /* .pid */ 5788 /* .ppid */ 5789 /* .tid */ 5790 /* .ptid */ 5791 /* .time */ 5792 }, 5793 }; 5794 5795 perf_event_aux(perf_event_task_output, 5796 &task_event, 5797 task_ctx); 5798 } 5799 5800 void perf_event_fork(struct task_struct *task) 5801 { 5802 perf_event_task(task, NULL, 1); 5803 } 5804 5805 /* 5806 * comm tracking 5807 */ 5808 5809 struct perf_comm_event { 5810 struct task_struct *task; 5811 char *comm; 5812 int comm_size; 5813 5814 struct { 5815 struct perf_event_header header; 5816 5817 u32 pid; 5818 u32 tid; 5819 } event_id; 5820 }; 5821 5822 static int perf_event_comm_match(struct perf_event *event) 5823 { 5824 return event->attr.comm; 5825 } 5826 5827 static void perf_event_comm_output(struct perf_event *event, 5828 void *data) 5829 { 5830 struct perf_comm_event *comm_event = data; 5831 struct perf_output_handle handle; 5832 struct perf_sample_data sample; 5833 int size = comm_event->event_id.header.size; 5834 int ret; 5835 5836 if (!perf_event_comm_match(event)) 5837 return; 5838 5839 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); 5840 ret = perf_output_begin(&handle, event, 5841 comm_event->event_id.header.size); 5842 5843 if (ret) 5844 goto out; 5845 5846 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); 5847 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); 5848 5849 perf_output_put(&handle, comm_event->event_id); 5850 __output_copy(&handle, comm_event->comm, 5851 comm_event->comm_size); 5852 5853 perf_event__output_id_sample(event, &handle, &sample); 5854 5855 perf_output_end(&handle); 5856 out: 5857 comm_event->event_id.header.size = size; 5858 } 5859 5860 static void perf_event_comm_event(struct perf_comm_event *comm_event) 5861 { 5862 char comm[TASK_COMM_LEN]; 5863 unsigned int size; 5864 5865 memset(comm, 0, sizeof(comm)); 5866 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 5867 size = ALIGN(strlen(comm)+1, sizeof(u64)); 5868 5869 comm_event->comm = comm; 5870 comm_event->comm_size = size; 5871 5872 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 5873 5874 perf_event_aux(perf_event_comm_output, 5875 comm_event, 5876 NULL); 5877 } 5878 5879 void perf_event_comm(struct task_struct *task, bool exec) 5880 { 5881 struct perf_comm_event comm_event; 5882 5883 if (!atomic_read(&nr_comm_events)) 5884 return; 5885 5886 comm_event = (struct perf_comm_event){ 5887 .task = task, 5888 /* .comm */ 5889 /* .comm_size */ 5890 .event_id = { 5891 .header = { 5892 .type = PERF_RECORD_COMM, 5893 .misc = exec ? 
PERF_RECORD_MISC_COMM_EXEC : 0, 5894 /* .size */ 5895 }, 5896 /* .pid */ 5897 /* .tid */ 5898 }, 5899 }; 5900 5901 perf_event_comm_event(&comm_event); 5902 } 5903 5904 /* 5905 * mmap tracking 5906 */ 5907 5908 struct perf_mmap_event { 5909 struct vm_area_struct *vma; 5910 5911 const char *file_name; 5912 int file_size; 5913 int maj, min; 5914 u64 ino; 5915 u64 ino_generation; 5916 u32 prot, flags; 5917 5918 struct { 5919 struct perf_event_header header; 5920 5921 u32 pid; 5922 u32 tid; 5923 u64 start; 5924 u64 len; 5925 u64 pgoff; 5926 } event_id; 5927 }; 5928 5929 static int perf_event_mmap_match(struct perf_event *event, 5930 void *data) 5931 { 5932 struct perf_mmap_event *mmap_event = data; 5933 struct vm_area_struct *vma = mmap_event->vma; 5934 int executable = vma->vm_flags & VM_EXEC; 5935 5936 return (!executable && event->attr.mmap_data) || 5937 (executable && (event->attr.mmap || event->attr.mmap2)); 5938 } 5939 5940 static void perf_event_mmap_output(struct perf_event *event, 5941 void *data) 5942 { 5943 struct perf_mmap_event *mmap_event = data; 5944 struct perf_output_handle handle; 5945 struct perf_sample_data sample; 5946 int size = mmap_event->event_id.header.size; 5947 int ret; 5948 5949 if (!perf_event_mmap_match(event, data)) 5950 return; 5951 5952 if (event->attr.mmap2) { 5953 mmap_event->event_id.header.type = PERF_RECORD_MMAP2; 5954 mmap_event->event_id.header.size += sizeof(mmap_event->maj); 5955 mmap_event->event_id.header.size += sizeof(mmap_event->min); 5956 mmap_event->event_id.header.size += sizeof(mmap_event->ino); 5957 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); 5958 mmap_event->event_id.header.size += sizeof(mmap_event->prot); 5959 mmap_event->event_id.header.size += sizeof(mmap_event->flags); 5960 } 5961 5962 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); 5963 ret = perf_output_begin(&handle, event, 5964 mmap_event->event_id.header.size); 5965 if (ret) 5966 goto out; 5967 5968 mmap_event->event_id.pid = perf_event_pid(event, current); 5969 mmap_event->event_id.tid = perf_event_tid(event, current); 5970 5971 perf_output_put(&handle, mmap_event->event_id); 5972 5973 if (event->attr.mmap2) { 5974 perf_output_put(&handle, mmap_event->maj); 5975 perf_output_put(&handle, mmap_event->min); 5976 perf_output_put(&handle, mmap_event->ino); 5977 perf_output_put(&handle, mmap_event->ino_generation); 5978 perf_output_put(&handle, mmap_event->prot); 5979 perf_output_put(&handle, mmap_event->flags); 5980 } 5981 5982 __output_copy(&handle, mmap_event->file_name, 5983 mmap_event->file_size); 5984 5985 perf_event__output_id_sample(event, &handle, &sample); 5986 5987 perf_output_end(&handle); 5988 out: 5989 mmap_event->event_id.header.size = size; 5990 } 5991 5992 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 5993 { 5994 struct vm_area_struct *vma = mmap_event->vma; 5995 struct file *file = vma->vm_file; 5996 int maj = 0, min = 0; 5997 u64 ino = 0, gen = 0; 5998 u32 prot = 0, flags = 0; 5999 unsigned int size; 6000 char tmp[16]; 6001 char *buf = NULL; 6002 char *name; 6003 6004 if (file) { 6005 struct inode *inode; 6006 dev_t dev; 6007 6008 buf = kmalloc(PATH_MAX, GFP_KERNEL); 6009 if (!buf) { 6010 name = "//enomem"; 6011 goto cpy_name; 6012 } 6013 /* 6014 * d_path() works from the end of the rb backwards, so we 6015 * need to add enough zero bytes after the string to handle 6016 * the 64bit alignment we do later. 
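 *
 * For example, a path such as "/usr/bin/perf" occupies 14 bytes
 * including its NUL and gets padded with two more zero bytes to 16,
 * so everything that follows in the record stays u64 aligned.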
6017 */ 6018 name = file_path(file, buf, PATH_MAX - sizeof(u64)); 6019 if (IS_ERR(name)) { 6020 name = "//toolong"; 6021 goto cpy_name; 6022 } 6023 inode = file_inode(vma->vm_file); 6024 dev = inode->i_sb->s_dev; 6025 ino = inode->i_ino; 6026 gen = inode->i_generation; 6027 maj = MAJOR(dev); 6028 min = MINOR(dev); 6029 6030 if (vma->vm_flags & VM_READ) 6031 prot |= PROT_READ; 6032 if (vma->vm_flags & VM_WRITE) 6033 prot |= PROT_WRITE; 6034 if (vma->vm_flags & VM_EXEC) 6035 prot |= PROT_EXEC; 6036 6037 if (vma->vm_flags & VM_MAYSHARE) 6038 flags = MAP_SHARED; 6039 else 6040 flags = MAP_PRIVATE; 6041 6042 if (vma->vm_flags & VM_DENYWRITE) 6043 flags |= MAP_DENYWRITE; 6044 if (vma->vm_flags & VM_MAYEXEC) 6045 flags |= MAP_EXECUTABLE; 6046 if (vma->vm_flags & VM_LOCKED) 6047 flags |= MAP_LOCKED; 6048 if (vma->vm_flags & VM_HUGETLB) 6049 flags |= MAP_HUGETLB; 6050 6051 goto got_name; 6052 } else { 6053 if (vma->vm_ops && vma->vm_ops->name) { 6054 name = (char *) vma->vm_ops->name(vma); 6055 if (name) 6056 goto cpy_name; 6057 } 6058 6059 name = (char *)arch_vma_name(vma); 6060 if (name) 6061 goto cpy_name; 6062 6063 if (vma->vm_start <= vma->vm_mm->start_brk && 6064 vma->vm_end >= vma->vm_mm->brk) { 6065 name = "[heap]"; 6066 goto cpy_name; 6067 } 6068 if (vma->vm_start <= vma->vm_mm->start_stack && 6069 vma->vm_end >= vma->vm_mm->start_stack) { 6070 name = "[stack]"; 6071 goto cpy_name; 6072 } 6073 6074 name = "//anon"; 6075 goto cpy_name; 6076 } 6077 6078 cpy_name: 6079 strlcpy(tmp, name, sizeof(tmp)); 6080 name = tmp; 6081 got_name: 6082 /* 6083 * Since our buffer works in 8 byte units we need to align our string 6084 * size to a multiple of 8. However, we must guarantee the tail end is 6085 * zero'd out to avoid leaking random bits to userspace. 6086 */ 6087 size = strlen(name)+1; 6088 while (!IS_ALIGNED(size, sizeof(u64))) 6089 name[size++] = '\0'; 6090 6091 mmap_event->file_name = name; 6092 mmap_event->file_size = size; 6093 mmap_event->maj = maj; 6094 mmap_event->min = min; 6095 mmap_event->ino = ino; 6096 mmap_event->ino_generation = gen; 6097 mmap_event->prot = prot; 6098 mmap_event->flags = flags; 6099 6100 if (!(vma->vm_flags & VM_EXEC)) 6101 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; 6102 6103 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 6104 6105 perf_event_aux(perf_event_mmap_output, 6106 mmap_event, 6107 NULL); 6108 6109 kfree(buf); 6110 } 6111 6112 void perf_event_mmap(struct vm_area_struct *vma) 6113 { 6114 struct perf_mmap_event mmap_event; 6115 6116 if (!atomic_read(&nr_mmap_events)) 6117 return; 6118 6119 mmap_event = (struct perf_mmap_event){ 6120 .vma = vma, 6121 /* .file_name */ 6122 /* .file_size */ 6123 .event_id = { 6124 .header = { 6125 .type = PERF_RECORD_MMAP, 6126 .misc = PERF_RECORD_MISC_USER, 6127 /* .size */ 6128 }, 6129 /* .pid */ 6130 /* .tid */ 6131 .start = vma->vm_start, 6132 .len = vma->vm_end - vma->vm_start, 6133 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, 6134 }, 6135 /* .maj (attr_mmap2 only) */ 6136 /* .min (attr_mmap2 only) */ 6137 /* .ino (attr_mmap2 only) */ 6138 /* .ino_generation (attr_mmap2 only) */ 6139 /* .prot (attr_mmap2 only) */ 6140 /* .flags (attr_mmap2 only) */ 6141 }; 6142 6143 perf_event_mmap_event(&mmap_event); 6144 } 6145 6146 void perf_event_aux_event(struct perf_event *event, unsigned long head, 6147 unsigned long size, u64 flags) 6148 { 6149 struct perf_output_handle handle; 6150 struct perf_sample_data sample; 6151 struct perf_aux_event { 6152 struct perf_event_header header; 6153 
u64 offset; 6154 u64 size; 6155 u64 flags; 6156 } rec = { 6157 .header = { 6158 .type = PERF_RECORD_AUX, 6159 .misc = 0, 6160 .size = sizeof(rec), 6161 }, 6162 .offset = head, 6163 .size = size, 6164 .flags = flags, 6165 }; 6166 int ret; 6167 6168 perf_event_header__init_id(&rec.header, &sample, event); 6169 ret = perf_output_begin(&handle, event, rec.header.size); 6170 6171 if (ret) 6172 return; 6173 6174 perf_output_put(&handle, rec); 6175 perf_event__output_id_sample(event, &handle, &sample); 6176 6177 perf_output_end(&handle); 6178 } 6179 6180 /* 6181 * Lost/dropped samples logging 6182 */ 6183 void perf_log_lost_samples(struct perf_event *event, u64 lost) 6184 { 6185 struct perf_output_handle handle; 6186 struct perf_sample_data sample; 6187 int ret; 6188 6189 struct { 6190 struct perf_event_header header; 6191 u64 lost; 6192 } lost_samples_event = { 6193 .header = { 6194 .type = PERF_RECORD_LOST_SAMPLES, 6195 .misc = 0, 6196 .size = sizeof(lost_samples_event), 6197 }, 6198 .lost = lost, 6199 }; 6200 6201 perf_event_header__init_id(&lost_samples_event.header, &sample, event); 6202 6203 ret = perf_output_begin(&handle, event, 6204 lost_samples_event.header.size); 6205 if (ret) 6206 return; 6207 6208 perf_output_put(&handle, lost_samples_event); 6209 perf_event__output_id_sample(event, &handle, &sample); 6210 perf_output_end(&handle); 6211 } 6212 6213 /* 6214 * context_switch tracking 6215 */ 6216 6217 struct perf_switch_event { 6218 struct task_struct *task; 6219 struct task_struct *next_prev; 6220 6221 struct { 6222 struct perf_event_header header; 6223 u32 next_prev_pid; 6224 u32 next_prev_tid; 6225 } event_id; 6226 }; 6227 6228 static int perf_event_switch_match(struct perf_event *event) 6229 { 6230 return event->attr.context_switch; 6231 } 6232 6233 static void perf_event_switch_output(struct perf_event *event, void *data) 6234 { 6235 struct perf_switch_event *se = data; 6236 struct perf_output_handle handle; 6237 struct perf_sample_data sample; 6238 int ret; 6239 6240 if (!perf_event_switch_match(event)) 6241 return; 6242 6243 /* Only CPU-wide events are allowed to see next/prev pid/tid */ 6244 if (event->ctx->task) { 6245 se->event_id.header.type = PERF_RECORD_SWITCH; 6246 se->event_id.header.size = sizeof(se->event_id.header); 6247 } else { 6248 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE; 6249 se->event_id.header.size = sizeof(se->event_id); 6250 se->event_id.next_prev_pid = 6251 perf_event_pid(event, se->next_prev); 6252 se->event_id.next_prev_tid = 6253 perf_event_tid(event, se->next_prev); 6254 } 6255 6256 perf_event_header__init_id(&se->event_id.header, &sample, event); 6257 6258 ret = perf_output_begin(&handle, event, se->event_id.header.size); 6259 if (ret) 6260 return; 6261 6262 if (event->ctx->task) 6263 perf_output_put(&handle, se->event_id.header); 6264 else 6265 perf_output_put(&handle, se->event_id); 6266 6267 perf_event__output_id_sample(event, &handle, &sample); 6268 6269 perf_output_end(&handle); 6270 } 6271 6272 static void perf_event_switch(struct task_struct *task, 6273 struct task_struct *next_prev, bool sched_in) 6274 { 6275 struct perf_switch_event switch_event; 6276 6277 /* N.B. caller checks nr_switch_events != 0 */ 6278 6279 switch_event = (struct perf_switch_event){ 6280 .task = task, 6281 .next_prev = next_prev, 6282 .event_id = { 6283 .header = { 6284 /* .type */ 6285 .misc = sched_in ? 
0 : PERF_RECORD_MISC_SWITCH_OUT, 6286 /* .size */ 6287 }, 6288 /* .next_prev_pid */ 6289 /* .next_prev_tid */ 6290 }, 6291 }; 6292 6293 perf_event_aux(perf_event_switch_output, 6294 &switch_event, 6295 NULL); 6296 } 6297 6298 /* 6299 * IRQ throttle logging 6300 */ 6301 6302 static void perf_log_throttle(struct perf_event *event, int enable) 6303 { 6304 struct perf_output_handle handle; 6305 struct perf_sample_data sample; 6306 int ret; 6307 6308 struct { 6309 struct perf_event_header header; 6310 u64 time; 6311 u64 id; 6312 u64 stream_id; 6313 } throttle_event = { 6314 .header = { 6315 .type = PERF_RECORD_THROTTLE, 6316 .misc = 0, 6317 .size = sizeof(throttle_event), 6318 }, 6319 .time = perf_event_clock(event), 6320 .id = primary_event_id(event), 6321 .stream_id = event->id, 6322 }; 6323 6324 if (enable) 6325 throttle_event.header.type = PERF_RECORD_UNTHROTTLE; 6326 6327 perf_event_header__init_id(&throttle_event.header, &sample, event); 6328 6329 ret = perf_output_begin(&handle, event, 6330 throttle_event.header.size); 6331 if (ret) 6332 return; 6333 6334 perf_output_put(&handle, throttle_event); 6335 perf_event__output_id_sample(event, &handle, &sample); 6336 perf_output_end(&handle); 6337 } 6338 6339 static void perf_log_itrace_start(struct perf_event *event) 6340 { 6341 struct perf_output_handle handle; 6342 struct perf_sample_data sample; 6343 struct perf_aux_event { 6344 struct perf_event_header header; 6345 u32 pid; 6346 u32 tid; 6347 } rec; 6348 int ret; 6349 6350 if (event->parent) 6351 event = event->parent; 6352 6353 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || 6354 event->hw.itrace_started) 6355 return; 6356 6357 rec.header.type = PERF_RECORD_ITRACE_START; 6358 rec.header.misc = 0; 6359 rec.header.size = sizeof(rec); 6360 rec.pid = perf_event_pid(event, current); 6361 rec.tid = perf_event_tid(event, current); 6362 6363 perf_event_header__init_id(&rec.header, &sample, event); 6364 ret = perf_output_begin(&handle, event, rec.header.size); 6365 6366 if (ret) 6367 return; 6368 6369 perf_output_put(&handle, rec); 6370 perf_event__output_id_sample(event, &handle, &sample); 6371 6372 perf_output_end(&handle); 6373 } 6374 6375 /* 6376 * Generic event overflow handling, sampling. 6377 */ 6378 6379 static int __perf_event_overflow(struct perf_event *event, 6380 int throttle, struct perf_sample_data *data, 6381 struct pt_regs *regs) 6382 { 6383 int events = atomic_read(&event->event_limit); 6384 struct hw_perf_event *hwc = &event->hw; 6385 u64 seq; 6386 int ret = 0; 6387 6388 /* 6389 * Non-sampling counters might still use the PMI to fold short 6390 * hardware counters, ignore those. 
6391 */ 6392 if (unlikely(!is_sampling_event(event))) 6393 return 0; 6394 6395 seq = __this_cpu_read(perf_throttled_seq); 6396 if (seq != hwc->interrupts_seq) { 6397 hwc->interrupts_seq = seq; 6398 hwc->interrupts = 1; 6399 } else { 6400 hwc->interrupts++; 6401 if (unlikely(throttle 6402 && hwc->interrupts >= max_samples_per_tick)) { 6403 __this_cpu_inc(perf_throttled_count); 6404 hwc->interrupts = MAX_INTERRUPTS; 6405 perf_log_throttle(event, 0); 6406 tick_nohz_full_kick(); 6407 ret = 1; 6408 } 6409 } 6410 6411 if (event->attr.freq) { 6412 u64 now = perf_clock(); 6413 s64 delta = now - hwc->freq_time_stamp; 6414 6415 hwc->freq_time_stamp = now; 6416 6417 if (delta > 0 && delta < 2*TICK_NSEC) 6418 perf_adjust_period(event, delta, hwc->last_period, true); 6419 } 6420 6421 /* 6422 * XXX event_limit might not quite work as expected on inherited 6423 * events 6424 */ 6425 6426 event->pending_kill = POLL_IN; 6427 if (events && atomic_dec_and_test(&event->event_limit)) { 6428 ret = 1; 6429 event->pending_kill = POLL_HUP; 6430 event->pending_disable = 1; 6431 irq_work_queue(&event->pending); 6432 } 6433 6434 if (event->overflow_handler) 6435 event->overflow_handler(event, data, regs); 6436 else 6437 perf_event_output(event, data, regs); 6438 6439 if (*perf_event_fasync(event) && event->pending_kill) { 6440 event->pending_wakeup = 1; 6441 irq_work_queue(&event->pending); 6442 } 6443 6444 return ret; 6445 } 6446 6447 int perf_event_overflow(struct perf_event *event, 6448 struct perf_sample_data *data, 6449 struct pt_regs *regs) 6450 { 6451 return __perf_event_overflow(event, 1, data, regs); 6452 } 6453 6454 /* 6455 * Generic software event infrastructure 6456 */ 6457 6458 struct swevent_htable { 6459 struct swevent_hlist *swevent_hlist; 6460 struct mutex hlist_mutex; 6461 int hlist_refcount; 6462 6463 /* Recursion avoidance in each contexts */ 6464 int recursion[PERF_NR_CONTEXTS]; 6465 6466 /* Keeps track of cpu being initialized/exited */ 6467 bool online; 6468 }; 6469 6470 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); 6471 6472 /* 6473 * We directly increment event->count and keep a second value in 6474 * event->hw.period_left to count intervals. This period event 6475 * is kept in the range [-sample_period, 0] so that we can use the 6476 * sign as trigger. 6477 */ 6478 6479 u64 perf_swevent_set_period(struct perf_event *event) 6480 { 6481 struct hw_perf_event *hwc = &event->hw; 6482 u64 period = hwc->last_period; 6483 u64 nr, offset; 6484 s64 old, val; 6485 6486 hwc->last_period = hwc->sample_period; 6487 6488 again: 6489 old = val = local64_read(&hwc->period_left); 6490 if (val < 0) 6491 return 0; 6492 6493 nr = div64_u64(period + val, period); 6494 offset = nr * period; 6495 val -= offset; 6496 if (local64_cmpxchg(&hwc->period_left, old, val) != old) 6497 goto again; 6498 6499 return nr; 6500 } 6501 6502 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, 6503 struct perf_sample_data *data, 6504 struct pt_regs *regs) 6505 { 6506 struct hw_perf_event *hwc = &event->hw; 6507 int throttle = 0; 6508 6509 if (!overflow) 6510 overflow = perf_swevent_set_period(event); 6511 6512 if (hwc->interrupts == MAX_INTERRUPTS) 6513 return; 6514 6515 for (; overflow; overflow--) { 6516 if (__perf_event_overflow(event, throttle, 6517 data, regs)) { 6518 /* 6519 * We inhibit the overflow from happening when 6520 * hwc->interrupts == MAX_INTERRUPTS. 
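 *
 * Note that "overflow" can be larger than one here: perf_swevent_set_period()
 * above reports every period that elapsed since the last update.  As an
 * illustrative example, with sample_period == 100 and period_left having
 * climbed to +250 it returns nr = (100 + 250) / 100 = 3 and rewinds
 * period_left to 250 - 3 * 100 = -50.  Once throttled, the remaining
 * periods are simply not sampled.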
6521 */ 6522 break; 6523 } 6524 throttle = 1; 6525 } 6526 } 6527 6528 static void perf_swevent_event(struct perf_event *event, u64 nr, 6529 struct perf_sample_data *data, 6530 struct pt_regs *regs) 6531 { 6532 struct hw_perf_event *hwc = &event->hw; 6533 6534 local64_add(nr, &event->count); 6535 6536 if (!regs) 6537 return; 6538 6539 if (!is_sampling_event(event)) 6540 return; 6541 6542 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { 6543 data->period = nr; 6544 return perf_swevent_overflow(event, 1, data, regs); 6545 } else 6546 data->period = event->hw.last_period; 6547 6548 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) 6549 return perf_swevent_overflow(event, 1, data, regs); 6550 6551 if (local64_add_negative(nr, &hwc->period_left)) 6552 return; 6553 6554 perf_swevent_overflow(event, 0, data, regs); 6555 } 6556 6557 static int perf_exclude_event(struct perf_event *event, 6558 struct pt_regs *regs) 6559 { 6560 if (event->hw.state & PERF_HES_STOPPED) 6561 return 1; 6562 6563 if (regs) { 6564 if (event->attr.exclude_user && user_mode(regs)) 6565 return 1; 6566 6567 if (event->attr.exclude_kernel && !user_mode(regs)) 6568 return 1; 6569 } 6570 6571 return 0; 6572 } 6573 6574 static int perf_swevent_match(struct perf_event *event, 6575 enum perf_type_id type, 6576 u32 event_id, 6577 struct perf_sample_data *data, 6578 struct pt_regs *regs) 6579 { 6580 if (event->attr.type != type) 6581 return 0; 6582 6583 if (event->attr.config != event_id) 6584 return 0; 6585 6586 if (perf_exclude_event(event, regs)) 6587 return 0; 6588 6589 return 1; 6590 } 6591 6592 static inline u64 swevent_hash(u64 type, u32 event_id) 6593 { 6594 u64 val = event_id | (type << 32); 6595 6596 return hash_64(val, SWEVENT_HLIST_BITS); 6597 } 6598 6599 static inline struct hlist_head * 6600 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) 6601 { 6602 u64 hash = swevent_hash(type, event_id); 6603 6604 return &hlist->heads[hash]; 6605 } 6606 6607 /* For the read side: events when they trigger */ 6608 static inline struct hlist_head * 6609 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) 6610 { 6611 struct swevent_hlist *hlist; 6612 6613 hlist = rcu_dereference(swhash->swevent_hlist); 6614 if (!hlist) 6615 return NULL; 6616 6617 return __find_swevent_head(hlist, type, event_id); 6618 } 6619 6620 /* For the event head insertion and removal in the hlist */ 6621 static inline struct hlist_head * 6622 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) 6623 { 6624 struct swevent_hlist *hlist; 6625 u32 event_id = event->attr.config; 6626 u64 type = event->attr.type; 6627 6628 /* 6629 * Event scheduling is always serialized against hlist allocation 6630 * and release. Which makes the protected version suitable here. 6631 * The context lock guarantees that. 
6632 */ 6633 hlist = rcu_dereference_protected(swhash->swevent_hlist, 6634 lockdep_is_held(&event->ctx->lock)); 6635 if (!hlist) 6636 return NULL; 6637 6638 return __find_swevent_head(hlist, type, event_id); 6639 } 6640 6641 static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 6642 u64 nr, 6643 struct perf_sample_data *data, 6644 struct pt_regs *regs) 6645 { 6646 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6647 struct perf_event *event; 6648 struct hlist_head *head; 6649 6650 rcu_read_lock(); 6651 head = find_swevent_head_rcu(swhash, type, event_id); 6652 if (!head) 6653 goto end; 6654 6655 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6656 if (perf_swevent_match(event, type, event_id, data, regs)) 6657 perf_swevent_event(event, nr, data, regs); 6658 } 6659 end: 6660 rcu_read_unlock(); 6661 } 6662 6663 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]); 6664 6665 int perf_swevent_get_recursion_context(void) 6666 { 6667 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6668 6669 return get_recursion_context(swhash->recursion); 6670 } 6671 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); 6672 6673 inline void perf_swevent_put_recursion_context(int rctx) 6674 { 6675 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6676 6677 put_recursion_context(swhash->recursion, rctx); 6678 } 6679 6680 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6681 { 6682 struct perf_sample_data data; 6683 6684 if (WARN_ON_ONCE(!regs)) 6685 return; 6686 6687 perf_sample_data_init(&data, addr, 0); 6688 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 6689 } 6690 6691 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6692 { 6693 int rctx; 6694 6695 preempt_disable_notrace(); 6696 rctx = perf_swevent_get_recursion_context(); 6697 if (unlikely(rctx < 0)) 6698 goto fail; 6699 6700 ___perf_sw_event(event_id, nr, regs, addr); 6701 6702 perf_swevent_put_recursion_context(rctx); 6703 fail: 6704 preempt_enable_notrace(); 6705 } 6706 6707 static void perf_swevent_read(struct perf_event *event) 6708 { 6709 } 6710 6711 static int perf_swevent_add(struct perf_event *event, int flags) 6712 { 6713 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6714 struct hw_perf_event *hwc = &event->hw; 6715 struct hlist_head *head; 6716 6717 if (is_sampling_event(event)) { 6718 hwc->last_period = hwc->sample_period; 6719 perf_swevent_set_period(event); 6720 } 6721 6722 hwc->state = !(flags & PERF_EF_START); 6723 6724 head = find_swevent_head(swhash, event); 6725 if (!head) { 6726 /* 6727 * We can race with cpu hotplug code. Do not 6728 * WARN if the cpu just got unplugged. 
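 * swhash->online is cleared when the cpu goes down, so the
 * WARN_ON_ONCE() below should only trigger when the hlist is
 * missing on a cpu that is still marked online.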
6729 */ 6730 WARN_ON_ONCE(swhash->online); 6731 return -EINVAL; 6732 } 6733 6734 hlist_add_head_rcu(&event->hlist_entry, head); 6735 perf_event_update_userpage(event); 6736 6737 return 0; 6738 } 6739 6740 static void perf_swevent_del(struct perf_event *event, int flags) 6741 { 6742 hlist_del_rcu(&event->hlist_entry); 6743 } 6744 6745 static void perf_swevent_start(struct perf_event *event, int flags) 6746 { 6747 event->hw.state = 0; 6748 } 6749 6750 static void perf_swevent_stop(struct perf_event *event, int flags) 6751 { 6752 event->hw.state = PERF_HES_STOPPED; 6753 } 6754 6755 /* Deref the hlist from the update side */ 6756 static inline struct swevent_hlist * 6757 swevent_hlist_deref(struct swevent_htable *swhash) 6758 { 6759 return rcu_dereference_protected(swhash->swevent_hlist, 6760 lockdep_is_held(&swhash->hlist_mutex)); 6761 } 6762 6763 static void swevent_hlist_release(struct swevent_htable *swhash) 6764 { 6765 struct swevent_hlist *hlist = swevent_hlist_deref(swhash); 6766 6767 if (!hlist) 6768 return; 6769 6770 RCU_INIT_POINTER(swhash->swevent_hlist, NULL); 6771 kfree_rcu(hlist, rcu_head); 6772 } 6773 6774 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) 6775 { 6776 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6777 6778 mutex_lock(&swhash->hlist_mutex); 6779 6780 if (!--swhash->hlist_refcount) 6781 swevent_hlist_release(swhash); 6782 6783 mutex_unlock(&swhash->hlist_mutex); 6784 } 6785 6786 static void swevent_hlist_put(struct perf_event *event) 6787 { 6788 int cpu; 6789 6790 for_each_possible_cpu(cpu) 6791 swevent_hlist_put_cpu(event, cpu); 6792 } 6793 6794 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) 6795 { 6796 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 6797 int err = 0; 6798 6799 mutex_lock(&swhash->hlist_mutex); 6800 6801 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { 6802 struct swevent_hlist *hlist; 6803 6804 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); 6805 if (!hlist) { 6806 err = -ENOMEM; 6807 goto exit; 6808 } 6809 rcu_assign_pointer(swhash->swevent_hlist, hlist); 6810 } 6811 swhash->hlist_refcount++; 6812 exit: 6813 mutex_unlock(&swhash->hlist_mutex); 6814 6815 return err; 6816 } 6817 6818 static int swevent_hlist_get(struct perf_event *event) 6819 { 6820 int err; 6821 int cpu, failed_cpu; 6822 6823 get_online_cpus(); 6824 for_each_possible_cpu(cpu) { 6825 err = swevent_hlist_get_cpu(event, cpu); 6826 if (err) { 6827 failed_cpu = cpu; 6828 goto fail; 6829 } 6830 } 6831 put_online_cpus(); 6832 6833 return 0; 6834 fail: 6835 for_each_possible_cpu(cpu) { 6836 if (cpu == failed_cpu) 6837 break; 6838 swevent_hlist_put_cpu(event, cpu); 6839 } 6840 6841 put_online_cpus(); 6842 return err; 6843 } 6844 6845 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 6846 6847 static void sw_perf_event_destroy(struct perf_event *event) 6848 { 6849 u64 event_id = event->attr.config; 6850 6851 WARN_ON(event->parent); 6852 6853 static_key_slow_dec(&perf_swevent_enabled[event_id]); 6854 swevent_hlist_put(event); 6855 } 6856 6857 static int perf_swevent_init(struct perf_event *event) 6858 { 6859 u64 event_id = event->attr.config; 6860 6861 if (event->attr.type != PERF_TYPE_SOFTWARE) 6862 return -ENOENT; 6863 6864 /* 6865 * no branch sampling for software events 6866 */ 6867 if (has_branch_stack(event)) 6868 return -EOPNOTSUPP; 6869 6870 switch (event_id) { 6871 case PERF_COUNT_SW_CPU_CLOCK: 6872 case PERF_COUNT_SW_TASK_CLOCK: 6873 return -ENOENT; 6874 6875 default: 6876 break; 6877 } 6878 6879 
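/*
 * PERF_COUNT_SW_CPU_CLOCK and PERF_COUNT_SW_TASK_CLOCK are served
 * by the hrtimer based perf_cpu_clock and perf_task_clock PMUs
 * further down; the -ENOENT above makes perf_init_event() keep
 * walking the pmu list until one of those claims the event.
 */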
if (event_id >= PERF_COUNT_SW_MAX) 6880 return -ENOENT; 6881 6882 if (!event->parent) { 6883 int err; 6884 6885 err = swevent_hlist_get(event); 6886 if (err) 6887 return err; 6888 6889 static_key_slow_inc(&perf_swevent_enabled[event_id]); 6890 event->destroy = sw_perf_event_destroy; 6891 } 6892 6893 return 0; 6894 } 6895 6896 static struct pmu perf_swevent = { 6897 .task_ctx_nr = perf_sw_context, 6898 6899 .capabilities = PERF_PMU_CAP_NO_NMI, 6900 6901 .event_init = perf_swevent_init, 6902 .add = perf_swevent_add, 6903 .del = perf_swevent_del, 6904 .start = perf_swevent_start, 6905 .stop = perf_swevent_stop, 6906 .read = perf_swevent_read, 6907 }; 6908 6909 #ifdef CONFIG_EVENT_TRACING 6910 6911 static int perf_tp_filter_match(struct perf_event *event, 6912 struct perf_sample_data *data) 6913 { 6914 void *record = data->raw->data; 6915 6916 if (likely(!event->filter) || filter_match_preds(event->filter, record)) 6917 return 1; 6918 return 0; 6919 } 6920 6921 static int perf_tp_event_match(struct perf_event *event, 6922 struct perf_sample_data *data, 6923 struct pt_regs *regs) 6924 { 6925 if (event->hw.state & PERF_HES_STOPPED) 6926 return 0; 6927 /* 6928 * All tracepoints are from kernel-space. 6929 */ 6930 if (event->attr.exclude_kernel) 6931 return 0; 6932 6933 if (!perf_tp_filter_match(event, data)) 6934 return 0; 6935 6936 return 1; 6937 } 6938 6939 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, 6940 struct pt_regs *regs, struct hlist_head *head, int rctx, 6941 struct task_struct *task) 6942 { 6943 struct perf_sample_data data; 6944 struct perf_event *event; 6945 6946 struct perf_raw_record raw = { 6947 .size = entry_size, 6948 .data = record, 6949 }; 6950 6951 perf_sample_data_init(&data, addr, 0); 6952 data.raw = &raw; 6953 6954 hlist_for_each_entry_rcu(event, head, hlist_entry) { 6955 if (perf_tp_event_match(event, &data, regs)) 6956 perf_swevent_event(event, count, &data, regs); 6957 } 6958 6959 /* 6960 * If we got specified a target task, also iterate its context and 6961 * deliver this event there too. 
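 * (Typically used by tracepoints such as the sched switch/wakeup events,
 * where the sample should be charged to a task other than current.)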
6962 */ 6963 if (task && task != current) { 6964 struct perf_event_context *ctx; 6965 struct trace_entry *entry = record; 6966 6967 rcu_read_lock(); 6968 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); 6969 if (!ctx) 6970 goto unlock; 6971 6972 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 6973 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6974 continue; 6975 if (event->attr.config != entry->type) 6976 continue; 6977 if (perf_tp_event_match(event, &data, regs)) 6978 perf_swevent_event(event, count, &data, regs); 6979 } 6980 unlock: 6981 rcu_read_unlock(); 6982 } 6983 6984 perf_swevent_put_recursion_context(rctx); 6985 } 6986 EXPORT_SYMBOL_GPL(perf_tp_event); 6987 6988 static void tp_perf_event_destroy(struct perf_event *event) 6989 { 6990 perf_trace_destroy(event); 6991 } 6992 6993 static int perf_tp_event_init(struct perf_event *event) 6994 { 6995 int err; 6996 6997 if (event->attr.type != PERF_TYPE_TRACEPOINT) 6998 return -ENOENT; 6999 7000 /* 7001 * no branch sampling for tracepoint events 7002 */ 7003 if (has_branch_stack(event)) 7004 return -EOPNOTSUPP; 7005 7006 err = perf_trace_init(event); 7007 if (err) 7008 return err; 7009 7010 event->destroy = tp_perf_event_destroy; 7011 7012 return 0; 7013 } 7014 7015 static struct pmu perf_tracepoint = { 7016 .task_ctx_nr = perf_sw_context, 7017 7018 .event_init = perf_tp_event_init, 7019 .add = perf_trace_add, 7020 .del = perf_trace_del, 7021 .start = perf_swevent_start, 7022 .stop = perf_swevent_stop, 7023 .read = perf_swevent_read, 7024 }; 7025 7026 static inline void perf_tp_register(void) 7027 { 7028 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); 7029 } 7030 7031 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 7032 { 7033 char *filter_str; 7034 int ret; 7035 7036 if (event->attr.type != PERF_TYPE_TRACEPOINT) 7037 return -EINVAL; 7038 7039 filter_str = strndup_user(arg, PAGE_SIZE); 7040 if (IS_ERR(filter_str)) 7041 return PTR_ERR(filter_str); 7042 7043 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); 7044 7045 kfree(filter_str); 7046 return ret; 7047 } 7048 7049 static void perf_event_free_filter(struct perf_event *event) 7050 { 7051 ftrace_profile_free_filter(event); 7052 } 7053 7054 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 7055 { 7056 struct bpf_prog *prog; 7057 7058 if (event->attr.type != PERF_TYPE_TRACEPOINT) 7059 return -EINVAL; 7060 7061 if (event->tp_event->prog) 7062 return -EEXIST; 7063 7064 if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE)) 7065 /* bpf programs can only be attached to u/kprobes */ 7066 return -EINVAL; 7067 7068 prog = bpf_prog_get(prog_fd); 7069 if (IS_ERR(prog)) 7070 return PTR_ERR(prog); 7071 7072 if (prog->type != BPF_PROG_TYPE_KPROBE) { 7073 /* valid fd, but invalid bpf program type */ 7074 bpf_prog_put(prog); 7075 return -EINVAL; 7076 } 7077 7078 event->tp_event->prog = prog; 7079 7080 return 0; 7081 } 7082 7083 static void perf_event_free_bpf_prog(struct perf_event *event) 7084 { 7085 struct bpf_prog *prog; 7086 7087 if (!event->tp_event) 7088 return; 7089 7090 prog = event->tp_event->prog; 7091 if (prog) { 7092 event->tp_event->prog = NULL; 7093 bpf_prog_put(prog); 7094 } 7095 } 7096 7097 #else 7098 7099 static inline void perf_tp_register(void) 7100 { 7101 } 7102 7103 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 7104 { 7105 return -ENOENT; 7106 } 7107 7108 static void perf_event_free_filter(struct perf_event *event) 7109 { 
7110 } 7111 7112 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) 7113 { 7114 return -ENOENT; 7115 } 7116 7117 static void perf_event_free_bpf_prog(struct perf_event *event) 7118 { 7119 } 7120 #endif /* CONFIG_EVENT_TRACING */ 7121 7122 #ifdef CONFIG_HAVE_HW_BREAKPOINT 7123 void perf_bp_event(struct perf_event *bp, void *data) 7124 { 7125 struct perf_sample_data sample; 7126 struct pt_regs *regs = data; 7127 7128 perf_sample_data_init(&sample, bp->attr.bp_addr, 0); 7129 7130 if (!bp->hw.state && !perf_exclude_event(bp, regs)) 7131 perf_swevent_event(bp, 1, &sample, regs); 7132 } 7133 #endif 7134 7135 /* 7136 * hrtimer based swevent callback 7137 */ 7138 7139 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) 7140 { 7141 enum hrtimer_restart ret = HRTIMER_RESTART; 7142 struct perf_sample_data data; 7143 struct pt_regs *regs; 7144 struct perf_event *event; 7145 u64 period; 7146 7147 event = container_of(hrtimer, struct perf_event, hw.hrtimer); 7148 7149 if (event->state != PERF_EVENT_STATE_ACTIVE) 7150 return HRTIMER_NORESTART; 7151 7152 event->pmu->read(event); 7153 7154 perf_sample_data_init(&data, 0, event->hw.last_period); 7155 regs = get_irq_regs(); 7156 7157 if (regs && !perf_exclude_event(event, regs)) { 7158 if (!(event->attr.exclude_idle && is_idle_task(current))) 7159 if (__perf_event_overflow(event, 1, &data, regs)) 7160 ret = HRTIMER_NORESTART; 7161 } 7162 7163 period = max_t(u64, 10000, event->hw.sample_period); 7164 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 7165 7166 return ret; 7167 } 7168 7169 static void perf_swevent_start_hrtimer(struct perf_event *event) 7170 { 7171 struct hw_perf_event *hwc = &event->hw; 7172 s64 period; 7173 7174 if (!is_sampling_event(event)) 7175 return; 7176 7177 period = local64_read(&hwc->period_left); 7178 if (period) { 7179 if (period < 0) 7180 period = 10000; 7181 7182 local64_set(&hwc->period_left, 0); 7183 } else { 7184 period = max_t(u64, 10000, hwc->sample_period); 7185 } 7186 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), 7187 HRTIMER_MODE_REL_PINNED); 7188 } 7189 7190 static void perf_swevent_cancel_hrtimer(struct perf_event *event) 7191 { 7192 struct hw_perf_event *hwc = &event->hw; 7193 7194 if (is_sampling_event(event)) { 7195 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); 7196 local64_set(&hwc->period_left, ktime_to_ns(remaining)); 7197 7198 hrtimer_cancel(&hwc->hrtimer); 7199 } 7200 } 7201 7202 static void perf_swevent_init_hrtimer(struct perf_event *event) 7203 { 7204 struct hw_perf_event *hwc = &event->hw; 7205 7206 if (!is_sampling_event(event)) 7207 return; 7208 7209 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 7210 hwc->hrtimer.function = perf_swevent_hrtimer; 7211 7212 /* 7213 * Since hrtimers have a fixed rate, we can do a static freq->period 7214 * mapping and avoid the whole period adjust feedback stuff. 
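 * E.g. (illustrative numbers) a requested sample_freq of 4000 Hz becomes
 * a fixed period of NSEC_PER_SEC / 4000 = 250000 ns below, and attr.freq
 * is cleared so the generic frequency adjustment code leaves the event alone.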
7215 */ 7216 if (event->attr.freq) { 7217 long freq = event->attr.sample_freq; 7218 7219 event->attr.sample_period = NSEC_PER_SEC / freq; 7220 hwc->sample_period = event->attr.sample_period; 7221 local64_set(&hwc->period_left, hwc->sample_period); 7222 hwc->last_period = hwc->sample_period; 7223 event->attr.freq = 0; 7224 } 7225 } 7226 7227 /* 7228 * Software event: cpu wall time clock 7229 */ 7230 7231 static void cpu_clock_event_update(struct perf_event *event) 7232 { 7233 s64 prev; 7234 u64 now; 7235 7236 now = local_clock(); 7237 prev = local64_xchg(&event->hw.prev_count, now); 7238 local64_add(now - prev, &event->count); 7239 } 7240 7241 static void cpu_clock_event_start(struct perf_event *event, int flags) 7242 { 7243 local64_set(&event->hw.prev_count, local_clock()); 7244 perf_swevent_start_hrtimer(event); 7245 } 7246 7247 static void cpu_clock_event_stop(struct perf_event *event, int flags) 7248 { 7249 perf_swevent_cancel_hrtimer(event); 7250 cpu_clock_event_update(event); 7251 } 7252 7253 static int cpu_clock_event_add(struct perf_event *event, int flags) 7254 { 7255 if (flags & PERF_EF_START) 7256 cpu_clock_event_start(event, flags); 7257 perf_event_update_userpage(event); 7258 7259 return 0; 7260 } 7261 7262 static void cpu_clock_event_del(struct perf_event *event, int flags) 7263 { 7264 cpu_clock_event_stop(event, flags); 7265 } 7266 7267 static void cpu_clock_event_read(struct perf_event *event) 7268 { 7269 cpu_clock_event_update(event); 7270 } 7271 7272 static int cpu_clock_event_init(struct perf_event *event) 7273 { 7274 if (event->attr.type != PERF_TYPE_SOFTWARE) 7275 return -ENOENT; 7276 7277 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) 7278 return -ENOENT; 7279 7280 /* 7281 * no branch sampling for software events 7282 */ 7283 if (has_branch_stack(event)) 7284 return -EOPNOTSUPP; 7285 7286 perf_swevent_init_hrtimer(event); 7287 7288 return 0; 7289 } 7290 7291 static struct pmu perf_cpu_clock = { 7292 .task_ctx_nr = perf_sw_context, 7293 7294 .capabilities = PERF_PMU_CAP_NO_NMI, 7295 7296 .event_init = cpu_clock_event_init, 7297 .add = cpu_clock_event_add, 7298 .del = cpu_clock_event_del, 7299 .start = cpu_clock_event_start, 7300 .stop = cpu_clock_event_stop, 7301 .read = cpu_clock_event_read, 7302 }; 7303 7304 /* 7305 * Software event: task time clock 7306 */ 7307 7308 static void task_clock_event_update(struct perf_event *event, u64 now) 7309 { 7310 u64 prev; 7311 s64 delta; 7312 7313 prev = local64_xchg(&event->hw.prev_count, now); 7314 delta = now - prev; 7315 local64_add(delta, &event->count); 7316 } 7317 7318 static void task_clock_event_start(struct perf_event *event, int flags) 7319 { 7320 local64_set(&event->hw.prev_count, event->ctx->time); 7321 perf_swevent_start_hrtimer(event); 7322 } 7323 7324 static void task_clock_event_stop(struct perf_event *event, int flags) 7325 { 7326 perf_swevent_cancel_hrtimer(event); 7327 task_clock_event_update(event, event->ctx->time); 7328 } 7329 7330 static int task_clock_event_add(struct perf_event *event, int flags) 7331 { 7332 if (flags & PERF_EF_START) 7333 task_clock_event_start(event, flags); 7334 perf_event_update_userpage(event); 7335 7336 return 0; 7337 } 7338 7339 static void task_clock_event_del(struct perf_event *event, int flags) 7340 { 7341 task_clock_event_stop(event, PERF_EF_UPDATE); 7342 } 7343 7344 static void task_clock_event_read(struct perf_event *event) 7345 { 7346 u64 now = perf_clock(); 7347 u64 delta = now - event->ctx->timestamp; 7348 u64 time = event->ctx->time + delta; 7349 7350 
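/*
 * ctx->time was last advanced at ctx->timestamp; add the time that
 * has passed since then so reads between context time updates still
 * see the clock move forward.
 */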
task_clock_event_update(event, time); 7351 } 7352 7353 static int task_clock_event_init(struct perf_event *event) 7354 { 7355 if (event->attr.type != PERF_TYPE_SOFTWARE) 7356 return -ENOENT; 7357 7358 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) 7359 return -ENOENT; 7360 7361 /* 7362 * no branch sampling for software events 7363 */ 7364 if (has_branch_stack(event)) 7365 return -EOPNOTSUPP; 7366 7367 perf_swevent_init_hrtimer(event); 7368 7369 return 0; 7370 } 7371 7372 static struct pmu perf_task_clock = { 7373 .task_ctx_nr = perf_sw_context, 7374 7375 .capabilities = PERF_PMU_CAP_NO_NMI, 7376 7377 .event_init = task_clock_event_init, 7378 .add = task_clock_event_add, 7379 .del = task_clock_event_del, 7380 .start = task_clock_event_start, 7381 .stop = task_clock_event_stop, 7382 .read = task_clock_event_read, 7383 }; 7384 7385 static void perf_pmu_nop_void(struct pmu *pmu) 7386 { 7387 } 7388 7389 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags) 7390 { 7391 } 7392 7393 static int perf_pmu_nop_int(struct pmu *pmu) 7394 { 7395 return 0; 7396 } 7397 7398 static DEFINE_PER_CPU(unsigned int, nop_txn_flags); 7399 7400 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) 7401 { 7402 __this_cpu_write(nop_txn_flags, flags); 7403 7404 if (flags & ~PERF_PMU_TXN_ADD) 7405 return; 7406 7407 perf_pmu_disable(pmu); 7408 } 7409 7410 static int perf_pmu_commit_txn(struct pmu *pmu) 7411 { 7412 unsigned int flags = __this_cpu_read(nop_txn_flags); 7413 7414 __this_cpu_write(nop_txn_flags, 0); 7415 7416 if (flags & ~PERF_PMU_TXN_ADD) 7417 return 0; 7418 7419 perf_pmu_enable(pmu); 7420 return 0; 7421 } 7422 7423 static void perf_pmu_cancel_txn(struct pmu *pmu) 7424 { 7425 unsigned int flags = __this_cpu_read(nop_txn_flags); 7426 7427 __this_cpu_write(nop_txn_flags, 0); 7428 7429 if (flags & ~PERF_PMU_TXN_ADD) 7430 return; 7431 7432 perf_pmu_enable(pmu); 7433 } 7434 7435 static int perf_event_idx_default(struct perf_event *event) 7436 { 7437 return 0; 7438 } 7439 7440 /* 7441 * Ensures all contexts with the same task_ctx_nr have the same 7442 * pmu_cpu_context too. 7443 */ 7444 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) 7445 { 7446 struct pmu *pmu; 7447 7448 if (ctxn < 0) 7449 return NULL; 7450 7451 list_for_each_entry(pmu, &pmus, entry) { 7452 if (pmu->task_ctx_nr == ctxn) 7453 return pmu->pmu_cpu_context; 7454 } 7455 7456 return NULL; 7457 } 7458 7459 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) 7460 { 7461 int cpu; 7462 7463 for_each_possible_cpu(cpu) { 7464 struct perf_cpu_context *cpuctx; 7465 7466 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7467 7468 if (cpuctx->unique_pmu == old_pmu) 7469 cpuctx->unique_pmu = pmu; 7470 } 7471 } 7472 7473 static void free_pmu_context(struct pmu *pmu) 7474 { 7475 struct pmu *i; 7476 7477 mutex_lock(&pmus_lock); 7478 /* 7479 * Like a real lame refcount. 
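 * If another pmu still shares this cpu context, hand the unique_pmu
 * role over to it and keep the context alive; it is only freed once
 * no registered pmu references it any more.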
7480 */ 7481 list_for_each_entry(i, &pmus, entry) { 7482 if (i->pmu_cpu_context == pmu->pmu_cpu_context) { 7483 update_pmu_context(i, pmu); 7484 goto out; 7485 } 7486 } 7487 7488 free_percpu(pmu->pmu_cpu_context); 7489 out: 7490 mutex_unlock(&pmus_lock); 7491 } 7492 static struct idr pmu_idr; 7493 7494 static ssize_t 7495 type_show(struct device *dev, struct device_attribute *attr, char *page) 7496 { 7497 struct pmu *pmu = dev_get_drvdata(dev); 7498 7499 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); 7500 } 7501 static DEVICE_ATTR_RO(type); 7502 7503 static ssize_t 7504 perf_event_mux_interval_ms_show(struct device *dev, 7505 struct device_attribute *attr, 7506 char *page) 7507 { 7508 struct pmu *pmu = dev_get_drvdata(dev); 7509 7510 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); 7511 } 7512 7513 static DEFINE_MUTEX(mux_interval_mutex); 7514 7515 static ssize_t 7516 perf_event_mux_interval_ms_store(struct device *dev, 7517 struct device_attribute *attr, 7518 const char *buf, size_t count) 7519 { 7520 struct pmu *pmu = dev_get_drvdata(dev); 7521 int timer, cpu, ret; 7522 7523 ret = kstrtoint(buf, 0, &timer); 7524 if (ret) 7525 return ret; 7526 7527 if (timer < 1) 7528 return -EINVAL; 7529 7530 /* same value, noting to do */ 7531 if (timer == pmu->hrtimer_interval_ms) 7532 return count; 7533 7534 mutex_lock(&mux_interval_mutex); 7535 pmu->hrtimer_interval_ms = timer; 7536 7537 /* update all cpuctx for this PMU */ 7538 get_online_cpus(); 7539 for_each_online_cpu(cpu) { 7540 struct perf_cpu_context *cpuctx; 7541 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7542 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); 7543 7544 cpu_function_call(cpu, 7545 (remote_function_f)perf_mux_hrtimer_restart, cpuctx); 7546 } 7547 put_online_cpus(); 7548 mutex_unlock(&mux_interval_mutex); 7549 7550 return count; 7551 } 7552 static DEVICE_ATTR_RW(perf_event_mux_interval_ms); 7553 7554 static struct attribute *pmu_dev_attrs[] = { 7555 &dev_attr_type.attr, 7556 &dev_attr_perf_event_mux_interval_ms.attr, 7557 NULL, 7558 }; 7559 ATTRIBUTE_GROUPS(pmu_dev); 7560 7561 static int pmu_bus_running; 7562 static struct bus_type pmu_bus = { 7563 .name = "event_source", 7564 .dev_groups = pmu_dev_groups, 7565 }; 7566 7567 static void pmu_dev_release(struct device *dev) 7568 { 7569 kfree(dev); 7570 } 7571 7572 static int pmu_dev_alloc(struct pmu *pmu) 7573 { 7574 int ret = -ENOMEM; 7575 7576 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); 7577 if (!pmu->dev) 7578 goto out; 7579 7580 pmu->dev->groups = pmu->attr_groups; 7581 device_initialize(pmu->dev); 7582 ret = dev_set_name(pmu->dev, "%s", pmu->name); 7583 if (ret) 7584 goto free_dev; 7585 7586 dev_set_drvdata(pmu->dev, pmu); 7587 pmu->dev->bus = &pmu_bus; 7588 pmu->dev->release = pmu_dev_release; 7589 ret = device_add(pmu->dev); 7590 if (ret) 7591 goto free_dev; 7592 7593 out: 7594 return ret; 7595 7596 free_dev: 7597 put_device(pmu->dev); 7598 goto out; 7599 } 7600 7601 static struct lock_class_key cpuctx_mutex; 7602 static struct lock_class_key cpuctx_lock; 7603 7604 int perf_pmu_register(struct pmu *pmu, const char *name, int type) 7605 { 7606 int cpu, ret; 7607 7608 mutex_lock(&pmus_lock); 7609 ret = -ENOMEM; 7610 pmu->pmu_disable_count = alloc_percpu(int); 7611 if (!pmu->pmu_disable_count) 7612 goto unlock; 7613 7614 pmu->type = -1; 7615 if (!name) 7616 goto skip_type; 7617 pmu->name = name; 7618 7619 if (type < 0) { 7620 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL); 7621 if (type < 0) { 7622 ret = 
type; 7623 goto free_pdc; 7624 } 7625 } 7626 pmu->type = type; 7627 7628 if (pmu_bus_running) { 7629 ret = pmu_dev_alloc(pmu); 7630 if (ret) 7631 goto free_idr; 7632 } 7633 7634 skip_type: 7635 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); 7636 if (pmu->pmu_cpu_context) 7637 goto got_cpu_context; 7638 7639 ret = -ENOMEM; 7640 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); 7641 if (!pmu->pmu_cpu_context) 7642 goto free_dev; 7643 7644 for_each_possible_cpu(cpu) { 7645 struct perf_cpu_context *cpuctx; 7646 7647 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 7648 __perf_event_init_context(&cpuctx->ctx); 7649 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); 7650 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); 7651 cpuctx->ctx.pmu = pmu; 7652 7653 __perf_mux_hrtimer_init(cpuctx, cpu); 7654 7655 cpuctx->unique_pmu = pmu; 7656 } 7657 7658 got_cpu_context: 7659 if (!pmu->start_txn) { 7660 if (pmu->pmu_enable) { 7661 /* 7662 * If we have pmu_enable/pmu_disable calls, install 7663 * transaction stubs that use that to try and batch 7664 * hardware accesses. 7665 */ 7666 pmu->start_txn = perf_pmu_start_txn; 7667 pmu->commit_txn = perf_pmu_commit_txn; 7668 pmu->cancel_txn = perf_pmu_cancel_txn; 7669 } else { 7670 pmu->start_txn = perf_pmu_nop_txn; 7671 pmu->commit_txn = perf_pmu_nop_int; 7672 pmu->cancel_txn = perf_pmu_nop_void; 7673 } 7674 } 7675 7676 if (!pmu->pmu_enable) { 7677 pmu->pmu_enable = perf_pmu_nop_void; 7678 pmu->pmu_disable = perf_pmu_nop_void; 7679 } 7680 7681 if (!pmu->event_idx) 7682 pmu->event_idx = perf_event_idx_default; 7683 7684 list_add_rcu(&pmu->entry, &pmus); 7685 atomic_set(&pmu->exclusive_cnt, 0); 7686 ret = 0; 7687 unlock: 7688 mutex_unlock(&pmus_lock); 7689 7690 return ret; 7691 7692 free_dev: 7693 device_del(pmu->dev); 7694 put_device(pmu->dev); 7695 7696 free_idr: 7697 if (pmu->type >= PERF_TYPE_MAX) 7698 idr_remove(&pmu_idr, pmu->type); 7699 7700 free_pdc: 7701 free_percpu(pmu->pmu_disable_count); 7702 goto unlock; 7703 } 7704 EXPORT_SYMBOL_GPL(perf_pmu_register); 7705 7706 void perf_pmu_unregister(struct pmu *pmu) 7707 { 7708 mutex_lock(&pmus_lock); 7709 list_del_rcu(&pmu->entry); 7710 mutex_unlock(&pmus_lock); 7711 7712 /* 7713 * We dereference the pmu list under both SRCU and regular RCU, so 7714 * synchronize against both of those. 7715 */ 7716 synchronize_srcu(&pmus_srcu); 7717 synchronize_rcu(); 7718 7719 free_percpu(pmu->pmu_disable_count); 7720 if (pmu->type >= PERF_TYPE_MAX) 7721 idr_remove(&pmu_idr, pmu->type); 7722 device_del(pmu->dev); 7723 put_device(pmu->dev); 7724 free_pmu_context(pmu); 7725 } 7726 EXPORT_SYMBOL_GPL(perf_pmu_unregister); 7727 7728 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) 7729 { 7730 struct perf_event_context *ctx = NULL; 7731 int ret; 7732 7733 if (!try_module_get(pmu->module)) 7734 return -ENODEV; 7735 7736 if (event->group_leader != event) { 7737 /* 7738 * This ctx->mutex can nest when we're called through 7739 * inheritance. See the perf_event_ctx_lock_nested() comment. 
7740 */ 7741 ctx = perf_event_ctx_lock_nested(event->group_leader, 7742 SINGLE_DEPTH_NESTING); 7743 BUG_ON(!ctx); 7744 } 7745 7746 event->pmu = pmu; 7747 ret = pmu->event_init(event); 7748 7749 if (ctx) 7750 perf_event_ctx_unlock(event->group_leader, ctx); 7751 7752 if (ret) 7753 module_put(pmu->module); 7754 7755 return ret; 7756 } 7757 7758 static struct pmu *perf_init_event(struct perf_event *event) 7759 { 7760 struct pmu *pmu = NULL; 7761 int idx; 7762 int ret; 7763 7764 idx = srcu_read_lock(&pmus_srcu); 7765 7766 rcu_read_lock(); 7767 pmu = idr_find(&pmu_idr, event->attr.type); 7768 rcu_read_unlock(); 7769 if (pmu) { 7770 ret = perf_try_init_event(pmu, event); 7771 if (ret) 7772 pmu = ERR_PTR(ret); 7773 goto unlock; 7774 } 7775 7776 list_for_each_entry_rcu(pmu, &pmus, entry) { 7777 ret = perf_try_init_event(pmu, event); 7778 if (!ret) 7779 goto unlock; 7780 7781 if (ret != -ENOENT) { 7782 pmu = ERR_PTR(ret); 7783 goto unlock; 7784 } 7785 } 7786 pmu = ERR_PTR(-ENOENT); 7787 unlock: 7788 srcu_read_unlock(&pmus_srcu, idx); 7789 7790 return pmu; 7791 } 7792 7793 static void account_event_cpu(struct perf_event *event, int cpu) 7794 { 7795 if (event->parent) 7796 return; 7797 7798 if (is_cgroup_event(event)) 7799 atomic_inc(&per_cpu(perf_cgroup_events, cpu)); 7800 } 7801 7802 static void account_event(struct perf_event *event) 7803 { 7804 if (event->parent) 7805 return; 7806 7807 if (event->attach_state & PERF_ATTACH_TASK) 7808 static_key_slow_inc(&perf_sched_events.key); 7809 if (event->attr.mmap || event->attr.mmap_data) 7810 atomic_inc(&nr_mmap_events); 7811 if (event->attr.comm) 7812 atomic_inc(&nr_comm_events); 7813 if (event->attr.task) 7814 atomic_inc(&nr_task_events); 7815 if (event->attr.freq) { 7816 if (atomic_inc_return(&nr_freq_events) == 1) 7817 tick_nohz_full_kick_all(); 7818 } 7819 if (event->attr.context_switch) { 7820 atomic_inc(&nr_switch_events); 7821 static_key_slow_inc(&perf_sched_events.key); 7822 } 7823 if (has_branch_stack(event)) 7824 static_key_slow_inc(&perf_sched_events.key); 7825 if (is_cgroup_event(event)) 7826 static_key_slow_inc(&perf_sched_events.key); 7827 7828 account_event_cpu(event, event->cpu); 7829 } 7830 7831 /* 7832 * Allocate and initialize a event structure 7833 */ 7834 static struct perf_event * 7835 perf_event_alloc(struct perf_event_attr *attr, int cpu, 7836 struct task_struct *task, 7837 struct perf_event *group_leader, 7838 struct perf_event *parent_event, 7839 perf_overflow_handler_t overflow_handler, 7840 void *context, int cgroup_fd) 7841 { 7842 struct pmu *pmu; 7843 struct perf_event *event; 7844 struct hw_perf_event *hwc; 7845 long err = -EINVAL; 7846 7847 if ((unsigned)cpu >= nr_cpu_ids) { 7848 if (!task || cpu != -1) 7849 return ERR_PTR(-EINVAL); 7850 } 7851 7852 event = kzalloc(sizeof(*event), GFP_KERNEL); 7853 if (!event) 7854 return ERR_PTR(-ENOMEM); 7855 7856 /* 7857 * Single events are their own group leaders, with an 7858 * empty sibling list: 7859 */ 7860 if (!group_leader) 7861 group_leader = event; 7862 7863 mutex_init(&event->child_mutex); 7864 INIT_LIST_HEAD(&event->child_list); 7865 7866 INIT_LIST_HEAD(&event->group_entry); 7867 INIT_LIST_HEAD(&event->event_entry); 7868 INIT_LIST_HEAD(&event->sibling_list); 7869 INIT_LIST_HEAD(&event->rb_entry); 7870 INIT_LIST_HEAD(&event->active_entry); 7871 INIT_HLIST_NODE(&event->hlist_entry); 7872 7873 7874 init_waitqueue_head(&event->waitq); 7875 init_irq_work(&event->pending, perf_pending_event); 7876 7877 mutex_init(&event->mmap_mutex); 7878 7879 atomic_long_set(&event->refcount, 1); 
7880 event->cpu = cpu; 7881 event->attr = *attr; 7882 event->group_leader = group_leader; 7883 event->pmu = NULL; 7884 event->oncpu = -1; 7885 7886 event->parent = parent_event; 7887 7888 event->ns = get_pid_ns(task_active_pid_ns(current)); 7889 event->id = atomic64_inc_return(&perf_event_id); 7890 7891 event->state = PERF_EVENT_STATE_INACTIVE; 7892 7893 if (task) { 7894 event->attach_state = PERF_ATTACH_TASK; 7895 /* 7896 * XXX pmu::event_init needs to know what task to account to 7897 * and we cannot use the ctx information because we need the 7898 * pmu before we get a ctx. 7899 */ 7900 event->hw.target = task; 7901 } 7902 7903 event->clock = &local_clock; 7904 if (parent_event) 7905 event->clock = parent_event->clock; 7906 7907 if (!overflow_handler && parent_event) { 7908 overflow_handler = parent_event->overflow_handler; 7909 context = parent_event->overflow_handler_context; 7910 } 7911 7912 event->overflow_handler = overflow_handler; 7913 event->overflow_handler_context = context; 7914 7915 perf_event__state_init(event); 7916 7917 pmu = NULL; 7918 7919 hwc = &event->hw; 7920 hwc->sample_period = attr->sample_period; 7921 if (attr->freq && attr->sample_freq) 7922 hwc->sample_period = 1; 7923 hwc->last_period = hwc->sample_period; 7924 7925 local64_set(&hwc->period_left, hwc->sample_period); 7926 7927 /* 7928 * we currently do not support PERF_FORMAT_GROUP on inherited events 7929 */ 7930 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 7931 goto err_ns; 7932 7933 if (!has_branch_stack(event)) 7934 event->attr.branch_sample_type = 0; 7935 7936 if (cgroup_fd != -1) { 7937 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); 7938 if (err) 7939 goto err_ns; 7940 } 7941 7942 pmu = perf_init_event(event); 7943 if (!pmu) 7944 goto err_ns; 7945 else if (IS_ERR(pmu)) { 7946 err = PTR_ERR(pmu); 7947 goto err_ns; 7948 } 7949 7950 err = exclusive_event_init(event); 7951 if (err) 7952 goto err_pmu; 7953 7954 if (!event->parent) { 7955 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { 7956 err = get_callchain_buffers(); 7957 if (err) 7958 goto err_per_task; 7959 } 7960 } 7961 7962 return event; 7963 7964 err_per_task: 7965 exclusive_event_destroy(event); 7966 7967 err_pmu: 7968 if (event->destroy) 7969 event->destroy(event); 7970 module_put(pmu->module); 7971 err_ns: 7972 if (is_cgroup_event(event)) 7973 perf_detach_cgroup(event); 7974 if (event->ns) 7975 put_pid_ns(event->ns); 7976 kfree(event); 7977 7978 return ERR_PTR(err); 7979 } 7980 7981 static int perf_copy_attr(struct perf_event_attr __user *uattr, 7982 struct perf_event_attr *attr) 7983 { 7984 u32 size; 7985 int ret; 7986 7987 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) 7988 return -EFAULT; 7989 7990 /* 7991 * zero the full structure, so that a short copy will be nice. 7992 */ 7993 memset(attr, 0, sizeof(*attr)); 7994 7995 ret = get_user(size, &uattr->size); 7996 if (ret) 7997 return ret; 7998 7999 if (size > PAGE_SIZE) /* silly large */ 8000 goto err_size; 8001 8002 if (!size) /* abi compat */ 8003 size = PERF_ATTR_SIZE_VER0; 8004 8005 if (size < PERF_ATTR_SIZE_VER0) 8006 goto err_size; 8007 8008 /* 8009 * If we're handed a bigger struct than we know of, 8010 * ensure all the unknown bits are 0 - i.e. new 8011 * user-space does not rely on any kernel feature 8012 * extensions we dont know about yet. 
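 * The extra tail is probed byte by byte below; once it is known to be
 * all zeroes, size is clamped back to sizeof(*attr) for the actual copy.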
8013 */ 8014 if (size > sizeof(*attr)) { 8015 unsigned char __user *addr; 8016 unsigned char __user *end; 8017 unsigned char val; 8018 8019 addr = (void __user *)uattr + sizeof(*attr); 8020 end = (void __user *)uattr + size; 8021 8022 for (; addr < end; addr++) { 8023 ret = get_user(val, addr); 8024 if (ret) 8025 return ret; 8026 if (val) 8027 goto err_size; 8028 } 8029 size = sizeof(*attr); 8030 } 8031 8032 ret = copy_from_user(attr, uattr, size); 8033 if (ret) 8034 return -EFAULT; 8035 8036 if (attr->__reserved_1) 8037 return -EINVAL; 8038 8039 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) 8040 return -EINVAL; 8041 8042 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) 8043 return -EINVAL; 8044 8045 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { 8046 u64 mask = attr->branch_sample_type; 8047 8048 /* only using defined bits */ 8049 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) 8050 return -EINVAL; 8051 8052 /* at least one branch bit must be set */ 8053 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) 8054 return -EINVAL; 8055 8056 /* propagate priv level, when not set for branch */ 8057 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { 8058 8059 /* exclude_kernel checked on syscall entry */ 8060 if (!attr->exclude_kernel) 8061 mask |= PERF_SAMPLE_BRANCH_KERNEL; 8062 8063 if (!attr->exclude_user) 8064 mask |= PERF_SAMPLE_BRANCH_USER; 8065 8066 if (!attr->exclude_hv) 8067 mask |= PERF_SAMPLE_BRANCH_HV; 8068 /* 8069 * adjust user setting (for HW filter setup) 8070 */ 8071 attr->branch_sample_type = mask; 8072 } 8073 /* privileged levels capture (kernel, hv): check permissions */ 8074 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) 8075 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 8076 return -EACCES; 8077 } 8078 8079 if (attr->sample_type & PERF_SAMPLE_REGS_USER) { 8080 ret = perf_reg_validate(attr->sample_regs_user); 8081 if (ret) 8082 return ret; 8083 } 8084 8085 if (attr->sample_type & PERF_SAMPLE_STACK_USER) { 8086 if (!arch_perf_have_user_stack_dump()) 8087 return -ENOSYS; 8088 8089 /* 8090 * We have __u32 type for the size, but so far 8091 * we can only use __u16 as maximum due to the 8092 * __u16 sample size limit. 8093 */ 8094 if (attr->sample_stack_user >= USHRT_MAX) 8095 ret = -EINVAL; 8096 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) 8097 ret = -EINVAL; 8098 } 8099 8100 if (attr->sample_type & PERF_SAMPLE_REGS_INTR) 8101 ret = perf_reg_validate(attr->sample_regs_intr); 8102 out: 8103 return ret; 8104 8105 err_size: 8106 put_user(sizeof(*attr), &uattr->size); 8107 ret = -E2BIG; 8108 goto out; 8109 } 8110 8111 static int 8112 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) 8113 { 8114 struct ring_buffer *rb = NULL; 8115 int ret = -EINVAL; 8116 8117 if (!output_event) 8118 goto set; 8119 8120 /* don't allow circular references */ 8121 if (event == output_event) 8122 goto out; 8123 8124 /* 8125 * Don't allow cross-cpu buffers 8126 */ 8127 if (output_event->cpu != event->cpu) 8128 goto out; 8129 8130 /* 8131 * If its not a per-cpu rb, it must be the same task. 8132 */ 8133 if (output_event->cpu == -1 && output_event->ctx != event->ctx) 8134 goto out; 8135 8136 /* 8137 * Mixing clocks in the same buffer is trouble you don't need. 
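 * (E.g. an event switched to CLOCK_MONOTONIC_RAW via perf_event_set_clock()
 * cannot share a ring buffer with an event using the default perf clock;
 * their timestamps would not be comparable.)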
8138 */ 8139 if (output_event->clock != event->clock) 8140 goto out; 8141 8142 /* 8143 * If both events generate aux data, they must be on the same PMU 8144 */ 8145 if (has_aux(event) && has_aux(output_event) && 8146 event->pmu != output_event->pmu) 8147 goto out; 8148 8149 set: 8150 mutex_lock(&event->mmap_mutex); 8151 /* Can't redirect output if we've got an active mmap() */ 8152 if (atomic_read(&event->mmap_count)) 8153 goto unlock; 8154 8155 if (output_event) { 8156 /* get the rb we want to redirect to */ 8157 rb = ring_buffer_get(output_event); 8158 if (!rb) 8159 goto unlock; 8160 } 8161 8162 ring_buffer_attach(event, rb); 8163 8164 ret = 0; 8165 unlock: 8166 mutex_unlock(&event->mmap_mutex); 8167 8168 out: 8169 return ret; 8170 } 8171 8172 static void mutex_lock_double(struct mutex *a, struct mutex *b) 8173 { 8174 if (b < a) 8175 swap(a, b); 8176 8177 mutex_lock(a); 8178 mutex_lock_nested(b, SINGLE_DEPTH_NESTING); 8179 } 8180 8181 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) 8182 { 8183 bool nmi_safe = false; 8184 8185 switch (clk_id) { 8186 case CLOCK_MONOTONIC: 8187 event->clock = &ktime_get_mono_fast_ns; 8188 nmi_safe = true; 8189 break; 8190 8191 case CLOCK_MONOTONIC_RAW: 8192 event->clock = &ktime_get_raw_fast_ns; 8193 nmi_safe = true; 8194 break; 8195 8196 case CLOCK_REALTIME: 8197 event->clock = &ktime_get_real_ns; 8198 break; 8199 8200 case CLOCK_BOOTTIME: 8201 event->clock = &ktime_get_boot_ns; 8202 break; 8203 8204 case CLOCK_TAI: 8205 event->clock = &ktime_get_tai_ns; 8206 break; 8207 8208 default: 8209 return -EINVAL; 8210 } 8211 8212 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) 8213 return -EINVAL; 8214 8215 return 0; 8216 } 8217 8218 /** 8219 * sys_perf_event_open - open a performance event, associate it to a task/cpu 8220 * 8221 * @attr_uptr: event_id type attributes for monitoring/sampling 8222 * @pid: target pid 8223 * @cpu: target cpu 8224 * @group_fd: group leader event fd 8225 */ 8226 SYSCALL_DEFINE5(perf_event_open, 8227 struct perf_event_attr __user *, attr_uptr, 8228 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 8229 { 8230 struct perf_event *group_leader = NULL, *output_event = NULL; 8231 struct perf_event *event, *sibling; 8232 struct perf_event_attr attr; 8233 struct perf_event_context *ctx, *uninitialized_var(gctx); 8234 struct file *event_file = NULL; 8235 struct fd group = {NULL, 0}; 8236 struct task_struct *task = NULL; 8237 struct pmu *pmu; 8238 int event_fd; 8239 int move_group = 0; 8240 int err; 8241 int f_flags = O_RDWR; 8242 int cgroup_fd = -1; 8243 8244 /* for future expandability... */ 8245 if (flags & ~PERF_FLAG_ALL) 8246 return -EINVAL; 8247 8248 err = perf_copy_attr(attr_uptr, &attr); 8249 if (err) 8250 return err; 8251 8252 if (!attr.exclude_kernel) { 8253 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 8254 return -EACCES; 8255 } 8256 8257 if (attr.freq) { 8258 if (attr.sample_freq > sysctl_perf_event_sample_rate) 8259 return -EINVAL; 8260 } else { 8261 if (attr.sample_period & (1ULL << 63)) 8262 return -EINVAL; 8263 } 8264 8265 /* 8266 * In cgroup mode, the pid argument is used to pass the fd 8267 * opened to the cgroup directory in cgroupfs. The cpu argument 8268 * designates the cpu on which to monitor threads from that 8269 * cgroup. 
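 * An illustrative call from user space (path and fd value made up):
 *
 *	cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
 *	perf_event_open(&attr, cgrp_fd, 3, -1, PERF_FLAG_PID_CGROUP);
 *
 * monitors CPU 3 only for tasks in that cgroup, which is why both a
 * valid fd and a valid cpu are required below.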
8270 */ 8271 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) 8272 return -EINVAL; 8273 8274 if (flags & PERF_FLAG_FD_CLOEXEC) 8275 f_flags |= O_CLOEXEC; 8276 8277 event_fd = get_unused_fd_flags(f_flags); 8278 if (event_fd < 0) 8279 return event_fd; 8280 8281 if (group_fd != -1) { 8282 err = perf_fget_light(group_fd, &group); 8283 if (err) 8284 goto err_fd; 8285 group_leader = group.file->private_data; 8286 if (flags & PERF_FLAG_FD_OUTPUT) 8287 output_event = group_leader; 8288 if (flags & PERF_FLAG_FD_NO_GROUP) 8289 group_leader = NULL; 8290 } 8291 8292 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { 8293 task = find_lively_task_by_vpid(pid); 8294 if (IS_ERR(task)) { 8295 err = PTR_ERR(task); 8296 goto err_group_fd; 8297 } 8298 } 8299 8300 if (task && group_leader && 8301 group_leader->attr.inherit != attr.inherit) { 8302 err = -EINVAL; 8303 goto err_task; 8304 } 8305 8306 get_online_cpus(); 8307 8308 if (flags & PERF_FLAG_PID_CGROUP) 8309 cgroup_fd = pid; 8310 8311 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, 8312 NULL, NULL, cgroup_fd); 8313 if (IS_ERR(event)) { 8314 err = PTR_ERR(event); 8315 goto err_cpus; 8316 } 8317 8318 if (is_sampling_event(event)) { 8319 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { 8320 err = -ENOTSUPP; 8321 goto err_alloc; 8322 } 8323 } 8324 8325 account_event(event); 8326 8327 /* 8328 * Special case software events and allow them to be part of 8329 * any hardware group. 8330 */ 8331 pmu = event->pmu; 8332 8333 if (attr.use_clockid) { 8334 err = perf_event_set_clock(event, attr.clockid); 8335 if (err) 8336 goto err_alloc; 8337 } 8338 8339 if (group_leader && 8340 (is_software_event(event) != is_software_event(group_leader))) { 8341 if (is_software_event(event)) { 8342 /* 8343 * If event and group_leader are not both a software 8344 * event, and event is, then group leader is not. 8345 * 8346 * Allow the addition of software events to !software 8347 * groups, this is safe because software events never 8348 * fail to schedule. 8349 */ 8350 pmu = group_leader->pmu; 8351 } else if (is_software_event(group_leader) && 8352 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { 8353 /* 8354 * In case the group is a pure software group, and we 8355 * try to add a hardware event, move the whole group to 8356 * the hardware context. 8357 */ 8358 move_group = 1; 8359 } 8360 } 8361 8362 /* 8363 * Get the target context (task or percpu): 8364 */ 8365 ctx = find_get_context(pmu, task, event); 8366 if (IS_ERR(ctx)) { 8367 err = PTR_ERR(ctx); 8368 goto err_alloc; 8369 } 8370 8371 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) { 8372 err = -EBUSY; 8373 goto err_context; 8374 } 8375 8376 if (task) { 8377 put_task_struct(task); 8378 task = NULL; 8379 } 8380 8381 /* 8382 * Look up the group leader (we will attach this event to it): 8383 */ 8384 if (group_leader) { 8385 err = -EINVAL; 8386 8387 /* 8388 * Do not allow a recursive hierarchy (this new sibling 8389 * becoming part of another group-sibling): 8390 */ 8391 if (group_leader->group_leader != group_leader) 8392 goto err_context; 8393 8394 /* All events in a group should have the same clock */ 8395 if (group_leader->clock != event->clock) 8396 goto err_context; 8397 8398 /* 8399 * Do not allow to attach to a group in a different 8400 * task or CPU context: 8401 */ 8402 if (move_group) { 8403 /* 8404 * Make sure we're both on the same task, or both 8405 * per-cpu events. 
8406 */
8407 if (group_leader->ctx->task != ctx->task)
8408 goto err_context;
8409
8410 /*
8411 * Make sure we're both events for the same CPU;
8412 * grouping events for different CPUs is broken, since
8413 * you can never concurrently schedule them anyhow.
8414 */
8415 if (group_leader->cpu != event->cpu)
8416 goto err_context;
8417 } else {
8418 if (group_leader->ctx != ctx)
8419 goto err_context;
8420 }
8421
8422 /*
8423 * Only a group leader can be exclusive or pinned
8424 */
8425 if (attr.exclusive || attr.pinned)
8426 goto err_context;
8427 }
8428
8429 if (output_event) {
8430 err = perf_event_set_output(event, output_event);
8431 if (err)
8432 goto err_context;
8433 }
8434
8435 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
8436 f_flags);
8437 if (IS_ERR(event_file)) {
8438 err = PTR_ERR(event_file);
8439 goto err_context;
8440 }
8441
8442 if (move_group) {
8443 gctx = group_leader->ctx;
8444 mutex_lock_double(&gctx->mutex, &ctx->mutex);
8445 } else {
8446 mutex_lock(&ctx->mutex);
8447 }
8448
8449 if (!perf_event_validate_size(event)) {
8450 err = -E2BIG;
8451 goto err_locked;
8452 }
8453
8454 /*
8455 * Must be under the same ctx::mutex as perf_install_in_context(),
8456 * because we need to serialize with concurrent event creation.
8457 */
8458 if (!exclusive_event_installable(event, ctx)) {
8459 /* exclusive and group stuff are assumed mutually exclusive */
8460 WARN_ON_ONCE(move_group);
8461
8462 err = -EBUSY;
8463 goto err_locked;
8464 }
8465
8466 WARN_ON_ONCE(ctx->parent_ctx);
8467
8468 if (move_group) {
8469 /*
8470 * See perf_event_ctx_lock() for comments on the details
8471 * of swizzling perf_event::ctx.
8472 */
8473 perf_remove_from_context(group_leader, false);
8474
8475 list_for_each_entry(sibling, &group_leader->sibling_list,
8476 group_entry) {
8477 perf_remove_from_context(sibling, false);
8478 put_ctx(gctx);
8479 }
8480
8481 /*
8482 * Wait for everybody to stop referencing the events through
8483 * the old lists, before installing them on the new lists.
8484 */
8485 synchronize_rcu();
8486
8487 /*
8488 * Install the group siblings before the group leader.
8489 *
8490 * Because a group leader will try to install the entire group
8491 * (through the sibling list, which is still intact), we can
8492 * end up with siblings installed in the wrong context.
8493 *
8494 * By installing the siblings first we make this a NO-OP, because
8495 * they are not reachable through the group lists.
8496 */
8497 list_for_each_entry(sibling, &group_leader->sibling_list,
8498 group_entry) {
8499 perf_event__state_init(sibling);
8500 perf_install_in_context(ctx, sibling, sibling->cpu);
8501 get_ctx(ctx);
8502 }
8503
8504 /*
8505 * Removing it from the old context leaves the event disabled.
8506 * What we want here is the event in its initial startup state,
8507 * ready to be added into the new context.
8508 */
8509 perf_event__state_init(group_leader);
8510 perf_install_in_context(ctx, group_leader, group_leader->cpu);
8511 get_ctx(ctx);
8512
8513 /*
8514 * Now that all events are installed in @ctx, nothing
8515 * references @gctx anymore, so drop the last reference we have
8516 * on it.
8517 */
8518 put_ctx(gctx);
8519 }
8520
8521 /*
8522 * Precalculate sample_data sizes; do while holding ctx::mutex such
8523 * that we're serialized against further additions and before
8524 * perf_install_in_context() which is the point the event is active and
8525 * can use these values.
8526 */ 8527 perf_event__header_size(event); 8528 perf_event__id_header_size(event); 8529 8530 perf_install_in_context(ctx, event, event->cpu); 8531 perf_unpin_context(ctx); 8532 8533 if (move_group) 8534 mutex_unlock(&gctx->mutex); 8535 mutex_unlock(&ctx->mutex); 8536 8537 put_online_cpus(); 8538 8539 event->owner = current; 8540 8541 mutex_lock(¤t->perf_event_mutex); 8542 list_add_tail(&event->owner_entry, ¤t->perf_event_list); 8543 mutex_unlock(¤t->perf_event_mutex); 8544 8545 /* 8546 * Drop the reference on the group_event after placing the 8547 * new event on the sibling_list. This ensures destruction 8548 * of the group leader will find the pointer to itself in 8549 * perf_group_detach(). 8550 */ 8551 fdput(group); 8552 fd_install(event_fd, event_file); 8553 return event_fd; 8554 8555 err_locked: 8556 if (move_group) 8557 mutex_unlock(&gctx->mutex); 8558 mutex_unlock(&ctx->mutex); 8559 /* err_file: */ 8560 fput(event_file); 8561 err_context: 8562 perf_unpin_context(ctx); 8563 put_ctx(ctx); 8564 err_alloc: 8565 free_event(event); 8566 err_cpus: 8567 put_online_cpus(); 8568 err_task: 8569 if (task) 8570 put_task_struct(task); 8571 err_group_fd: 8572 fdput(group); 8573 err_fd: 8574 put_unused_fd(event_fd); 8575 return err; 8576 } 8577 8578 /** 8579 * perf_event_create_kernel_counter 8580 * 8581 * @attr: attributes of the counter to create 8582 * @cpu: cpu in which the counter is bound 8583 * @task: task to profile (NULL for percpu) 8584 */ 8585 struct perf_event * 8586 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, 8587 struct task_struct *task, 8588 perf_overflow_handler_t overflow_handler, 8589 void *context) 8590 { 8591 struct perf_event_context *ctx; 8592 struct perf_event *event; 8593 int err; 8594 8595 /* 8596 * Get the target context (task or percpu): 8597 */ 8598 8599 event = perf_event_alloc(attr, cpu, task, NULL, NULL, 8600 overflow_handler, context, -1); 8601 if (IS_ERR(event)) { 8602 err = PTR_ERR(event); 8603 goto err; 8604 } 8605 8606 /* Mark owner so we could distinguish it from user events. */ 8607 event->owner = EVENT_OWNER_KERNEL; 8608 8609 account_event(event); 8610 8611 ctx = find_get_context(event->pmu, task, event); 8612 if (IS_ERR(ctx)) { 8613 err = PTR_ERR(ctx); 8614 goto err_free; 8615 } 8616 8617 WARN_ON_ONCE(ctx->parent_ctx); 8618 mutex_lock(&ctx->mutex); 8619 if (!exclusive_event_installable(event, ctx)) { 8620 mutex_unlock(&ctx->mutex); 8621 perf_unpin_context(ctx); 8622 put_ctx(ctx); 8623 err = -EBUSY; 8624 goto err_free; 8625 } 8626 8627 perf_install_in_context(ctx, event, cpu); 8628 perf_unpin_context(ctx); 8629 mutex_unlock(&ctx->mutex); 8630 8631 return event; 8632 8633 err_free: 8634 free_event(event); 8635 err: 8636 return ERR_PTR(err); 8637 } 8638 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); 8639 8640 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) 8641 { 8642 struct perf_event_context *src_ctx; 8643 struct perf_event_context *dst_ctx; 8644 struct perf_event *event, *tmp; 8645 LIST_HEAD(events); 8646 8647 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; 8648 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; 8649 8650 /* 8651 * See perf_event_ctx_lock() for comments on the details 8652 * of swizzling perf_event::ctx. 

void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
	struct perf_event_context *src_ctx;
	struct perf_event_context *dst_ctx;
	struct perf_event *event, *tmp;
	LIST_HEAD(events);

	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;

	/*
	 * See perf_event_ctx_lock() for comments on the details
	 * of swizzling perf_event::ctx.
	 */
	mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
				 event_entry) {
		perf_remove_from_context(event, false);
		unaccount_event_cpu(event, src_cpu);
		put_ctx(src_ctx);
		list_add(&event->migrate_entry, &events);
	}

	/*
	 * Wait for the events to quiesce before re-instating them.
	 */
	synchronize_rcu();

	/*
	 * Re-instate events in 2 passes.
	 *
	 * Skip over group leaders and only install siblings on this first
	 * pass; siblings will not get enabled without a leader, but a
	 * leader will enable its siblings, even if those are still on the old
	 * context.
	 */
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		if (event->group_leader == event)
			continue;

		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		account_event_cpu(event, dst_cpu);
		perf_install_in_context(dst_ctx, event, dst_cpu);
		get_ctx(dst_ctx);
	}

	/*
	 * Once all the siblings are set up properly, install the group leaders
	 * to make it go.
	 */
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		account_event_cpu(event, dst_cpu);
		perf_install_in_context(dst_ctx, event, dst_cpu);
		get_ctx(dst_ctx);
	}
	mutex_unlock(&dst_ctx->mutex);
	mutex_unlock(&src_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
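
/*
 * A rough sketch (not part of the original file) of how a PMU driver's
 * CPU-hotplug path might use perf_pmu_migrate_context() to move its events
 * off a CPU that is going away. The names example_pmu and
 * example_pmu_cpu_down_prepare() are hypothetical placeholders for a
 * driver's own pmu instance and hotplug callback.
 */
#if 0	/* illustrative example only, not built */
static struct pmu example_pmu;	/* hypothetical driver pmu */

static void example_pmu_cpu_down_prepare(unsigned int cpu)
{
	int target;

	/* Pick any other online CPU to inherit this CPU's events. */
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return;

	perf_pmu_migrate_context(&example_pmu, cpu, target);
}
#endif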

static void sync_child_event(struct perf_event *child_event,
			     struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Make sure the user/parent gets notified that we just
	 * lost one event.
	 */
	perf_event_wakeup(parent_event);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	put_event(parent_event);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
		       struct perf_event_context *child_ctx,
		       struct task_struct *child)
{
	/*
	 * Do not destroy the 'original' grouping; because of the context
	 * switch optimization the original events could've ended up in a
	 * random child task.
	 *
	 * If we were to destroy the original group, all group related
	 * operations would cease to function properly after this random
	 * child dies.
	 *
	 * Do destroy all inherited groups; we don't care about those,
	 * and being thorough is better.
	 */
	perf_remove_from_context(child_event, !!child_event->parent);

	/*
	 * It can happen that the parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped.
	 */
	if (child_event->parent) {
		sync_child_event(child_event, child);
		free_event(child_event);
	} else {
		child_event->state = PERF_EVENT_STATE_EXIT;
		perf_event_wakeup(child_event);
	}
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *next;
	struct perf_event_context *child_ctx, *clone_ctx = NULL;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	task_ctx_sched_out(child_ctx);
	child->perf_event_ctxp[ctxn] = NULL;

	/*
	 * If this context is a clone, unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	clone_ctx = unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	if (clone_ctx)
		put_ctx(clone_ctx);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       put_event()
	 *         mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner; this closes a race against perf_release(),
		 * where we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	put_event(parent);

	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
	free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 *
 * Not all locks are strictly required, but take them anyway to be nice and
 * help out with the lockdep assertions.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

struct perf_event *perf_event_get(unsigned int fd)
{
	int err;
	struct fd f;
	struct perf_event *event;

	err = perf_fget_light(fd, &f);
	if (err)
		return ERR_PTR(err);

	event = f.file->private_data;
	atomic_long_inc(&event->refcount);
	fdput(f);

	return event;
}

const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	if (!event)
		return ERR_PTR(-EINVAL);

	return &event->attr;
}
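
/*
 * A small sketch (not part of the original file): how a kernel-internal user
 * such as a BPF map implementation might pair perf_event_get() and
 * perf_event_attrs(). example_fd_is_hw_event() is a hypothetical helper; the
 * reference taken by perf_event_get() is assumed to be dropped again via the
 * file-local put_event() above.
 */
#if 0	/* illustrative example only, not built */
static bool example_fd_is_hw_event(unsigned int fd)
{
	const struct perf_event_attr *attr;
	struct perf_event *event;
	bool ret;

	event = perf_event_get(fd);	/* takes a reference */
	if (IS_ERR(event))
		return false;

	attr = perf_event_attrs(event);
	ret = !IS_ERR(attr) && attr->type == PERF_TYPE_HARDWARE;

	put_event(event);		/* drop the reference again */
	return ret;
}
#endif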

/*
 * Inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	enum perf_event_active_state parent_state = parent_event->state;
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       child,
				       group_leader, parent_event,
				       NULL, NULL, -1);
	if (IS_ERR(child_event))
		return child_event;

	if (is_orphaned_event(parent_event) ||
	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
		free_event(child_event);
		return NULL;
	}

	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
			 struct task_struct *parent,
			 struct perf_event_context *parent_ctx,
			 struct task_struct *child,
			 struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
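
/*
 * Inheritance call chain, for orientation: perf_event_init_task() below
 * iterates all context types and calls perf_event_init_context(), which
 * walks the parent's pinned and flexible groups and hands each group leader
 * to inherit_task_group(); that allocates the child context on first use and
 * calls inherit_group() above, which clones the leader and then every
 * sibling via inherit_event().
 */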

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */
		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);
	if (!parent_ctx)
		return 0;

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list
	 * due to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret) {
			perf_event_free_task(child);
			return ret;
		}
	}

	return 0;
}

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
	}
}

static void perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = true;
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void __perf_event_exit_context(void *__info)
{
	struct remove_event re = { .detach_group = true };
	struct perf_event_context *ctx = __info;

	rcu_read_lock();
	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
		__perf_remove_from_context(&re);
	rcu_read_unlock();
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	perf_event_exit_cpu_context(cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = false;
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&perf_sched_events, HZ);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location. IOW, validation that we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	if (pmu_attr->event_str)
		return sprintf(page, "%s\n", pmu_attr->event_str);

	return 0;
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}
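
/*
 * perf_cgroup_attach() below uses task_function_call() to run
 * __perf_cgroup_move() on the CPU where each migrated task is currently
 * running, forcing that task to switch out of the old cgroup's events and
 * into the new cgroup's events.
 */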

static void perf_cgroup_attach(struct cgroup_subsys_state *css,
			       struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset)
		task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */