/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/cgroup.h>
#include <linux/module.h>

#include "internal.h"

#include <asm/irq_regs.h>

struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @cpu:	the cpu on which to run @func
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
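 *
 * Note: the call is synchronous; smp_call_function_single() is invoked
 * with wait=1, so @func has already run (in IPI context on @cpu) by the
 * time this helper returns.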
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	do_div(tmp, 100);
	ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
}

static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

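	/*
	 * A successful write changes sysctl_perf_cpu_time_max_percent; the
	 * recompute below then rescales the per-sample budget.  With the
	 * defaults that is DEFAULT_SAMPLE_PERIOD_NS * 25 / 100, i.e.
	 * 10000ns * 25% = 2500ns per sample.
	 */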
	if (ret || !write)
		return ret;

	update_perf_cpu_limits();

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static void perf_duration_warn(struct irq_work *w)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	local_samples_len = __get_cpu_var(running_sample_length);
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	printk_ratelimited(KERN_WARNING
			"perf interrupt took too long (%lld > %lld), lowering "
			"kernel.perf_event_max_sample_rate to %d\n",
			avg_local_sample_len, allowed_ns >> 1,
			sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	if (allowed_ns == 0)
		return;

	/* decay the counter by 1 average sample */
	local_samples_len = __get_cpu_var(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__get_cpu_var(running_sample_length) = local_samples_len;

	/*
	 * note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	if (avg_local_sample_len <= allowed_ns)
		return;

	if (max_samples_per_tick <= 1)
		return;

	max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	update_perf_cpu_limits();

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     avg_local_sample_len, allowed_ns >> 1,
			     sysctl_perf_event_sample_rate);
	}
}

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
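	/*
	 * Unlock in the reverse order of perf_ctx_lock(): ctx->lock
	 * nests inside cpuctx->ctx.lock.
	 */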
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_css(task, perf_event_cgrp_id),
			    struct perf_cgroup, css);
}

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups.  If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

static inline void perf_put_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	perf_put_cgroup(event);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->unique_pmu != pmu)
			continue; /* ensure we process each cpuctx once */

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to pass
				 * task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * the cgroup during the context switch. Cgroup events were not
	 * scheduled out during the context switch if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline
u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_context *cpuctx;
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	int rotations = 0;

	WARN_ON(!irqs_disabled());

	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);

	rotations = perf_rotate_context(cpuctx);

	/*
	 * arm timer if needed
	 */
	if (rotations) {
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
		ret = HRTIMER_RESTART;
	}

	return ret;
}

/* CPU is going down */
void perf_cpu_hrtimer_cancel(int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	if (WARN_ON(cpu != smp_processor_id()))
		return;

	local_irq_save(flags);

	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		if (pmu->task_ctx_nr == perf_sw_context)
			continue;

		hrtimer_cancel(&cpuctx->hrtimer);
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
	struct hrtimer *hr = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	int timer;

	/* no multiplexing needed for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	timer = pmu->hrtimer_interval_ms;
	if (timer < 1)
		timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);

	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	hr->function = perf_cpu_hrtimer_handler;
}

static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *hr = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;

	/* not for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	if (hrtimer_active(hr))
		return;

	if (!hrtimer_callback_running(hr))
		__hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
					 0, HRTIMER_MODE_REL_PINNED, 0);
}

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		kfree_rcu(ctx, rcu_head);
	}
}

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
	ctx->generation++;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was preemptible -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section is non-preemptible.
	 */
	preempt_disable();
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
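		 *
		 * Even then the context may already be on its way out;
		 * the atomic_inc_not_zero() below refuses to hand out a
		 * reference once the refcount has dropped to zero.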
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			rcu_read_unlock();
			preempt_enable();
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	preempt_enable();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;

}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
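 * Like update_event_times(), this needs to be called with ctx->lock held.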
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	if (has_branch_stack(event))
		ctx->nr_branch_stack++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;

	ctx->generation++;
}

/*
 * Initialize event state based on the perf_event_attr::disabled.
 */
static inline void perf_event__state_init(struct perf_event *event)
{
	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
					      PERF_EVENT_STATE_INACTIVE;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
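 *
 * For example, with PERF_FORMAT_GROUP | PERF_FORMAT_ID and one sibling,
 * read_size works out to sizeof(u64) * 5 = 40 bytes: the nr field plus
 * a {value, id} pair for the leader and for the sibling.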
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		size += sizeof(data->weight);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		size += sizeof(data->data_src.val);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		size += sizeof(data->txn);

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	if (has_branch_stack(event))
		ctx->nr_branch_stack--;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	ctx->generation++;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
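	 * ('list' is only non-NULL when the outgoing leader was itself
	 * queued on one of the context's group lists; otherwise the
	 * siblings simply become singleton leaders in place.)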
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}

static void
event_sched_out(struct perf_event *event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	perf_pmu_disable(event->pmu);

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;

	perf_pmu_enable(event->pmu);
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event; our
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_disable);

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *     tstamp - ctx->timestamp
	 * is equivalent to
	 *     tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	int ret = 0;

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
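	 * Clearing hw.interrupts below lifts the throttle; the preceding
	 * perf_log_throttle(event, 1) records the unthrottle so tooling
	 * can account for the gap.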
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	perf_pmu_disable(event->pmu);

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		ret = -EAGAIN;
		goto out;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	perf_set_shadow_time(event, ctx, tstamp);

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

out:
	perf_pmu_enable(event->pmu);

	return ret;
}

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = ctx->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		perf_cpu_hrtimer_restart(cpuctx);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone through event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	perf_cpu_hrtimer_restart(cpuctx);

	return -EAGAIN;
}

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			     struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task);

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}

/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static int __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

	perf_ctx_lock(cpuctx, task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	/*
	 * If there was an active task_ctx schedule it out.
	 */
	if (task_ctx)
		task_ctx_sched_out(task_ctx);

	/*
	 * If the context we're installing events in is not the
	 * active task_ctx, flip them.
	 */
	if (ctx->task && task_ctx != ctx) {
		if (task_ctx)
			raw_spin_unlock(&task_ctx->lock);
		raw_spin_lock(&ctx->lock);
		task_ctx = ctx;
	}

	if (task_ctx) {
		cpuctx->task_ctx = task_ctx;
		task = task_ctx->task;
	}

	cpu_ctx_sched_out(cpuctx, EVENT_ALL);

	update_context_time(ctx);
	/*
	 * update cgrp time only if current cgrp
	 * matches event->cgrp. Must be done before
	 * calling add_event_to_ctx()
	 */
	update_cgrp_time_from_event(event);

	add_event_to_ctx(event, ctx);

	/*
	 * Schedule everything back in
	 */
	perf_event_sched_in(cpuctx, task_ctx, task);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, task_ctx);

	return 0;
}

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
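 *
 * As with perf_remove_from_context(), if the task keeps eluding the
 * IPI we eventually fall back to adding the event directly under
 * ctx->lock once the context is seen inactive.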
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	event->ctx = ctx;
	if (event->cpu != -1)
		event->cpu = cpu;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		cpu_function_call(cpu, __perf_install_in_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_install_in_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to add the event; our
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
	}
}

/*
 * Cross CPU call to enable a performance event
 */
static int __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	/*
	 * There's a time window between 'ctx->is_active' check
	 * in perf_event_enable function and this place having:
	 *  - IRQs on
	 *  - ctx->lock unlocked
	 *
	 * where the task could be killed and 'ctx' deactivated
	 * by perf_event_exit_task.
	 */
	if (!ctx->is_active)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;

	/*
	 * set current task's cgroup time reference point
	 */
	perf_cgroup_set_timestamp(current, ctx);

	__perf_event_mark_enabled(event);

	if (!event_filter_match(event)) {
		if (is_cgroup_event(event))
			perf_cgroup_defer_enabled(event);
		goto unlock;
	}

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
2028 */ 2029 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 2030 goto unlock; 2031 2032 if (!group_can_go_on(event, cpuctx, 1)) { 2033 err = -EEXIST; 2034 } else { 2035 if (event == leader) 2036 err = group_sched_in(event, cpuctx, ctx); 2037 else 2038 err = event_sched_in(event, cpuctx, ctx); 2039 } 2040 2041 if (err) { 2042 /* 2043 * If this event can't go on and it's part of a 2044 * group, then the whole group has to come off. 2045 */ 2046 if (leader != event) { 2047 group_sched_out(leader, cpuctx, ctx); 2048 perf_cpu_hrtimer_restart(cpuctx); 2049 } 2050 if (leader->attr.pinned) { 2051 update_group_times(leader); 2052 leader->state = PERF_EVENT_STATE_ERROR; 2053 } 2054 } 2055 2056 unlock: 2057 raw_spin_unlock(&ctx->lock); 2058 2059 return 0; 2060 } 2061 2062 /* 2063 * Enable a event. 2064 * 2065 * If event->ctx is a cloned context, callers must make sure that 2066 * every task struct that event->ctx->task could possibly point to 2067 * remains valid. This condition is satisfied when called through 2068 * perf_event_for_each_child or perf_event_for_each as described 2069 * for perf_event_disable. 2070 */ 2071 void perf_event_enable(struct perf_event *event) 2072 { 2073 struct perf_event_context *ctx = event->ctx; 2074 struct task_struct *task = ctx->task; 2075 2076 if (!task) { 2077 /* 2078 * Enable the event on the cpu that it's on 2079 */ 2080 cpu_function_call(event->cpu, __perf_event_enable, event); 2081 return; 2082 } 2083 2084 raw_spin_lock_irq(&ctx->lock); 2085 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2086 goto out; 2087 2088 /* 2089 * If the event is in error state, clear that first. 2090 * That way, if we see the event in error state below, we 2091 * know that it has gone back into error state, as distinct 2092 * from the task having been scheduled away before the 2093 * cross-call arrived. 2094 */ 2095 if (event->state == PERF_EVENT_STATE_ERROR) 2096 event->state = PERF_EVENT_STATE_OFF; 2097 2098 retry: 2099 if (!ctx->is_active) { 2100 __perf_event_mark_enabled(event); 2101 goto out; 2102 } 2103 2104 raw_spin_unlock_irq(&ctx->lock); 2105 2106 if (!task_function_call(task, __perf_event_enable, event)) 2107 return; 2108 2109 raw_spin_lock_irq(&ctx->lock); 2110 2111 /* 2112 * If the context is active and the event is still off, 2113 * we need to retry the cross-call. 
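 *
 * Example of the race: the cross-call found the task scheduled out and
 * did nothing, but a concurrent sched-in activated the context again
 * before we retook ctx->lock; only another cross-call can now enable
 * the event on the PMU, hence the retry.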
2114 */ 2115 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { 2116 /* 2117 * task could have been flipped by a concurrent 2118 * perf_event_context_sched_out() 2119 */ 2120 task = ctx->task; 2121 goto retry; 2122 } 2123 2124 out: 2125 raw_spin_unlock_irq(&ctx->lock); 2126 } 2127 EXPORT_SYMBOL_GPL(perf_event_enable); 2128 2129 int perf_event_refresh(struct perf_event *event, int refresh) 2130 { 2131 /* 2132 * not supported on inherited events 2133 */ 2134 if (event->attr.inherit || !is_sampling_event(event)) 2135 return -EINVAL; 2136 2137 atomic_add(refresh, &event->event_limit); 2138 perf_event_enable(event); 2139 2140 return 0; 2141 } 2142 EXPORT_SYMBOL_GPL(perf_event_refresh); 2143 2144 static void ctx_sched_out(struct perf_event_context *ctx, 2145 struct perf_cpu_context *cpuctx, 2146 enum event_type_t event_type) 2147 { 2148 struct perf_event *event; 2149 int is_active = ctx->is_active; 2150 2151 ctx->is_active &= ~event_type; 2152 if (likely(!ctx->nr_events)) 2153 return; 2154 2155 update_context_time(ctx); 2156 update_cgrp_time_from_cpuctx(cpuctx); 2157 if (!ctx->nr_active) 2158 return; 2159 2160 perf_pmu_disable(ctx->pmu); 2161 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { 2162 list_for_each_entry(event, &ctx->pinned_groups, group_entry) 2163 group_sched_out(event, cpuctx, ctx); 2164 } 2165 2166 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { 2167 list_for_each_entry(event, &ctx->flexible_groups, group_entry) 2168 group_sched_out(event, cpuctx, ctx); 2169 } 2170 perf_pmu_enable(ctx->pmu); 2171 } 2172 2173 /* 2174 * Test whether two contexts are equivalent, i.e. whether they have both been 2175 * cloned from the same version of the same context. 2176 * 2177 * Equivalence is measured using a generation number in the context that is 2178 * incremented on each modification to it; see unclone_ctx(), list_add_event() 2179 * and list_del_event(). 2180 */ 2181 static int context_equiv(struct perf_event_context *ctx1, 2182 struct perf_event_context *ctx2) 2183 { 2184 /* Pinning disables the swap optimization */ 2185 if (ctx1->pin_count || ctx2->pin_count) 2186 return 0; 2187 2188 /* If ctx1 is the parent of ctx2 */ 2189 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) 2190 return 1; 2191 2192 /* If ctx2 is the parent of ctx1 */ 2193 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) 2194 return 1; 2195 2196 /* 2197 * If ctx1 and ctx2 have the same parent; we flatten the parent 2198 * hierarchy, see perf_event_init_context(). 2199 */ 2200 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && 2201 ctx1->parent_gen == ctx2->parent_gen) 2202 return 1; 2203 2204 /* Unmatched */ 2205 return 0; 2206 } 2207 2208 static void __perf_event_sync_stat(struct perf_event *event, 2209 struct perf_event *next_event) 2210 { 2211 u64 value; 2212 2213 if (!event->attr.inherit_stat) 2214 return; 2215 2216 /* 2217 * Update the event value, we cannot use perf_event_read() 2218 * because we're in the middle of a context switch and have IRQs 2219 * disabled, which upsets smp_call_function_single(), however 2220 * we know the event must be on the current CPU, therefore we 2221 * don't need to use it. 
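 *
 * (Calling smp_call_function_single() with IRQs disabled and wait = 1,
 * as perf_event_read() does, could deadlock against another CPU that
 * is spinning on us with its own interrupts off; reading the local PMU
 * directly via event->pmu->read() avoids the problem entirely.)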
2222 */ 2223 switch (event->state) { 2224 case PERF_EVENT_STATE_ACTIVE: 2225 event->pmu->read(event); 2226 /* fall-through */ 2227 2228 case PERF_EVENT_STATE_INACTIVE: 2229 update_event_times(event); 2230 break; 2231 2232 default: 2233 break; 2234 } 2235 2236 /* 2237 * In order to keep per-task stats reliable we need to flip the event 2238 * values when we flip the contexts. 2239 */ 2240 value = local64_read(&next_event->count); 2241 value = local64_xchg(&event->count, value); 2242 local64_set(&next_event->count, value); 2243 2244 swap(event->total_time_enabled, next_event->total_time_enabled); 2245 swap(event->total_time_running, next_event->total_time_running); 2246 2247 /* 2248 * Since we swizzled the values, update the user visible data too. 2249 */ 2250 perf_event_update_userpage(event); 2251 perf_event_update_userpage(next_event); 2252 } 2253 2254 static void perf_event_sync_stat(struct perf_event_context *ctx, 2255 struct perf_event_context *next_ctx) 2256 { 2257 struct perf_event *event, *next_event; 2258 2259 if (!ctx->nr_stat) 2260 return; 2261 2262 update_context_time(ctx); 2263 2264 event = list_first_entry(&ctx->event_list, 2265 struct perf_event, event_entry); 2266 2267 next_event = list_first_entry(&next_ctx->event_list, 2268 struct perf_event, event_entry); 2269 2270 while (&event->event_entry != &ctx->event_list && 2271 &next_event->event_entry != &next_ctx->event_list) { 2272 2273 __perf_event_sync_stat(event, next_event); 2274 2275 event = list_next_entry(event, event_entry); 2276 next_event = list_next_entry(next_event, event_entry); 2277 } 2278 } 2279 2280 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, 2281 struct task_struct *next) 2282 { 2283 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; 2284 struct perf_event_context *next_ctx; 2285 struct perf_event_context *parent, *next_parent; 2286 struct perf_cpu_context *cpuctx; 2287 int do_switch = 1; 2288 2289 if (likely(!ctx)) 2290 return; 2291 2292 cpuctx = __get_cpu_context(ctx); 2293 if (!cpuctx->task_ctx) 2294 return; 2295 2296 rcu_read_lock(); 2297 next_ctx = next->perf_event_ctxp[ctxn]; 2298 if (!next_ctx) 2299 goto unlock; 2300 2301 parent = rcu_dereference(ctx->parent_ctx); 2302 next_parent = rcu_dereference(next_ctx->parent_ctx); 2303 2304 /* If neither context have a parent context; they cannot be clones. */ 2305 if (!parent && !next_parent) 2306 goto unlock; 2307 2308 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 2309 /* 2310 * Looks like the two contexts are clones, so we might be 2311 * able to optimize the context switch. We lock both 2312 * contexts and check that they are clones under the 2313 * lock (including re-checking that neither has been 2314 * uncloned in the meantime). It doesn't matter which 2315 * order we take the locks because no other cpu could 2316 * be trying to lock both of these tasks. 
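 *
 * The optimization itself: instead of scheduling all events out of
 * 'task' and back in for 'next', the two equivalent (cloned) contexts
 * simply swap owners. E.g. a parent and its fork()ed child that both
 * carry inherited copies of the same counters keep the PMU programmed
 * across the switch and only exchange their ctx pointers below.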
2317 */ 2318 raw_spin_lock(&ctx->lock); 2319 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 2320 if (context_equiv(ctx, next_ctx)) { 2321 /* 2322 * XXX do we need a memory barrier of sorts 2323 * wrt to rcu_dereference() of perf_event_ctxp 2324 */ 2325 task->perf_event_ctxp[ctxn] = next_ctx; 2326 next->perf_event_ctxp[ctxn] = ctx; 2327 ctx->task = next; 2328 next_ctx->task = task; 2329 do_switch = 0; 2330 2331 perf_event_sync_stat(ctx, next_ctx); 2332 } 2333 raw_spin_unlock(&next_ctx->lock); 2334 raw_spin_unlock(&ctx->lock); 2335 } 2336 unlock: 2337 rcu_read_unlock(); 2338 2339 if (do_switch) { 2340 raw_spin_lock(&ctx->lock); 2341 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2342 cpuctx->task_ctx = NULL; 2343 raw_spin_unlock(&ctx->lock); 2344 } 2345 } 2346 2347 #define for_each_task_context_nr(ctxn) \ 2348 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) 2349 2350 /* 2351 * Called from scheduler to remove the events of the current task, 2352 * with interrupts disabled. 2353 * 2354 * We stop each event and update the event value in event->count. 2355 * 2356 * This does not protect us against NMI, but disable() 2357 * sets the disabled bit in the control field of event _before_ 2358 * accessing the event control register. If a NMI hits, then it will 2359 * not restart the event. 2360 */ 2361 void __perf_event_task_sched_out(struct task_struct *task, 2362 struct task_struct *next) 2363 { 2364 int ctxn; 2365 2366 for_each_task_context_nr(ctxn) 2367 perf_event_context_sched_out(task, ctxn, next); 2368 2369 /* 2370 * if cgroup events exist on this CPU, then we need 2371 * to check if we have to switch out PMU state. 2372 * cgroup event are system-wide mode only 2373 */ 2374 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2375 perf_cgroup_sched_out(task, next); 2376 } 2377 2378 static void task_ctx_sched_out(struct perf_event_context *ctx) 2379 { 2380 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2381 2382 if (!cpuctx->task_ctx) 2383 return; 2384 2385 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 2386 return; 2387 2388 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2389 cpuctx->task_ctx = NULL; 2390 } 2391 2392 /* 2393 * Called with IRQs disabled 2394 */ 2395 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, 2396 enum event_type_t event_type) 2397 { 2398 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); 2399 } 2400 2401 static void 2402 ctx_pinned_sched_in(struct perf_event_context *ctx, 2403 struct perf_cpu_context *cpuctx) 2404 { 2405 struct perf_event *event; 2406 2407 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { 2408 if (event->state <= PERF_EVENT_STATE_OFF) 2409 continue; 2410 if (!event_filter_match(event)) 2411 continue; 2412 2413 /* may need to reset tstamp_enabled */ 2414 if (is_cgroup_event(event)) 2415 perf_cgroup_mark_enabled(event, ctx); 2416 2417 if (group_can_go_on(event, cpuctx, 1)) 2418 group_sched_in(event, cpuctx, ctx); 2419 2420 /* 2421 * If this pinned group hasn't been scheduled, 2422 * put it in error state. 
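 *
 * Pinned events must be on the PMU whenever their context is, so a
 * pinned group that could not get a counter is not retried later; it
 * goes to ERROR and a subsequent read() of it returns 0 (end of file),
 * see perf_read_hw().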
2423 */ 2424 if (event->state == PERF_EVENT_STATE_INACTIVE) { 2425 update_group_times(event); 2426 event->state = PERF_EVENT_STATE_ERROR; 2427 } 2428 } 2429 } 2430 2431 static void 2432 ctx_flexible_sched_in(struct perf_event_context *ctx, 2433 struct perf_cpu_context *cpuctx) 2434 { 2435 struct perf_event *event; 2436 int can_add_hw = 1; 2437 2438 list_for_each_entry(event, &ctx->flexible_groups, group_entry) { 2439 /* Ignore events in OFF or ERROR state */ 2440 if (event->state <= PERF_EVENT_STATE_OFF) 2441 continue; 2442 /* 2443 * Listen to the 'cpu' scheduling filter constraint 2444 * of events: 2445 */ 2446 if (!event_filter_match(event)) 2447 continue; 2448 2449 /* may need to reset tstamp_enabled */ 2450 if (is_cgroup_event(event)) 2451 perf_cgroup_mark_enabled(event, ctx); 2452 2453 if (group_can_go_on(event, cpuctx, can_add_hw)) { 2454 if (group_sched_in(event, cpuctx, ctx)) 2455 can_add_hw = 0; 2456 } 2457 } 2458 } 2459 2460 static void 2461 ctx_sched_in(struct perf_event_context *ctx, 2462 struct perf_cpu_context *cpuctx, 2463 enum event_type_t event_type, 2464 struct task_struct *task) 2465 { 2466 u64 now; 2467 int is_active = ctx->is_active; 2468 2469 ctx->is_active |= event_type; 2470 if (likely(!ctx->nr_events)) 2471 return; 2472 2473 now = perf_clock(); 2474 ctx->timestamp = now; 2475 perf_cgroup_set_timestamp(task, ctx); 2476 /* 2477 * First go through the list and put on any pinned groups 2478 * in order to give them the best chance of going on. 2479 */ 2480 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) 2481 ctx_pinned_sched_in(ctx, cpuctx); 2482 2483 /* Then walk through the lower prio flexible groups */ 2484 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) 2485 ctx_flexible_sched_in(ctx, cpuctx); 2486 } 2487 2488 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, 2489 enum event_type_t event_type, 2490 struct task_struct *task) 2491 { 2492 struct perf_event_context *ctx = &cpuctx->ctx; 2493 2494 ctx_sched_in(ctx, cpuctx, event_type, task); 2495 } 2496 2497 static void perf_event_context_sched_in(struct perf_event_context *ctx, 2498 struct task_struct *task) 2499 { 2500 struct perf_cpu_context *cpuctx; 2501 2502 cpuctx = __get_cpu_context(ctx); 2503 if (cpuctx->task_ctx == ctx) 2504 return; 2505 2506 perf_ctx_lock(cpuctx, ctx); 2507 perf_pmu_disable(ctx->pmu); 2508 /* 2509 * We want to keep the following priority order: 2510 * cpu pinned (that don't need to move), task pinned, 2511 * cpu flexible, task flexible. 2512 */ 2513 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2514 2515 if (ctx->nr_events) 2516 cpuctx->task_ctx = ctx; 2517 2518 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); 2519 2520 perf_pmu_enable(ctx->pmu); 2521 perf_ctx_unlock(cpuctx, ctx); 2522 2523 /* 2524 * Since these rotations are per-cpu, we need to ensure the 2525 * cpu-context we got scheduled on is actually rotating. 2526 */ 2527 perf_pmu_rotate_start(ctx->pmu); 2528 } 2529 2530 /* 2531 * When sampling the branck stack in system-wide, it may be necessary 2532 * to flush the stack on context switch. This happens when the branch 2533 * stack does not tag its entries with the pid of the current task. 2534 * Otherwise it becomes impossible to associate a branch entry with a 2535 * task. This ambiguity is more likely to appear when the branch stack 2536 * supports priv level filtering and the user sets it to monitor only 2537 * at the user level (which could be a useful measurement in system-wide 2538 * mode). 
In that case, the risk is high of having a branch stack with 2539 * branch from multiple tasks. Flushing may mean dropping the existing 2540 * entries or stashing them somewhere in the PMU specific code layer. 2541 * 2542 * This function provides the context switch callback to the lower code 2543 * layer. It is invoked ONLY when there is at least one system-wide context 2544 * with at least one active event using taken branch sampling. 2545 */ 2546 static void perf_branch_stack_sched_in(struct task_struct *prev, 2547 struct task_struct *task) 2548 { 2549 struct perf_cpu_context *cpuctx; 2550 struct pmu *pmu; 2551 unsigned long flags; 2552 2553 /* no need to flush branch stack if not changing task */ 2554 if (prev == task) 2555 return; 2556 2557 local_irq_save(flags); 2558 2559 rcu_read_lock(); 2560 2561 list_for_each_entry_rcu(pmu, &pmus, entry) { 2562 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 2563 2564 /* 2565 * check if the context has at least one 2566 * event using PERF_SAMPLE_BRANCH_STACK 2567 */ 2568 if (cpuctx->ctx.nr_branch_stack > 0 2569 && pmu->flush_branch_stack) { 2570 2571 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2572 2573 perf_pmu_disable(pmu); 2574 2575 pmu->flush_branch_stack(); 2576 2577 perf_pmu_enable(pmu); 2578 2579 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2580 } 2581 } 2582 2583 rcu_read_unlock(); 2584 2585 local_irq_restore(flags); 2586 } 2587 2588 /* 2589 * Called from scheduler to add the events of the current task 2590 * with interrupts disabled. 2591 * 2592 * We restore the event value and then enable it. 2593 * 2594 * This does not protect us against NMI, but enable() 2595 * sets the enabled bit in the control field of event _before_ 2596 * accessing the event control register. If a NMI hits, then it will 2597 * keep the event running. 2598 */ 2599 void __perf_event_task_sched_in(struct task_struct *prev, 2600 struct task_struct *task) 2601 { 2602 struct perf_event_context *ctx; 2603 int ctxn; 2604 2605 for_each_task_context_nr(ctxn) { 2606 ctx = task->perf_event_ctxp[ctxn]; 2607 if (likely(!ctx)) 2608 continue; 2609 2610 perf_event_context_sched_in(ctx, task); 2611 } 2612 /* 2613 * if cgroup events exist on this CPU, then we need 2614 * to check if we have to switch in PMU state. 2615 * cgroup event are system-wide mode only 2616 */ 2617 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2618 perf_cgroup_sched_in(prev, task); 2619 2620 /* check for system-wide branch_stack events */ 2621 if (atomic_read(&__get_cpu_var(perf_branch_stack_events))) 2622 perf_branch_stack_sched_in(prev, task); 2623 } 2624 2625 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2626 { 2627 u64 frequency = event->attr.sample_freq; 2628 u64 sec = NSEC_PER_SEC; 2629 u64 divisor, dividend; 2630 2631 int count_fls, nsec_fls, frequency_fls, sec_fls; 2632 2633 count_fls = fls64(count); 2634 nsec_fls = fls64(nsec); 2635 frequency_fls = fls64(frequency); 2636 sec_fls = 30; 2637 2638 /* 2639 * We got @count in @nsec, with a target of sample_freq HZ 2640 * the target period becomes: 2641 * 2642 * @count * 10^9 2643 * period = ------------------- 2644 * @nsec * sample_freq 2645 * 2646 */ 2647 2648 /* 2649 * Reduce accuracy by one bit such that @a and @b converge 2650 * to a similar magnitude. 
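 *
 * E.g. count = 10^6 events observed over nsec = 10^7 ns with
 * sample_freq = 1000 Hz gives
 *
 *	period = 10^6 * 10^9 / (10^7 * 1000) = 100000 events/sample;
 *
 * the REDUCE_FLS() shifting below only kicks in when the intermediate
 * products would overflow 64 bits, and then it costs a few low order
 * bits of precision.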
2651 */ 2652 #define REDUCE_FLS(a, b) \ 2653 do { \ 2654 if (a##_fls > b##_fls) { \ 2655 a >>= 1; \ 2656 a##_fls--; \ 2657 } else { \ 2658 b >>= 1; \ 2659 b##_fls--; \ 2660 } \ 2661 } while (0) 2662 2663 /* 2664 * Reduce accuracy until either term fits in a u64, then proceed with 2665 * the other, so that finally we can do a u64/u64 division. 2666 */ 2667 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { 2668 REDUCE_FLS(nsec, frequency); 2669 REDUCE_FLS(sec, count); 2670 } 2671 2672 if (count_fls + sec_fls > 64) { 2673 divisor = nsec * frequency; 2674 2675 while (count_fls + sec_fls > 64) { 2676 REDUCE_FLS(count, sec); 2677 divisor >>= 1; 2678 } 2679 2680 dividend = count * sec; 2681 } else { 2682 dividend = count * sec; 2683 2684 while (nsec_fls + frequency_fls > 64) { 2685 REDUCE_FLS(nsec, frequency); 2686 dividend >>= 1; 2687 } 2688 2689 divisor = nsec * frequency; 2690 } 2691 2692 if (!divisor) 2693 return dividend; 2694 2695 return div64_u64(dividend, divisor); 2696 } 2697 2698 static DEFINE_PER_CPU(int, perf_throttled_count); 2699 static DEFINE_PER_CPU(u64, perf_throttled_seq); 2700 2701 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) 2702 { 2703 struct hw_perf_event *hwc = &event->hw; 2704 s64 period, sample_period; 2705 s64 delta; 2706 2707 period = perf_calculate_period(event, nsec, count); 2708 2709 delta = (s64)(period - hwc->sample_period); 2710 delta = (delta + 7) / 8; /* low pass filter */ 2711 2712 sample_period = hwc->sample_period + delta; 2713 2714 if (!sample_period) 2715 sample_period = 1; 2716 2717 hwc->sample_period = sample_period; 2718 2719 if (local64_read(&hwc->period_left) > 8*sample_period) { 2720 if (disable) 2721 event->pmu->stop(event, PERF_EF_UPDATE); 2722 2723 local64_set(&hwc->period_left, 0); 2724 2725 if (disable) 2726 event->pmu->start(event, PERF_EF_RELOAD); 2727 } 2728 } 2729 2730 /* 2731 * combine freq adjustment with unthrottling to avoid two passes over the 2732 * events. At the same time, make sure, having freq events does not change 2733 * the rate of unthrottling as that would introduce bias. 
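 *
 * Rough picture of one tick: throttled events are simply restarted,
 * then for each freq based event the observed rate is fed to
 * perf_adjust_period() above. That correction is low pass filtered
 * (delta / 8), so e.g. a sample_period that ought to move from 10000
 * to 18000 is only bumped to 11000 this tick and converges over the
 * following ticks instead of oscillating.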
2734 */ 2735 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, 2736 int needs_unthr) 2737 { 2738 struct perf_event *event; 2739 struct hw_perf_event *hwc; 2740 u64 now, period = TICK_NSEC; 2741 s64 delta; 2742 2743 /* 2744 * only need to iterate over all events iff: 2745 * - context have events in frequency mode (needs freq adjust) 2746 * - there are events to unthrottle on this cpu 2747 */ 2748 if (!(ctx->nr_freq || needs_unthr)) 2749 return; 2750 2751 raw_spin_lock(&ctx->lock); 2752 perf_pmu_disable(ctx->pmu); 2753 2754 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 2755 if (event->state != PERF_EVENT_STATE_ACTIVE) 2756 continue; 2757 2758 if (!event_filter_match(event)) 2759 continue; 2760 2761 perf_pmu_disable(event->pmu); 2762 2763 hwc = &event->hw; 2764 2765 if (hwc->interrupts == MAX_INTERRUPTS) { 2766 hwc->interrupts = 0; 2767 perf_log_throttle(event, 1); 2768 event->pmu->start(event, 0); 2769 } 2770 2771 if (!event->attr.freq || !event->attr.sample_freq) 2772 goto next; 2773 2774 /* 2775 * stop the event and update event->count 2776 */ 2777 event->pmu->stop(event, PERF_EF_UPDATE); 2778 2779 now = local64_read(&event->count); 2780 delta = now - hwc->freq_count_stamp; 2781 hwc->freq_count_stamp = now; 2782 2783 /* 2784 * restart the event 2785 * reload only if value has changed 2786 * we have stopped the event so tell that 2787 * to perf_adjust_period() to avoid stopping it 2788 * twice. 2789 */ 2790 if (delta > 0) 2791 perf_adjust_period(event, period, delta, false); 2792 2793 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 2794 next: 2795 perf_pmu_enable(event->pmu); 2796 } 2797 2798 perf_pmu_enable(ctx->pmu); 2799 raw_spin_unlock(&ctx->lock); 2800 } 2801 2802 /* 2803 * Round-robin a context's events: 2804 */ 2805 static void rotate_ctx(struct perf_event_context *ctx) 2806 { 2807 /* 2808 * Rotate the first entry last of non-pinned groups. Rotation might be 2809 * disabled by the inheritance code. 2810 */ 2811 if (!ctx->rotate_disable) 2812 list_rotate_left(&ctx->flexible_groups); 2813 } 2814 2815 /* 2816 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized 2817 * because they're strictly cpu affine and rotate_start is called with IRQs 2818 * disabled, while rotate_context is called from IRQ context. 
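 *
 * Rotation is what multiplexes over-committed flexible events: e.g.
 * with 4 hardware counters and 6 flexible events, each tick schedules
 * the flexible groups out, rotates the list head and schedules back
 * in, so over time every event gets a share of the PMU (which user
 * space can see in total_time_running vs total_time_enabled).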
2819 */ 2820 static int perf_rotate_context(struct perf_cpu_context *cpuctx) 2821 { 2822 struct perf_event_context *ctx = NULL; 2823 int rotate = 0, remove = 1; 2824 2825 if (cpuctx->ctx.nr_events) { 2826 remove = 0; 2827 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 2828 rotate = 1; 2829 } 2830 2831 ctx = cpuctx->task_ctx; 2832 if (ctx && ctx->nr_events) { 2833 remove = 0; 2834 if (ctx->nr_events != ctx->nr_active) 2835 rotate = 1; 2836 } 2837 2838 if (!rotate) 2839 goto done; 2840 2841 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2842 perf_pmu_disable(cpuctx->ctx.pmu); 2843 2844 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2845 if (ctx) 2846 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 2847 2848 rotate_ctx(&cpuctx->ctx); 2849 if (ctx) 2850 rotate_ctx(ctx); 2851 2852 perf_event_sched_in(cpuctx, ctx, current); 2853 2854 perf_pmu_enable(cpuctx->ctx.pmu); 2855 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2856 done: 2857 if (remove) 2858 list_del_init(&cpuctx->rotation_list); 2859 2860 return rotate; 2861 } 2862 2863 #ifdef CONFIG_NO_HZ_FULL 2864 bool perf_event_can_stop_tick(void) 2865 { 2866 if (atomic_read(&nr_freq_events) || 2867 __this_cpu_read(perf_throttled_count)) 2868 return false; 2869 else 2870 return true; 2871 } 2872 #endif 2873 2874 void perf_event_task_tick(void) 2875 { 2876 struct list_head *head = &__get_cpu_var(rotation_list); 2877 struct perf_cpu_context *cpuctx, *tmp; 2878 struct perf_event_context *ctx; 2879 int throttled; 2880 2881 WARN_ON(!irqs_disabled()); 2882 2883 __this_cpu_inc(perf_throttled_seq); 2884 throttled = __this_cpu_xchg(perf_throttled_count, 0); 2885 2886 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { 2887 ctx = &cpuctx->ctx; 2888 perf_adjust_freq_unthr_context(ctx, throttled); 2889 2890 ctx = cpuctx->task_ctx; 2891 if (ctx) 2892 perf_adjust_freq_unthr_context(ctx, throttled); 2893 } 2894 } 2895 2896 static int event_enable_on_exec(struct perf_event *event, 2897 struct perf_event_context *ctx) 2898 { 2899 if (!event->attr.enable_on_exec) 2900 return 0; 2901 2902 event->attr.enable_on_exec = 0; 2903 if (event->state >= PERF_EVENT_STATE_INACTIVE) 2904 return 0; 2905 2906 __perf_event_mark_enabled(event); 2907 2908 return 1; 2909 } 2910 2911 /* 2912 * Enable all of a task's events that have been marked enable-on-exec. 2913 * This expects task == current. 2914 */ 2915 static void perf_event_enable_on_exec(struct perf_event_context *ctx) 2916 { 2917 struct perf_event *event; 2918 unsigned long flags; 2919 int enabled = 0; 2920 int ret; 2921 2922 local_irq_save(flags); 2923 if (!ctx || !ctx->nr_events) 2924 goto out; 2925 2926 /* 2927 * We must ctxsw out cgroup events to avoid conflict 2928 * when invoking perf_task_event_sched_in() later on 2929 * in this function. Otherwise we end up trying to 2930 * ctxswin cgroup events which are already scheduled 2931 * in. 2932 */ 2933 perf_cgroup_sched_out(current, NULL); 2934 2935 raw_spin_lock(&ctx->lock); 2936 task_ctx_sched_out(ctx); 2937 2938 list_for_each_entry(event, &ctx->event_list, event_entry) { 2939 ret = event_enable_on_exec(event, ctx); 2940 if (ret) 2941 enabled = 1; 2942 } 2943 2944 /* 2945 * Unclone this context if we enabled any event. 
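 *
 * (The context may be a clone of the parent's, created at fork time;
 * once we change event state in it, it must no longer be treated as
 * equivalent to the parent's for the lazy context switch optimization,
 * see context_equiv().)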
2946 */ 2947 if (enabled) 2948 unclone_ctx(ctx); 2949 2950 raw_spin_unlock(&ctx->lock); 2951 2952 /* 2953 * Also calls ctxswin for cgroup events, if any: 2954 */ 2955 perf_event_context_sched_in(ctx, ctx->task); 2956 out: 2957 local_irq_restore(flags); 2958 } 2959 2960 /* 2961 * Cross CPU call to read the hardware event 2962 */ 2963 static void __perf_event_read(void *info) 2964 { 2965 struct perf_event *event = info; 2966 struct perf_event_context *ctx = event->ctx; 2967 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2968 2969 /* 2970 * If this is a task context, we need to check whether it is 2971 * the current task context of this cpu. If not it has been 2972 * scheduled out before the smp call arrived. In that case 2973 * event->count would have been updated to a recent sample 2974 * when the event was scheduled out. 2975 */ 2976 if (ctx->task && cpuctx->task_ctx != ctx) 2977 return; 2978 2979 raw_spin_lock(&ctx->lock); 2980 if (ctx->is_active) { 2981 update_context_time(ctx); 2982 update_cgrp_time_from_event(event); 2983 } 2984 update_event_times(event); 2985 if (event->state == PERF_EVENT_STATE_ACTIVE) 2986 event->pmu->read(event); 2987 raw_spin_unlock(&ctx->lock); 2988 } 2989 2990 static inline u64 perf_event_count(struct perf_event *event) 2991 { 2992 return local64_read(&event->count) + atomic64_read(&event->child_count); 2993 } 2994 2995 static u64 perf_event_read(struct perf_event *event) 2996 { 2997 /* 2998 * If event is enabled and currently active on a CPU, update the 2999 * value in the event structure: 3000 */ 3001 if (event->state == PERF_EVENT_STATE_ACTIVE) { 3002 smp_call_function_single(event->oncpu, 3003 __perf_event_read, event, 1); 3004 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 3005 struct perf_event_context *ctx = event->ctx; 3006 unsigned long flags; 3007 3008 raw_spin_lock_irqsave(&ctx->lock, flags); 3009 /* 3010 * may read while context is not active 3011 * (e.g., thread is blocked), in that case 3012 * we cannot update context time 3013 */ 3014 if (ctx->is_active) { 3015 update_context_time(ctx); 3016 update_cgrp_time_from_event(event); 3017 } 3018 update_event_times(event); 3019 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3020 } 3021 3022 return perf_event_count(event); 3023 } 3024 3025 /* 3026 * Initialize the perf_event context in a task_struct: 3027 */ 3028 static void __perf_event_init_context(struct perf_event_context *ctx) 3029 { 3030 raw_spin_lock_init(&ctx->lock); 3031 mutex_init(&ctx->mutex); 3032 INIT_LIST_HEAD(&ctx->pinned_groups); 3033 INIT_LIST_HEAD(&ctx->flexible_groups); 3034 INIT_LIST_HEAD(&ctx->event_list); 3035 atomic_set(&ctx->refcount, 1); 3036 } 3037 3038 static struct perf_event_context * 3039 alloc_perf_context(struct pmu *pmu, struct task_struct *task) 3040 { 3041 struct perf_event_context *ctx; 3042 3043 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); 3044 if (!ctx) 3045 return NULL; 3046 3047 __perf_event_init_context(ctx); 3048 if (task) { 3049 ctx->task = task; 3050 get_task_struct(task); 3051 } 3052 ctx->pmu = pmu; 3053 3054 return ctx; 3055 } 3056 3057 static struct task_struct * 3058 find_lively_task_by_vpid(pid_t vpid) 3059 { 3060 struct task_struct *task; 3061 int err; 3062 3063 rcu_read_lock(); 3064 if (!vpid) 3065 task = current; 3066 else 3067 task = find_task_by_vpid(vpid); 3068 if (task) 3069 get_task_struct(task); 3070 rcu_read_unlock(); 3071 3072 if (!task) 3073 return ERR_PTR(-ESRCH); 3074 3075 /* Reuse ptrace permission checks for now. 
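 * That is, attaching a counter to another task needs at least the
 * rights required for PTRACE_MODE_READ on it; an unprivileged user can
 * only profile tasks it could also ptrace.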
*/ 3076 err = -EACCES; 3077 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 3078 goto errout; 3079 3080 return task; 3081 errout: 3082 put_task_struct(task); 3083 return ERR_PTR(err); 3084 3085 } 3086 3087 /* 3088 * Returns a matching context with refcount and pincount. 3089 */ 3090 static struct perf_event_context * 3091 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu) 3092 { 3093 struct perf_event_context *ctx; 3094 struct perf_cpu_context *cpuctx; 3095 unsigned long flags; 3096 int ctxn, err; 3097 3098 if (!task) { 3099 /* Must be root to operate on a CPU event: */ 3100 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 3101 return ERR_PTR(-EACCES); 3102 3103 /* 3104 * We could be clever and allow to attach a event to an 3105 * offline CPU and activate it when the CPU comes up, but 3106 * that's for later. 3107 */ 3108 if (!cpu_online(cpu)) 3109 return ERR_PTR(-ENODEV); 3110 3111 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 3112 ctx = &cpuctx->ctx; 3113 get_ctx(ctx); 3114 ++ctx->pin_count; 3115 3116 return ctx; 3117 } 3118 3119 err = -EINVAL; 3120 ctxn = pmu->task_ctx_nr; 3121 if (ctxn < 0) 3122 goto errout; 3123 3124 retry: 3125 ctx = perf_lock_task_context(task, ctxn, &flags); 3126 if (ctx) { 3127 unclone_ctx(ctx); 3128 ++ctx->pin_count; 3129 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3130 } else { 3131 ctx = alloc_perf_context(pmu, task); 3132 err = -ENOMEM; 3133 if (!ctx) 3134 goto errout; 3135 3136 err = 0; 3137 mutex_lock(&task->perf_event_mutex); 3138 /* 3139 * If it has already passed perf_event_exit_task(). 3140 * we must see PF_EXITING, it takes this mutex too. 3141 */ 3142 if (task->flags & PF_EXITING) 3143 err = -ESRCH; 3144 else if (task->perf_event_ctxp[ctxn]) 3145 err = -EAGAIN; 3146 else { 3147 get_ctx(ctx); 3148 ++ctx->pin_count; 3149 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); 3150 } 3151 mutex_unlock(&task->perf_event_mutex); 3152 3153 if (unlikely(err)) { 3154 put_ctx(ctx); 3155 3156 if (err == -EAGAIN) 3157 goto retry; 3158 goto errout; 3159 } 3160 } 3161 3162 return ctx; 3163 3164 errout: 3165 return ERR_PTR(err); 3166 } 3167 3168 static void perf_event_free_filter(struct perf_event *event); 3169 3170 static void free_event_rcu(struct rcu_head *head) 3171 { 3172 struct perf_event *event; 3173 3174 event = container_of(head, struct perf_event, rcu_head); 3175 if (event->ns) 3176 put_pid_ns(event->ns); 3177 perf_event_free_filter(event); 3178 kfree(event); 3179 } 3180 3181 static void ring_buffer_put(struct ring_buffer *rb); 3182 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb); 3183 3184 static void unaccount_event_cpu(struct perf_event *event, int cpu) 3185 { 3186 if (event->parent) 3187 return; 3188 3189 if (has_branch_stack(event)) { 3190 if (!(event->attach_state & PERF_ATTACH_TASK)) 3191 atomic_dec(&per_cpu(perf_branch_stack_events, cpu)); 3192 } 3193 if (is_cgroup_event(event)) 3194 atomic_dec(&per_cpu(perf_cgroup_events, cpu)); 3195 } 3196 3197 static void unaccount_event(struct perf_event *event) 3198 { 3199 if (event->parent) 3200 return; 3201 3202 if (event->attach_state & PERF_ATTACH_TASK) 3203 static_key_slow_dec_deferred(&perf_sched_events); 3204 if (event->attr.mmap || event->attr.mmap_data) 3205 atomic_dec(&nr_mmap_events); 3206 if (event->attr.comm) 3207 atomic_dec(&nr_comm_events); 3208 if (event->attr.task) 3209 atomic_dec(&nr_task_events); 3210 if (event->attr.freq) 3211 atomic_dec(&nr_freq_events); 3212 if (is_cgroup_event(event)) 3213 
static_key_slow_dec_deferred(&perf_sched_events); 3214 if (has_branch_stack(event)) 3215 static_key_slow_dec_deferred(&perf_sched_events); 3216 3217 unaccount_event_cpu(event, event->cpu); 3218 } 3219 3220 static void __free_event(struct perf_event *event) 3221 { 3222 if (!event->parent) { 3223 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 3224 put_callchain_buffers(); 3225 } 3226 3227 if (event->destroy) 3228 event->destroy(event); 3229 3230 if (event->ctx) 3231 put_ctx(event->ctx); 3232 3233 if (event->pmu) 3234 module_put(event->pmu->module); 3235 3236 call_rcu(&event->rcu_head, free_event_rcu); 3237 } 3238 static void free_event(struct perf_event *event) 3239 { 3240 irq_work_sync(&event->pending); 3241 3242 unaccount_event(event); 3243 3244 if (event->rb) { 3245 struct ring_buffer *rb; 3246 3247 /* 3248 * Can happen when we close an event with re-directed output. 3249 * 3250 * Since we have a 0 refcount, perf_mmap_close() will skip 3251 * over us; possibly making our ring_buffer_put() the last. 3252 */ 3253 mutex_lock(&event->mmap_mutex); 3254 rb = event->rb; 3255 if (rb) { 3256 rcu_assign_pointer(event->rb, NULL); 3257 ring_buffer_detach(event, rb); 3258 ring_buffer_put(rb); /* could be last */ 3259 } 3260 mutex_unlock(&event->mmap_mutex); 3261 } 3262 3263 if (is_cgroup_event(event)) 3264 perf_detach_cgroup(event); 3265 3266 3267 __free_event(event); 3268 } 3269 3270 int perf_event_release_kernel(struct perf_event *event) 3271 { 3272 struct perf_event_context *ctx = event->ctx; 3273 3274 WARN_ON_ONCE(ctx->parent_ctx); 3275 /* 3276 * There are two ways this annotation is useful: 3277 * 3278 * 1) there is a lock recursion from perf_event_exit_task 3279 * see the comment there. 3280 * 3281 * 2) there is a lock-inversion with mmap_sem through 3282 * perf_event_read_group(), which takes faults while 3283 * holding ctx->mutex, however this is called after 3284 * the last filedesc died, so there is no possibility 3285 * to trigger the AB-BA case. 3286 */ 3287 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); 3288 raw_spin_lock_irq(&ctx->lock); 3289 perf_group_detach(event); 3290 raw_spin_unlock_irq(&ctx->lock); 3291 perf_remove_from_context(event); 3292 mutex_unlock(&ctx->mutex); 3293 3294 free_event(event); 3295 3296 return 0; 3297 } 3298 EXPORT_SYMBOL_GPL(perf_event_release_kernel); 3299 3300 /* 3301 * Called when the last reference to the file is gone. 3302 */ 3303 static void put_event(struct perf_event *event) 3304 { 3305 struct task_struct *owner; 3306 3307 if (!atomic_long_dec_and_test(&event->refcount)) 3308 return; 3309 3310 rcu_read_lock(); 3311 owner = ACCESS_ONCE(event->owner); 3312 /* 3313 * Matches the smp_wmb() in perf_event_exit_task(). If we observe 3314 * !owner it means the list deletion is complete and we can indeed 3315 * free this event, otherwise we need to serialize on 3316 * owner->perf_event_mutex. 3317 */ 3318 smp_read_barrier_depends(); 3319 if (owner) { 3320 /* 3321 * Since delayed_put_task_struct() also drops the last 3322 * task reference we can safely take a new reference 3323 * while holding the rcu_read_lock(). 3324 */ 3325 get_task_struct(owner); 3326 } 3327 rcu_read_unlock(); 3328 3329 if (owner) { 3330 mutex_lock(&owner->perf_event_mutex); 3331 /* 3332 * We have to re-check the event->owner field, if it is cleared 3333 * we raced with perf_event_exit_task(), acquiring the mutex 3334 * ensured they're done, and we can proceed with freeing the 3335 * event. 
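 *
 * Either way, exactly one of the two paths (this one or
 * perf_event_exit_task()) ends up removing the event from the owner's
 * list, so the list cannot be corrupted by the race.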
3336 */ 3337 if (event->owner) 3338 list_del_init(&event->owner_entry); 3339 mutex_unlock(&owner->perf_event_mutex); 3340 put_task_struct(owner); 3341 } 3342 3343 perf_event_release_kernel(event); 3344 } 3345 3346 static int perf_release(struct inode *inode, struct file *file) 3347 { 3348 put_event(file->private_data); 3349 return 0; 3350 } 3351 3352 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) 3353 { 3354 struct perf_event *child; 3355 u64 total = 0; 3356 3357 *enabled = 0; 3358 *running = 0; 3359 3360 mutex_lock(&event->child_mutex); 3361 total += perf_event_read(event); 3362 *enabled += event->total_time_enabled + 3363 atomic64_read(&event->child_total_time_enabled); 3364 *running += event->total_time_running + 3365 atomic64_read(&event->child_total_time_running); 3366 3367 list_for_each_entry(child, &event->child_list, child_list) { 3368 total += perf_event_read(child); 3369 *enabled += child->total_time_enabled; 3370 *running += child->total_time_running; 3371 } 3372 mutex_unlock(&event->child_mutex); 3373 3374 return total; 3375 } 3376 EXPORT_SYMBOL_GPL(perf_event_read_value); 3377 3378 static int perf_event_read_group(struct perf_event *event, 3379 u64 read_format, char __user *buf) 3380 { 3381 struct perf_event *leader = event->group_leader, *sub; 3382 int n = 0, size = 0, ret = -EFAULT; 3383 struct perf_event_context *ctx = leader->ctx; 3384 u64 values[5]; 3385 u64 count, enabled, running; 3386 3387 mutex_lock(&ctx->mutex); 3388 count = perf_event_read_value(leader, &enabled, &running); 3389 3390 values[n++] = 1 + leader->nr_siblings; 3391 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3392 values[n++] = enabled; 3393 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3394 values[n++] = running; 3395 values[n++] = count; 3396 if (read_format & PERF_FORMAT_ID) 3397 values[n++] = primary_event_id(leader); 3398 3399 size = n * sizeof(u64); 3400 3401 if (copy_to_user(buf, values, size)) 3402 goto unlock; 3403 3404 ret = size; 3405 3406 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 3407 n = 0; 3408 3409 values[n++] = perf_event_read_value(sub, &enabled, &running); 3410 if (read_format & PERF_FORMAT_ID) 3411 values[n++] = primary_event_id(sub); 3412 3413 size = n * sizeof(u64); 3414 3415 if (copy_to_user(buf + ret, values, size)) { 3416 ret = -EFAULT; 3417 goto unlock; 3418 } 3419 3420 ret += size; 3421 } 3422 unlock: 3423 mutex_unlock(&ctx->mutex); 3424 3425 return ret; 3426 } 3427 3428 static int perf_event_read_one(struct perf_event *event, 3429 u64 read_format, char __user *buf) 3430 { 3431 u64 enabled, running; 3432 u64 values[4]; 3433 int n = 0; 3434 3435 values[n++] = perf_event_read_value(event, &enabled, &running); 3436 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3437 values[n++] = enabled; 3438 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3439 values[n++] = running; 3440 if (read_format & PERF_FORMAT_ID) 3441 values[n++] = primary_event_id(event); 3442 3443 if (copy_to_user(buf, values, n * sizeof(u64))) 3444 return -EFAULT; 3445 3446 return n * sizeof(u64); 3447 } 3448 3449 /* 3450 * Read the performance event - simple non blocking version for now 3451 */ 3452 static ssize_t 3453 perf_read_hw(struct perf_event *event, char __user *buf, size_t count) 3454 { 3455 u64 read_format = event->attr.read_format; 3456 int ret; 3457 3458 /* 3459 * Return end-of-file for a read on a event that is in 3460 * error state (i.e. because it was pinned but it couldn't be 3461 * scheduled on to the CPU at some point). 
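 *
 * E.g. a pinned event that got kicked off the PMU ends up in
 * PERF_EVENT_STATE_ERROR; user space then sees read() return 0 and can
 * report the counter as not counted instead of a misleading zero value.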
3462 */ 3463 if (event->state == PERF_EVENT_STATE_ERROR) 3464 return 0; 3465 3466 if (count < event->read_size) 3467 return -ENOSPC; 3468 3469 WARN_ON_ONCE(event->ctx->parent_ctx); 3470 if (read_format & PERF_FORMAT_GROUP) 3471 ret = perf_event_read_group(event, read_format, buf); 3472 else 3473 ret = perf_event_read_one(event, read_format, buf); 3474 3475 return ret; 3476 } 3477 3478 static ssize_t 3479 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 3480 { 3481 struct perf_event *event = file->private_data; 3482 3483 return perf_read_hw(event, buf, count); 3484 } 3485 3486 static unsigned int perf_poll(struct file *file, poll_table *wait) 3487 { 3488 struct perf_event *event = file->private_data; 3489 struct ring_buffer *rb; 3490 unsigned int events = POLL_HUP; 3491 3492 /* 3493 * Pin the event->rb by taking event->mmap_mutex; otherwise 3494 * perf_event_set_output() can swizzle our rb and make us miss wakeups. 3495 */ 3496 mutex_lock(&event->mmap_mutex); 3497 rb = event->rb; 3498 if (rb) 3499 events = atomic_xchg(&rb->poll, 0); 3500 mutex_unlock(&event->mmap_mutex); 3501 3502 poll_wait(file, &event->waitq, wait); 3503 3504 return events; 3505 } 3506 3507 static void perf_event_reset(struct perf_event *event) 3508 { 3509 (void)perf_event_read(event); 3510 local64_set(&event->count, 0); 3511 perf_event_update_userpage(event); 3512 } 3513 3514 /* 3515 * Holding the top-level event's child_mutex means that any 3516 * descendant process that has inherited this event will block 3517 * in sync_child_event if it goes to exit, thus satisfying the 3518 * task existence requirements of perf_event_enable/disable. 3519 */ 3520 static void perf_event_for_each_child(struct perf_event *event, 3521 void (*func)(struct perf_event *)) 3522 { 3523 struct perf_event *child; 3524 3525 WARN_ON_ONCE(event->ctx->parent_ctx); 3526 mutex_lock(&event->child_mutex); 3527 func(event); 3528 list_for_each_entry(child, &event->child_list, child_list) 3529 func(child); 3530 mutex_unlock(&event->child_mutex); 3531 } 3532 3533 static void perf_event_for_each(struct perf_event *event, 3534 void (*func)(struct perf_event *)) 3535 { 3536 struct perf_event_context *ctx = event->ctx; 3537 struct perf_event *sibling; 3538 3539 WARN_ON_ONCE(ctx->parent_ctx); 3540 mutex_lock(&ctx->mutex); 3541 event = event->group_leader; 3542 3543 perf_event_for_each_child(event, func); 3544 list_for_each_entry(sibling, &event->sibling_list, group_entry) 3545 perf_event_for_each_child(sibling, func); 3546 mutex_unlock(&ctx->mutex); 3547 } 3548 3549 static int perf_event_period(struct perf_event *event, u64 __user *arg) 3550 { 3551 struct perf_event_context *ctx = event->ctx; 3552 int ret = 0, active; 3553 u64 value; 3554 3555 if (!is_sampling_event(event)) 3556 return -EINVAL; 3557 3558 if (copy_from_user(&value, arg, sizeof(value))) 3559 return -EFAULT; 3560 3561 if (!value) 3562 return -EINVAL; 3563 3564 raw_spin_lock_irq(&ctx->lock); 3565 if (event->attr.freq) { 3566 if (value > sysctl_perf_event_sample_rate) { 3567 ret = -EINVAL; 3568 goto unlock; 3569 } 3570 3571 event->attr.sample_freq = value; 3572 } else { 3573 event->attr.sample_period = value; 3574 event->hw.sample_period = value; 3575 } 3576 3577 active = (event->state == PERF_EVENT_STATE_ACTIVE); 3578 if (active) { 3579 perf_pmu_disable(ctx->pmu); 3580 event->pmu->stop(event, PERF_EF_UPDATE); 3581 } 3582 3583 local64_set(&event->hw.period_left, 0); 3584 3585 if (active) { 3586 event->pmu->start(event, PERF_EF_RELOAD); 3587 perf_pmu_enable(ctx->pmu); 3588 } 
3589 3590 unlock: 3591 raw_spin_unlock_irq(&ctx->lock); 3592 3593 return ret; 3594 } 3595 3596 static const struct file_operations perf_fops; 3597 3598 static inline int perf_fget_light(int fd, struct fd *p) 3599 { 3600 struct fd f = fdget(fd); 3601 if (!f.file) 3602 return -EBADF; 3603 3604 if (f.file->f_op != &perf_fops) { 3605 fdput(f); 3606 return -EBADF; 3607 } 3608 *p = f; 3609 return 0; 3610 } 3611 3612 static int perf_event_set_output(struct perf_event *event, 3613 struct perf_event *output_event); 3614 static int perf_event_set_filter(struct perf_event *event, void __user *arg); 3615 3616 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 3617 { 3618 struct perf_event *event = file->private_data; 3619 void (*func)(struct perf_event *); 3620 u32 flags = arg; 3621 3622 switch (cmd) { 3623 case PERF_EVENT_IOC_ENABLE: 3624 func = perf_event_enable; 3625 break; 3626 case PERF_EVENT_IOC_DISABLE: 3627 func = perf_event_disable; 3628 break; 3629 case PERF_EVENT_IOC_RESET: 3630 func = perf_event_reset; 3631 break; 3632 3633 case PERF_EVENT_IOC_REFRESH: 3634 return perf_event_refresh(event, arg); 3635 3636 case PERF_EVENT_IOC_PERIOD: 3637 return perf_event_period(event, (u64 __user *)arg); 3638 3639 case PERF_EVENT_IOC_ID: 3640 { 3641 u64 id = primary_event_id(event); 3642 3643 if (copy_to_user((void __user *)arg, &id, sizeof(id))) 3644 return -EFAULT; 3645 return 0; 3646 } 3647 3648 case PERF_EVENT_IOC_SET_OUTPUT: 3649 { 3650 int ret; 3651 if (arg != -1) { 3652 struct perf_event *output_event; 3653 struct fd output; 3654 ret = perf_fget_light(arg, &output); 3655 if (ret) 3656 return ret; 3657 output_event = output.file->private_data; 3658 ret = perf_event_set_output(event, output_event); 3659 fdput(output); 3660 } else { 3661 ret = perf_event_set_output(event, NULL); 3662 } 3663 return ret; 3664 } 3665 3666 case PERF_EVENT_IOC_SET_FILTER: 3667 return perf_event_set_filter(event, (void __user *)arg); 3668 3669 default: 3670 return -ENOTTY; 3671 } 3672 3673 if (flags & PERF_IOC_FLAG_GROUP) 3674 perf_event_for_each(event, func); 3675 else 3676 perf_event_for_each_child(event, func); 3677 3678 return 0; 3679 } 3680 3681 int perf_event_task_enable(void) 3682 { 3683 struct perf_event *event; 3684 3685 mutex_lock(¤t->perf_event_mutex); 3686 list_for_each_entry(event, ¤t->perf_event_list, owner_entry) 3687 perf_event_for_each_child(event, perf_event_enable); 3688 mutex_unlock(¤t->perf_event_mutex); 3689 3690 return 0; 3691 } 3692 3693 int perf_event_task_disable(void) 3694 { 3695 struct perf_event *event; 3696 3697 mutex_lock(¤t->perf_event_mutex); 3698 list_for_each_entry(event, ¤t->perf_event_list, owner_entry) 3699 perf_event_for_each_child(event, perf_event_disable); 3700 mutex_unlock(¤t->perf_event_mutex); 3701 3702 return 0; 3703 } 3704 3705 static int perf_event_index(struct perf_event *event) 3706 { 3707 if (event->hw.state & PERF_HES_STOPPED) 3708 return 0; 3709 3710 if (event->state != PERF_EVENT_STATE_ACTIVE) 3711 return 0; 3712 3713 return event->pmu->event_idx(event); 3714 } 3715 3716 static void calc_timer_values(struct perf_event *event, 3717 u64 *now, 3718 u64 *enabled, 3719 u64 *running) 3720 { 3721 u64 ctx_time; 3722 3723 *now = perf_clock(); 3724 ctx_time = event->shadow_ctx_time + *now; 3725 *enabled = ctx_time - event->tstamp_enabled; 3726 *running = ctx_time - event->tstamp_running; 3727 } 3728 3729 static void perf_event_init_userpage(struct perf_event *event) 3730 { 3731 struct perf_event_mmap_page *userpg; 3732 struct ring_buffer *rb; 3733 3734 
rcu_read_lock(); 3735 rb = rcu_dereference(event->rb); 3736 if (!rb) 3737 goto unlock; 3738 3739 userpg = rb->user_page; 3740 3741 /* Allow new userspace to detect that bit 0 is deprecated */ 3742 userpg->cap_bit0_is_deprecated = 1; 3743 userpg->size = offsetof(struct perf_event_mmap_page, __reserved); 3744 3745 unlock: 3746 rcu_read_unlock(); 3747 } 3748 3749 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) 3750 { 3751 } 3752 3753 /* 3754 * Callers need to ensure there can be no nesting of this function, otherwise 3755 * the seqlock logic goes bad. We can not serialize this because the arch 3756 * code calls this from NMI context. 3757 */ 3758 void perf_event_update_userpage(struct perf_event *event) 3759 { 3760 struct perf_event_mmap_page *userpg; 3761 struct ring_buffer *rb; 3762 u64 enabled, running, now; 3763 3764 rcu_read_lock(); 3765 rb = rcu_dereference(event->rb); 3766 if (!rb) 3767 goto unlock; 3768 3769 /* 3770 * compute total_time_enabled, total_time_running 3771 * based on snapshot values taken when the event 3772 * was last scheduled in. 3773 * 3774 * we cannot simply called update_context_time() 3775 * because of locking issue as we can be called in 3776 * NMI context 3777 */ 3778 calc_timer_values(event, &now, &enabled, &running); 3779 3780 userpg = rb->user_page; 3781 /* 3782 * Disable preemption so as to not let the corresponding user-space 3783 * spin too long if we get preempted. 3784 */ 3785 preempt_disable(); 3786 ++userpg->lock; 3787 barrier(); 3788 userpg->index = perf_event_index(event); 3789 userpg->offset = perf_event_count(event); 3790 if (userpg->index) 3791 userpg->offset -= local64_read(&event->hw.prev_count); 3792 3793 userpg->time_enabled = enabled + 3794 atomic64_read(&event->child_total_time_enabled); 3795 3796 userpg->time_running = running + 3797 atomic64_read(&event->child_total_time_running); 3798 3799 arch_perf_update_userpage(userpg, now); 3800 3801 barrier(); 3802 ++userpg->lock; 3803 preempt_enable(); 3804 unlock: 3805 rcu_read_unlock(); 3806 } 3807 3808 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 3809 { 3810 struct perf_event *event = vma->vm_file->private_data; 3811 struct ring_buffer *rb; 3812 int ret = VM_FAULT_SIGBUS; 3813 3814 if (vmf->flags & FAULT_FLAG_MKWRITE) { 3815 if (vmf->pgoff == 0) 3816 ret = 0; 3817 return ret; 3818 } 3819 3820 rcu_read_lock(); 3821 rb = rcu_dereference(event->rb); 3822 if (!rb) 3823 goto unlock; 3824 3825 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) 3826 goto unlock; 3827 3828 vmf->page = perf_mmap_to_page(rb, vmf->pgoff); 3829 if (!vmf->page) 3830 goto unlock; 3831 3832 get_page(vmf->page); 3833 vmf->page->mapping = vma->vm_file->f_mapping; 3834 vmf->page->index = vmf->pgoff; 3835 3836 ret = 0; 3837 unlock: 3838 rcu_read_unlock(); 3839 3840 return ret; 3841 } 3842 3843 static void ring_buffer_attach(struct perf_event *event, 3844 struct ring_buffer *rb) 3845 { 3846 unsigned long flags; 3847 3848 if (!list_empty(&event->rb_entry)) 3849 return; 3850 3851 spin_lock_irqsave(&rb->event_lock, flags); 3852 if (list_empty(&event->rb_entry)) 3853 list_add(&event->rb_entry, &rb->event_list); 3854 spin_unlock_irqrestore(&rb->event_lock, flags); 3855 } 3856 3857 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb) 3858 { 3859 unsigned long flags; 3860 3861 if (list_empty(&event->rb_entry)) 3862 return; 3863 3864 spin_lock_irqsave(&rb->event_lock, flags); 3865 list_del_init(&event->rb_entry); 3866 wake_up_all(&event->waitq); 
3867 spin_unlock_irqrestore(&rb->event_lock, flags); 3868 } 3869 3870 static void ring_buffer_wakeup(struct perf_event *event) 3871 { 3872 struct ring_buffer *rb; 3873 3874 rcu_read_lock(); 3875 rb = rcu_dereference(event->rb); 3876 if (rb) { 3877 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) 3878 wake_up_all(&event->waitq); 3879 } 3880 rcu_read_unlock(); 3881 } 3882 3883 static void rb_free_rcu(struct rcu_head *rcu_head) 3884 { 3885 struct ring_buffer *rb; 3886 3887 rb = container_of(rcu_head, struct ring_buffer, rcu_head); 3888 rb_free(rb); 3889 } 3890 3891 static struct ring_buffer *ring_buffer_get(struct perf_event *event) 3892 { 3893 struct ring_buffer *rb; 3894 3895 rcu_read_lock(); 3896 rb = rcu_dereference(event->rb); 3897 if (rb) { 3898 if (!atomic_inc_not_zero(&rb->refcount)) 3899 rb = NULL; 3900 } 3901 rcu_read_unlock(); 3902 3903 return rb; 3904 } 3905 3906 static void ring_buffer_put(struct ring_buffer *rb) 3907 { 3908 if (!atomic_dec_and_test(&rb->refcount)) 3909 return; 3910 3911 WARN_ON_ONCE(!list_empty(&rb->event_list)); 3912 3913 call_rcu(&rb->rcu_head, rb_free_rcu); 3914 } 3915 3916 static void perf_mmap_open(struct vm_area_struct *vma) 3917 { 3918 struct perf_event *event = vma->vm_file->private_data; 3919 3920 atomic_inc(&event->mmap_count); 3921 atomic_inc(&event->rb->mmap_count); 3922 } 3923 3924 /* 3925 * A buffer can be mmap()ed multiple times; either directly through the same 3926 * event, or through other events by use of perf_event_set_output(). 3927 * 3928 * In order to undo the VM accounting done by perf_mmap() we need to destroy 3929 * the buffer here, where we still have a VM context. This means we need 3930 * to detach all events redirecting to us. 3931 */ 3932 static void perf_mmap_close(struct vm_area_struct *vma) 3933 { 3934 struct perf_event *event = vma->vm_file->private_data; 3935 3936 struct ring_buffer *rb = event->rb; 3937 struct user_struct *mmap_user = rb->mmap_user; 3938 int mmap_locked = rb->mmap_locked; 3939 unsigned long size = perf_data_size(rb); 3940 3941 atomic_dec(&rb->mmap_count); 3942 3943 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 3944 return; 3945 3946 /* Detach current event from the buffer. */ 3947 rcu_assign_pointer(event->rb, NULL); 3948 ring_buffer_detach(event, rb); 3949 mutex_unlock(&event->mmap_mutex); 3950 3951 /* If there's still other mmap()s of this buffer, we're done. */ 3952 if (atomic_read(&rb->mmap_count)) { 3953 ring_buffer_put(rb); /* can't be last */ 3954 return; 3955 } 3956 3957 /* 3958 * No other mmap()s, detach from all other events that might redirect 3959 * into the now unreachable buffer. Somewhat complicated by the 3960 * fact that rb::event_lock otherwise nests inside mmap_mutex. 3961 */ 3962 again: 3963 rcu_read_lock(); 3964 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 3965 if (!atomic_long_inc_not_zero(&event->refcount)) { 3966 /* 3967 * This event is en-route to free_event() which will 3968 * detach it and remove it from the list. 3969 */ 3970 continue; 3971 } 3972 rcu_read_unlock(); 3973 3974 mutex_lock(&event->mmap_mutex); 3975 /* 3976 * Check we didn't race with perf_event_set_output() which can 3977 * swizzle the rb from under us while we were waiting to 3978 * acquire mmap_mutex. 3979 * 3980 * If we find a different rb; ignore this event, a next 3981 * iteration will no longer find it on the list. We have to 3982 * still restart the iteration to make sure we're not now 3983 * iterating the wrong list. 
3984 */ 3985 if (event->rb == rb) { 3986 rcu_assign_pointer(event->rb, NULL); 3987 ring_buffer_detach(event, rb); 3988 ring_buffer_put(rb); /* can't be last, we still have one */ 3989 } 3990 mutex_unlock(&event->mmap_mutex); 3991 put_event(event); 3992 3993 /* 3994 * Restart the iteration; either we're on the wrong list or 3995 * destroyed its integrity by doing a deletion. 3996 */ 3997 goto again; 3998 } 3999 rcu_read_unlock(); 4000 4001 /* 4002 * It could be there's still a few 0-ref events on the list; they'll 4003 * get cleaned up by free_event() -- they'll also still have their 4004 * ref on the rb and will free it whenever they are done with it. 4005 * 4006 * Aside from that, this buffer is 'fully' detached and unmapped, 4007 * undo the VM accounting. 4008 */ 4009 4010 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); 4011 vma->vm_mm->pinned_vm -= mmap_locked; 4012 free_uid(mmap_user); 4013 4014 ring_buffer_put(rb); /* could be last */ 4015 } 4016 4017 static const struct vm_operations_struct perf_mmap_vmops = { 4018 .open = perf_mmap_open, 4019 .close = perf_mmap_close, 4020 .fault = perf_mmap_fault, 4021 .page_mkwrite = perf_mmap_fault, 4022 }; 4023 4024 static int perf_mmap(struct file *file, struct vm_area_struct *vma) 4025 { 4026 struct perf_event *event = file->private_data; 4027 unsigned long user_locked, user_lock_limit; 4028 struct user_struct *user = current_user(); 4029 unsigned long locked, lock_limit; 4030 struct ring_buffer *rb; 4031 unsigned long vma_size; 4032 unsigned long nr_pages; 4033 long user_extra, extra; 4034 int ret = 0, flags = 0; 4035 4036 /* 4037 * Don't allow mmap() of inherited per-task counters. This would 4038 * create a performance issue due to all children writing to the 4039 * same rb. 4040 */ 4041 if (event->cpu == -1 && event->attr.inherit) 4042 return -EINVAL; 4043 4044 if (!(vma->vm_flags & VM_SHARED)) 4045 return -EINVAL; 4046 4047 vma_size = vma->vm_end - vma->vm_start; 4048 nr_pages = (vma_size / PAGE_SIZE) - 1; 4049 4050 /* 4051 * If we have rb pages ensure they're a power-of-two number, so we 4052 * can do bitmasks instead of modulo. 4053 */ 4054 if (nr_pages != 0 && !is_power_of_2(nr_pages)) 4055 return -EINVAL; 4056 4057 if (vma_size != PAGE_SIZE * (1 + nr_pages)) 4058 return -EINVAL; 4059 4060 if (vma->vm_pgoff != 0) 4061 return -EINVAL; 4062 4063 WARN_ON_ONCE(event->ctx->parent_ctx); 4064 again: 4065 mutex_lock(&event->mmap_mutex); 4066 if (event->rb) { 4067 if (event->rb->nr_pages != nr_pages) { 4068 ret = -EINVAL; 4069 goto unlock; 4070 } 4071 4072 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { 4073 /* 4074 * Raced against perf_mmap_close() through 4075 * perf_event_set_output(). Try again, hope for better 4076 * luck. 
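 *
 * (This can only happen while some other mmap() of the same buffer
 * is in the middle of being torn down; once that teardown detaches
 * event->rb, the retry simply finds no buffer and allocates a fresh
 * one.)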
4077 */ 4078 mutex_unlock(&event->mmap_mutex); 4079 goto again; 4080 } 4081 4082 goto unlock; 4083 } 4084 4085 user_extra = nr_pages + 1; 4086 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); 4087 4088 /* 4089 * Increase the limit linearly with more CPUs: 4090 */ 4091 user_lock_limit *= num_online_cpus(); 4092 4093 user_locked = atomic_long_read(&user->locked_vm) + user_extra; 4094 4095 extra = 0; 4096 if (user_locked > user_lock_limit) 4097 extra = user_locked - user_lock_limit; 4098 4099 lock_limit = rlimit(RLIMIT_MEMLOCK); 4100 lock_limit >>= PAGE_SHIFT; 4101 locked = vma->vm_mm->pinned_vm + extra; 4102 4103 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && 4104 !capable(CAP_IPC_LOCK)) { 4105 ret = -EPERM; 4106 goto unlock; 4107 } 4108 4109 WARN_ON(event->rb); 4110 4111 if (vma->vm_flags & VM_WRITE) 4112 flags |= RING_BUFFER_WRITABLE; 4113 4114 rb = rb_alloc(nr_pages, 4115 event->attr.watermark ? event->attr.wakeup_watermark : 0, 4116 event->cpu, flags); 4117 4118 if (!rb) { 4119 ret = -ENOMEM; 4120 goto unlock; 4121 } 4122 4123 atomic_set(&rb->mmap_count, 1); 4124 rb->mmap_locked = extra; 4125 rb->mmap_user = get_current_user(); 4126 4127 atomic_long_add(user_extra, &user->locked_vm); 4128 vma->vm_mm->pinned_vm += extra; 4129 4130 ring_buffer_attach(event, rb); 4131 rcu_assign_pointer(event->rb, rb); 4132 4133 perf_event_init_userpage(event); 4134 perf_event_update_userpage(event); 4135 4136 unlock: 4137 if (!ret) 4138 atomic_inc(&event->mmap_count); 4139 mutex_unlock(&event->mmap_mutex); 4140 4141 /* 4142 * Since pinned accounting is per vm we cannot allow fork() to copy our 4143 * vma. 4144 */ 4145 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; 4146 vma->vm_ops = &perf_mmap_vmops; 4147 4148 return ret; 4149 } 4150 4151 static int perf_fasync(int fd, struct file *filp, int on) 4152 { 4153 struct inode *inode = file_inode(filp); 4154 struct perf_event *event = filp->private_data; 4155 int retval; 4156 4157 mutex_lock(&inode->i_mutex); 4158 retval = fasync_helper(fd, filp, on, &event->fasync); 4159 mutex_unlock(&inode->i_mutex); 4160 4161 if (retval < 0) 4162 return retval; 4163 4164 return 0; 4165 } 4166 4167 static const struct file_operations perf_fops = { 4168 .llseek = no_llseek, 4169 .release = perf_release, 4170 .read = perf_read, 4171 .poll = perf_poll, 4172 .unlocked_ioctl = perf_ioctl, 4173 .compat_ioctl = perf_ioctl, 4174 .mmap = perf_mmap, 4175 .fasync = perf_fasync, 4176 }; 4177 4178 /* 4179 * Perf event wakeup 4180 * 4181 * If there's data, ensure we set the poll() state and publish everything 4182 * to user-space before waking everybody up. 4183 */ 4184 4185 void perf_event_wakeup(struct perf_event *event) 4186 { 4187 ring_buffer_wakeup(event); 4188 4189 if (event->pending_kill) { 4190 kill_fasync(&event->fasync, SIGIO, event->pending_kill); 4191 event->pending_kill = 0; 4192 } 4193 } 4194 4195 static void perf_pending_event(struct irq_work *entry) 4196 { 4197 struct perf_event *event = container_of(entry, 4198 struct perf_event, pending); 4199 4200 if (event->pending_disable) { 4201 event->pending_disable = 0; 4202 __perf_event_disable(event); 4203 } 4204 4205 if (event->pending_wakeup) { 4206 event->pending_wakeup = 0; 4207 perf_event_wakeup(event); 4208 } 4209 } 4210 4211 /* 4212 * We assume there is only KVM supporting the callbacks. 4213 * Later on, we might change it to a list if there is 4214 * another virtualization implementation supporting the callbacks. 
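 * Registration below just publishes a single callback pointer; the caller
 * must unregister it before the callbacks go away.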
4215 */ 4216 struct perf_guest_info_callbacks *perf_guest_cbs; 4217 4218 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 4219 { 4220 perf_guest_cbs = cbs; 4221 return 0; 4222 } 4223 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); 4224 4225 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) 4226 { 4227 perf_guest_cbs = NULL; 4228 return 0; 4229 } 4230 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); 4231 4232 static void 4233 perf_output_sample_regs(struct perf_output_handle *handle, 4234 struct pt_regs *regs, u64 mask) 4235 { 4236 int bit; 4237 4238 for_each_set_bit(bit, (const unsigned long *) &mask, 4239 sizeof(mask) * BITS_PER_BYTE) { 4240 u64 val; 4241 4242 val = perf_reg_value(regs, bit); 4243 perf_output_put(handle, val); 4244 } 4245 } 4246 4247 static void perf_sample_regs_user(struct perf_regs_user *regs_user, 4248 struct pt_regs *regs) 4249 { 4250 if (!user_mode(regs)) { 4251 if (current->mm) 4252 regs = task_pt_regs(current); 4253 else 4254 regs = NULL; 4255 } 4256 4257 if (regs) { 4258 regs_user->regs = regs; 4259 regs_user->abi = perf_reg_abi(current); 4260 } 4261 } 4262 4263 /* 4264 * Get remaining task size from user stack pointer. 4265 * 4266 * It'd be better to take the stack vma map and limit this more 4267 * precisely, but there's no way to get it safely under interrupt, 4268 * so we use TASK_SIZE as the limit. 4269 */ 4270 static u64 perf_ustack_task_size(struct pt_regs *regs) 4271 { 4272 unsigned long addr = perf_user_stack_pointer(regs); 4273 4274 if (!addr || addr >= TASK_SIZE) 4275 return 0; 4276 4277 return TASK_SIZE - addr; 4278 } 4279 4280 static u16 4281 perf_sample_ustack_size(u16 stack_size, u16 header_size, 4282 struct pt_regs *regs) 4283 { 4284 u64 task_size; 4285 4286 /* No regs, no stack pointer, no dump. */ 4287 if (!regs) 4288 return 0; 4289 4290 /* 4291 * Clamp the requested stack size against: 4292 * - TASK_SIZE 4293 * If it doesn't fit, limit the size to TASK_SIZE. 4294 * 4295 * - the remaining sample size 4296 * If it doesn't fit, shrink the stack size to 4297 * fit into the remaining sample size. 4298 */ 4299 4300 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); 4301 stack_size = min(stack_size, (u16) task_size); 4302 4303 /* Current header size plus static size and dynamic size. */ 4304 header_size += 2 * sizeof(u64); 4305 4306 /* Does the stack dump still fit within the maximum sample size? */ 4307 if ((u16) (header_size + stack_size) < header_size) { 4308 /* 4309 * If we overflow the maximum size for the sample, 4310 * we shrink the stack dump size to fit. 4311 */ 4312 stack_size = USHRT_MAX - header_size - sizeof(u64); 4313 stack_size = round_up(stack_size, sizeof(u64)); 4314 } 4315 4316 return stack_size; 4317 } 4318 4319 static void 4320 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, 4321 struct pt_regs *regs) 4322 { 4323 /* Case of a kernel thread, nothing to dump */ 4324 if (!regs) { 4325 u64 size = 0; 4326 perf_output_put(handle, size); 4327 } else { 4328 unsigned long sp; 4329 unsigned int rem; 4330 u64 dyn_size; 4331 4332 /* 4333 * We dump: 4334 * static size 4335 * - the size requested by user or the best one we can fit 4336 * into the sample max size 4337 * data 4338 * - user stack dump data 4339 * dynamic size 4340 * - the actual dumped size 4341 */ 4342 4343 /* Static size. */ 4344 perf_output_put(handle, dump_size); 4345 4346 /* Data.
*/ 4347 sp = perf_user_stack_pointer(regs); 4348 rem = __output_copy_user(handle, (void *) sp, dump_size); 4349 dyn_size = dump_size - rem; 4350 4351 perf_output_skip(handle, rem); 4352 4353 /* Dynamic size. */ 4354 perf_output_put(handle, dyn_size); 4355 } 4356 } 4357 4358 static void __perf_event_header__init_id(struct perf_event_header *header, 4359 struct perf_sample_data *data, 4360 struct perf_event *event) 4361 { 4362 u64 sample_type = event->attr.sample_type; 4363 4364 data->type = sample_type; 4365 header->size += event->id_header_size; 4366 4367 if (sample_type & PERF_SAMPLE_TID) { 4368 /* namespace issues */ 4369 data->tid_entry.pid = perf_event_pid(event, current); 4370 data->tid_entry.tid = perf_event_tid(event, current); 4371 } 4372 4373 if (sample_type & PERF_SAMPLE_TIME) 4374 data->time = perf_clock(); 4375 4376 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 4377 data->id = primary_event_id(event); 4378 4379 if (sample_type & PERF_SAMPLE_STREAM_ID) 4380 data->stream_id = event->id; 4381 4382 if (sample_type & PERF_SAMPLE_CPU) { 4383 data->cpu_entry.cpu = raw_smp_processor_id(); 4384 data->cpu_entry.reserved = 0; 4385 } 4386 } 4387 4388 void perf_event_header__init_id(struct perf_event_header *header, 4389 struct perf_sample_data *data, 4390 struct perf_event *event) 4391 { 4392 if (event->attr.sample_id_all) 4393 __perf_event_header__init_id(header, data, event); 4394 } 4395 4396 static void __perf_event__output_id_sample(struct perf_output_handle *handle, 4397 struct perf_sample_data *data) 4398 { 4399 u64 sample_type = data->type; 4400 4401 if (sample_type & PERF_SAMPLE_TID) 4402 perf_output_put(handle, data->tid_entry); 4403 4404 if (sample_type & PERF_SAMPLE_TIME) 4405 perf_output_put(handle, data->time); 4406 4407 if (sample_type & PERF_SAMPLE_ID) 4408 perf_output_put(handle, data->id); 4409 4410 if (sample_type & PERF_SAMPLE_STREAM_ID) 4411 perf_output_put(handle, data->stream_id); 4412 4413 if (sample_type & PERF_SAMPLE_CPU) 4414 perf_output_put(handle, data->cpu_entry); 4415 4416 if (sample_type & PERF_SAMPLE_IDENTIFIER) 4417 perf_output_put(handle, data->id); 4418 } 4419 4420 void perf_event__output_id_sample(struct perf_event *event, 4421 struct perf_output_handle *handle, 4422 struct perf_sample_data *sample) 4423 { 4424 if (event->attr.sample_id_all) 4425 __perf_event__output_id_sample(handle, sample); 4426 } 4427 4428 static void perf_output_read_one(struct perf_output_handle *handle, 4429 struct perf_event *event, 4430 u64 enabled, u64 running) 4431 { 4432 u64 read_format = event->attr.read_format; 4433 u64 values[4]; 4434 int n = 0; 4435 4436 values[n++] = perf_event_count(event); 4437 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 4438 values[n++] = enabled + 4439 atomic64_read(&event->child_total_time_enabled); 4440 } 4441 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 4442 values[n++] = running + 4443 atomic64_read(&event->child_total_time_running); 4444 } 4445 if (read_format & PERF_FORMAT_ID) 4446 values[n++] = primary_event_id(event); 4447 4448 __output_copy(handle, values, n * sizeof(u64)); 4449 } 4450 4451 /* 4452 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 
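 * (Child counts are only folded back into the parent when the child
 * exits, so an aggregated group view is not readily available here.)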
4453 */ 4454 static void perf_output_read_group(struct perf_output_handle *handle, 4455 struct perf_event *event, 4456 u64 enabled, u64 running) 4457 { 4458 struct perf_event *leader = event->group_leader, *sub; 4459 u64 read_format = event->attr.read_format; 4460 u64 values[5]; 4461 int n = 0; 4462 4463 values[n++] = 1 + leader->nr_siblings; 4464 4465 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 4466 values[n++] = enabled; 4467 4468 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 4469 values[n++] = running; 4470 4471 if (leader != event) 4472 leader->pmu->read(leader); 4473 4474 values[n++] = perf_event_count(leader); 4475 if (read_format & PERF_FORMAT_ID) 4476 values[n++] = primary_event_id(leader); 4477 4478 __output_copy(handle, values, n * sizeof(u64)); 4479 4480 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 4481 n = 0; 4482 4483 if ((sub != event) && 4484 (sub->state == PERF_EVENT_STATE_ACTIVE)) 4485 sub->pmu->read(sub); 4486 4487 values[n++] = perf_event_count(sub); 4488 if (read_format & PERF_FORMAT_ID) 4489 values[n++] = primary_event_id(sub); 4490 4491 __output_copy(handle, values, n * sizeof(u64)); 4492 } 4493 } 4494 4495 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ 4496 PERF_FORMAT_TOTAL_TIME_RUNNING) 4497 4498 static void perf_output_read(struct perf_output_handle *handle, 4499 struct perf_event *event) 4500 { 4501 u64 enabled = 0, running = 0, now; 4502 u64 read_format = event->attr.read_format; 4503 4504 /* 4505 * Compute total_time_enabled and total_time_running 4506 * based on the snapshot values taken when the event 4507 * was last scheduled in. 4508 * 4509 * We cannot simply call update_context_time() 4510 * because of locking issues: we may be called 4511 * from NMI context. 4512 */ 4513 if (read_format & PERF_FORMAT_TOTAL_TIMES) 4514 calc_timer_values(event, &now, &enabled, &running); 4515 4516 if (event->attr.read_format & PERF_FORMAT_GROUP) 4517 perf_output_read_group(handle, event, enabled, running); 4518 else 4519 perf_output_read_one(handle, event, enabled, running); 4520 } 4521 4522 void perf_output_sample(struct perf_output_handle *handle, 4523 struct perf_event_header *header, 4524 struct perf_sample_data *data, 4525 struct perf_event *event) 4526 { 4527 u64 sample_type = data->type; 4528 4529 perf_output_put(handle, *header); 4530 4531 if (sample_type & PERF_SAMPLE_IDENTIFIER) 4532 perf_output_put(handle, data->id); 4533 4534 if (sample_type & PERF_SAMPLE_IP) 4535 perf_output_put(handle, data->ip); 4536 4537 if (sample_type & PERF_SAMPLE_TID) 4538 perf_output_put(handle, data->tid_entry); 4539 4540 if (sample_type & PERF_SAMPLE_TIME) 4541 perf_output_put(handle, data->time); 4542 4543 if (sample_type & PERF_SAMPLE_ADDR) 4544 perf_output_put(handle, data->addr); 4545 4546 if (sample_type & PERF_SAMPLE_ID) 4547 perf_output_put(handle, data->id); 4548 4549 if (sample_type & PERF_SAMPLE_STREAM_ID) 4550 perf_output_put(handle, data->stream_id); 4551 4552 if (sample_type & PERF_SAMPLE_CPU) 4553 perf_output_put(handle, data->cpu_entry); 4554 4555 if (sample_type & PERF_SAMPLE_PERIOD) 4556 perf_output_put(handle, data->period); 4557 4558 if (sample_type & PERF_SAMPLE_READ) 4559 perf_output_read(handle, event); 4560 4561 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 4562 if (data->callchain) { 4563 int size = 1; 4564 4565 if (data->callchain) 4566 size += data->callchain->nr; 4567 4568 size *= sizeof(u64); 4569 4570 __output_copy(handle, data->callchain, size); 4571 } else { 4572 u64 nr = 0; 4573 perf_output_put(handle, nr); 4574 } 4575
} 4576 4577 if (sample_type & PERF_SAMPLE_RAW) { 4578 if (data->raw) { 4579 perf_output_put(handle, data->raw->size); 4580 __output_copy(handle, data->raw->data, 4581 data->raw->size); 4582 } else { 4583 struct { 4584 u32 size; 4585 u32 data; 4586 } raw = { 4587 .size = sizeof(u32), 4588 .data = 0, 4589 }; 4590 perf_output_put(handle, raw); 4591 } 4592 } 4593 4594 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 4595 if (data->br_stack) { 4596 size_t size; 4597 4598 size = data->br_stack->nr 4599 * sizeof(struct perf_branch_entry); 4600 4601 perf_output_put(handle, data->br_stack->nr); 4602 perf_output_copy(handle, data->br_stack->entries, size); 4603 } else { 4604 /* 4605 * we always store at least the value of nr 4606 */ 4607 u64 nr = 0; 4608 perf_output_put(handle, nr); 4609 } 4610 } 4611 4612 if (sample_type & PERF_SAMPLE_REGS_USER) { 4613 u64 abi = data->regs_user.abi; 4614 4615 /* 4616 * If there are no regs to dump, notice it through 4617 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 4618 */ 4619 perf_output_put(handle, abi); 4620 4621 if (abi) { 4622 u64 mask = event->attr.sample_regs_user; 4623 perf_output_sample_regs(handle, 4624 data->regs_user.regs, 4625 mask); 4626 } 4627 } 4628 4629 if (sample_type & PERF_SAMPLE_STACK_USER) { 4630 perf_output_sample_ustack(handle, 4631 data->stack_user_size, 4632 data->regs_user.regs); 4633 } 4634 4635 if (sample_type & PERF_SAMPLE_WEIGHT) 4636 perf_output_put(handle, data->weight); 4637 4638 if (sample_type & PERF_SAMPLE_DATA_SRC) 4639 perf_output_put(handle, data->data_src.val); 4640 4641 if (sample_type & PERF_SAMPLE_TRANSACTION) 4642 perf_output_put(handle, data->txn); 4643 4644 if (!event->attr.watermark) { 4645 int wakeup_events = event->attr.wakeup_events; 4646 4647 if (wakeup_events) { 4648 struct ring_buffer *rb = handle->rb; 4649 int events = local_inc_return(&rb->events); 4650 4651 if (events >= wakeup_events) { 4652 local_sub(wakeup_events, &rb->events); 4653 local_inc(&rb->wakeup); 4654 } 4655 } 4656 } 4657 } 4658 4659 void perf_prepare_sample(struct perf_event_header *header, 4660 struct perf_sample_data *data, 4661 struct perf_event *event, 4662 struct pt_regs *regs) 4663 { 4664 u64 sample_type = event->attr.sample_type; 4665 4666 header->type = PERF_RECORD_SAMPLE; 4667 header->size = sizeof(*header) + event->header_size; 4668 4669 header->misc = 0; 4670 header->misc |= perf_misc_flags(regs); 4671 4672 __perf_event_header__init_id(header, data, event); 4673 4674 if (sample_type & PERF_SAMPLE_IP) 4675 data->ip = perf_instruction_pointer(regs); 4676 4677 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 4678 int size = 1; 4679 4680 data->callchain = perf_callchain(event, regs); 4681 4682 if (data->callchain) 4683 size += data->callchain->nr; 4684 4685 header->size += size * sizeof(u64); 4686 } 4687 4688 if (sample_type & PERF_SAMPLE_RAW) { 4689 int size = sizeof(u32); 4690 4691 if (data->raw) 4692 size += data->raw->size; 4693 else 4694 size += sizeof(u32); 4695 4696 WARN_ON_ONCE(size & (sizeof(u64)-1)); 4697 header->size += size; 4698 } 4699 4700 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 4701 int size = sizeof(u64); /* nr */ 4702 if (data->br_stack) { 4703 size += data->br_stack->nr 4704 * sizeof(struct perf_branch_entry); 4705 } 4706 header->size += size; 4707 } 4708 4709 if (sample_type & PERF_SAMPLE_REGS_USER) { 4710 /* regs dump ABI info */ 4711 int size = sizeof(u64); 4712 4713 perf_sample_regs_user(&data->regs_user, regs); 4714 4715 if (data->regs_user.regs) { 4716 u64 mask = event->attr.sample_regs_user; 4717 size += 
hweight64(mask) * sizeof(u64); 4718 } 4719 4720 header->size += size; 4721 } 4722 4723 if (sample_type & PERF_SAMPLE_STACK_USER) { 4724 /* 4725 * Either the PERF_SAMPLE_STACK_USER bit needs to always be 4726 * processed as the last one, or an additional check must be 4727 * added whenever a new sample type is introduced, because we 4728 * could eat up the rest of the sample size. 4729 */ 4730 struct perf_regs_user *uregs = &data->regs_user; 4731 u16 stack_size = event->attr.sample_stack_user; 4732 u16 size = sizeof(u64); 4733 4734 if (!uregs->abi) 4735 perf_sample_regs_user(uregs, regs); 4736 4737 stack_size = perf_sample_ustack_size(stack_size, header->size, 4738 uregs->regs); 4739 4740 /* 4741 * If there is something to dump, add space for the dump 4742 * itself and for the field that tells the dynamic size, 4743 * i.e. how much was actually dumped. 4744 */ 4745 if (stack_size) 4746 size += sizeof(u64) + stack_size; 4747 4748 data->stack_user_size = stack_size; 4749 header->size += size; 4750 } 4751 } 4752 4753 static void perf_event_output(struct perf_event *event, 4754 struct perf_sample_data *data, 4755 struct pt_regs *regs) 4756 { 4757 struct perf_output_handle handle; 4758 struct perf_event_header header; 4759 4760 /* protect the callchain buffers */ 4761 rcu_read_lock(); 4762 4763 perf_prepare_sample(&header, data, event, regs); 4764 4765 if (perf_output_begin(&handle, event, header.size)) 4766 goto exit; 4767 4768 perf_output_sample(&handle, &header, data, event); 4769 4770 perf_output_end(&handle); 4771 4772 exit: 4773 rcu_read_unlock(); 4774 } 4775 4776 /* 4777 * read event_id 4778 */ 4779 4780 struct perf_read_event { 4781 struct perf_event_header header; 4782 4783 u32 pid; 4784 u32 tid; 4785 }; 4786 4787 static void 4788 perf_event_read_event(struct perf_event *event, 4789 struct task_struct *task) 4790 { 4791 struct perf_output_handle handle; 4792 struct perf_sample_data sample; 4793 struct perf_read_event read_event = { 4794 .header = { 4795 .type = PERF_RECORD_READ, 4796 .misc = 0, 4797 .size = sizeof(read_event) + event->read_size, 4798 }, 4799 .pid = perf_event_pid(event, task), 4800 .tid = perf_event_tid(event, task), 4801 }; 4802 int ret; 4803 4804 perf_event_header__init_id(&read_event.header, &sample, event); 4805 ret = perf_output_begin(&handle, event, read_event.header.size); 4806 if (ret) 4807 return; 4808 4809 perf_output_put(&handle, read_event); 4810 perf_output_read(&handle, event); 4811 perf_event__output_id_sample(event, &handle, &sample); 4812 4813 perf_output_end(&handle); 4814 } 4815 4816 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); 4817 4818 static void 4819 perf_event_aux_ctx(struct perf_event_context *ctx, 4820 perf_event_aux_output_cb output, 4821 void *data) 4822 { 4823 struct perf_event *event; 4824 4825 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 4826 if (event->state < PERF_EVENT_STATE_INACTIVE) 4827 continue; 4828 if (!event_filter_match(event)) 4829 continue; 4830 output(event, data); 4831 } 4832 } 4833 4834 static void 4835 perf_event_aux(perf_event_aux_output_cb output, void *data, 4836 struct perf_event_context *task_ctx) 4837 { 4838 struct perf_cpu_context *cpuctx; 4839 struct perf_event_context *ctx; 4840 struct pmu *pmu; 4841 int ctxn; 4842 4843 rcu_read_lock(); 4844 list_for_each_entry_rcu(pmu, &pmus, entry) { 4845 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4846 if (cpuctx->unique_pmu != pmu) 4847 goto next; 4848 perf_event_aux_ctx(&cpuctx->ctx, output, data); 4849 if (task_ctx) 4850 goto
next; 4851 ctxn = pmu->task_ctx_nr; 4852 if (ctxn < 0) 4853 goto next; 4854 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 4855 if (ctx) 4856 perf_event_aux_ctx(ctx, output, data); 4857 next: 4858 put_cpu_ptr(pmu->pmu_cpu_context); 4859 } 4860 4861 if (task_ctx) { 4862 preempt_disable(); 4863 perf_event_aux_ctx(task_ctx, output, data); 4864 preempt_enable(); 4865 } 4866 rcu_read_unlock(); 4867 } 4868 4869 /* 4870 * task tracking -- fork/exit 4871 * 4872 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task 4873 */ 4874 4875 struct perf_task_event { 4876 struct task_struct *task; 4877 struct perf_event_context *task_ctx; 4878 4879 struct { 4880 struct perf_event_header header; 4881 4882 u32 pid; 4883 u32 ppid; 4884 u32 tid; 4885 u32 ptid; 4886 u64 time; 4887 } event_id; 4888 }; 4889 4890 static int perf_event_task_match(struct perf_event *event) 4891 { 4892 return event->attr.comm || event->attr.mmap || 4893 event->attr.mmap2 || event->attr.mmap_data || 4894 event->attr.task; 4895 } 4896 4897 static void perf_event_task_output(struct perf_event *event, 4898 void *data) 4899 { 4900 struct perf_task_event *task_event = data; 4901 struct perf_output_handle handle; 4902 struct perf_sample_data sample; 4903 struct task_struct *task = task_event->task; 4904 int ret, size = task_event->event_id.header.size; 4905 4906 if (!perf_event_task_match(event)) 4907 return; 4908 4909 perf_event_header__init_id(&task_event->event_id.header, &sample, event); 4910 4911 ret = perf_output_begin(&handle, event, 4912 task_event->event_id.header.size); 4913 if (ret) 4914 goto out; 4915 4916 task_event->event_id.pid = perf_event_pid(event, task); 4917 task_event->event_id.ppid = perf_event_pid(event, current); 4918 4919 task_event->event_id.tid = perf_event_tid(event, task); 4920 task_event->event_id.ptid = perf_event_tid(event, current); 4921 4922 perf_output_put(&handle, task_event->event_id); 4923 4924 perf_event__output_id_sample(event, &handle, &sample); 4925 4926 perf_output_end(&handle); 4927 out: 4928 task_event->event_id.header.size = size; 4929 } 4930 4931 static void perf_event_task(struct task_struct *task, 4932 struct perf_event_context *task_ctx, 4933 int new) 4934 { 4935 struct perf_task_event task_event; 4936 4937 if (!atomic_read(&nr_comm_events) && 4938 !atomic_read(&nr_mmap_events) && 4939 !atomic_read(&nr_task_events)) 4940 return; 4941 4942 task_event = (struct perf_task_event){ 4943 .task = task, 4944 .task_ctx = task_ctx, 4945 .event_id = { 4946 .header = { 4947 .type = new ? 
PERF_RECORD_FORK : PERF_RECORD_EXIT, 4948 .misc = 0, 4949 .size = sizeof(task_event.event_id), 4950 }, 4951 /* .pid */ 4952 /* .ppid */ 4953 /* .tid */ 4954 /* .ptid */ 4955 .time = perf_clock(), 4956 }, 4957 }; 4958 4959 perf_event_aux(perf_event_task_output, 4960 &task_event, 4961 task_ctx); 4962 } 4963 4964 void perf_event_fork(struct task_struct *task) 4965 { 4966 perf_event_task(task, NULL, 1); 4967 } 4968 4969 /* 4970 * comm tracking 4971 */ 4972 4973 struct perf_comm_event { 4974 struct task_struct *task; 4975 char *comm; 4976 int comm_size; 4977 4978 struct { 4979 struct perf_event_header header; 4980 4981 u32 pid; 4982 u32 tid; 4983 } event_id; 4984 }; 4985 4986 static int perf_event_comm_match(struct perf_event *event) 4987 { 4988 return event->attr.comm; 4989 } 4990 4991 static void perf_event_comm_output(struct perf_event *event, 4992 void *data) 4993 { 4994 struct perf_comm_event *comm_event = data; 4995 struct perf_output_handle handle; 4996 struct perf_sample_data sample; 4997 int size = comm_event->event_id.header.size; 4998 int ret; 4999 5000 if (!perf_event_comm_match(event)) 5001 return; 5002 5003 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); 5004 ret = perf_output_begin(&handle, event, 5005 comm_event->event_id.header.size); 5006 5007 if (ret) 5008 goto out; 5009 5010 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); 5011 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); 5012 5013 perf_output_put(&handle, comm_event->event_id); 5014 __output_copy(&handle, comm_event->comm, 5015 comm_event->comm_size); 5016 5017 perf_event__output_id_sample(event, &handle, &sample); 5018 5019 perf_output_end(&handle); 5020 out: 5021 comm_event->event_id.header.size = size; 5022 } 5023 5024 static void perf_event_comm_event(struct perf_comm_event *comm_event) 5025 { 5026 char comm[TASK_COMM_LEN]; 5027 unsigned int size; 5028 5029 memset(comm, 0, sizeof(comm)); 5030 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 5031 size = ALIGN(strlen(comm)+1, sizeof(u64)); 5032 5033 comm_event->comm = comm; 5034 comm_event->comm_size = size; 5035 5036 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 5037 5038 perf_event_aux(perf_event_comm_output, 5039 comm_event, 5040 NULL); 5041 } 5042 5043 void perf_event_comm(struct task_struct *task) 5044 { 5045 struct perf_comm_event comm_event; 5046 struct perf_event_context *ctx; 5047 int ctxn; 5048 5049 rcu_read_lock(); 5050 for_each_task_context_nr(ctxn) { 5051 ctx = task->perf_event_ctxp[ctxn]; 5052 if (!ctx) 5053 continue; 5054 5055 perf_event_enable_on_exec(ctx); 5056 } 5057 rcu_read_unlock(); 5058 5059 if (!atomic_read(&nr_comm_events)) 5060 return; 5061 5062 comm_event = (struct perf_comm_event){ 5063 .task = task, 5064 /* .comm */ 5065 /* .comm_size */ 5066 .event_id = { 5067 .header = { 5068 .type = PERF_RECORD_COMM, 5069 .misc = 0, 5070 /* .size */ 5071 }, 5072 /* .pid */ 5073 /* .tid */ 5074 }, 5075 }; 5076 5077 perf_event_comm_event(&comm_event); 5078 } 5079 5080 /* 5081 * mmap tracking 5082 */ 5083 5084 struct perf_mmap_event { 5085 struct vm_area_struct *vma; 5086 5087 const char *file_name; 5088 int file_size; 5089 int maj, min; 5090 u64 ino; 5091 u64 ino_generation; 5092 5093 struct { 5094 struct perf_event_header header; 5095 5096 u32 pid; 5097 u32 tid; 5098 u64 start; 5099 u64 len; 5100 u64 pgoff; 5101 } event_id; 5102 }; 5103 5104 static int perf_event_mmap_match(struct perf_event *event, 5105 void *data) 5106 { 5107 struct perf_mmap_event *mmap_event 
= data; 5108 struct vm_area_struct *vma = mmap_event->vma; 5109 int executable = vma->vm_flags & VM_EXEC; 5110 5111 return (!executable && event->attr.mmap_data) || 5112 (executable && (event->attr.mmap || event->attr.mmap2)); 5113 } 5114 5115 static void perf_event_mmap_output(struct perf_event *event, 5116 void *data) 5117 { 5118 struct perf_mmap_event *mmap_event = data; 5119 struct perf_output_handle handle; 5120 struct perf_sample_data sample; 5121 int size = mmap_event->event_id.header.size; 5122 int ret; 5123 5124 if (!perf_event_mmap_match(event, data)) 5125 return; 5126 5127 if (event->attr.mmap2) { 5128 mmap_event->event_id.header.type = PERF_RECORD_MMAP2; 5129 mmap_event->event_id.header.size += sizeof(mmap_event->maj); 5130 mmap_event->event_id.header.size += sizeof(mmap_event->min); 5131 mmap_event->event_id.header.size += sizeof(mmap_event->ino); 5132 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); 5133 } 5134 5135 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); 5136 ret = perf_output_begin(&handle, event, 5137 mmap_event->event_id.header.size); 5138 if (ret) 5139 goto out; 5140 5141 mmap_event->event_id.pid = perf_event_pid(event, current); 5142 mmap_event->event_id.tid = perf_event_tid(event, current); 5143 5144 perf_output_put(&handle, mmap_event->event_id); 5145 5146 if (event->attr.mmap2) { 5147 perf_output_put(&handle, mmap_event->maj); 5148 perf_output_put(&handle, mmap_event->min); 5149 perf_output_put(&handle, mmap_event->ino); 5150 perf_output_put(&handle, mmap_event->ino_generation); 5151 } 5152 5153 __output_copy(&handle, mmap_event->file_name, 5154 mmap_event->file_size); 5155 5156 perf_event__output_id_sample(event, &handle, &sample); 5157 5158 perf_output_end(&handle); 5159 out: 5160 mmap_event->event_id.header.size = size; 5161 } 5162 5163 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 5164 { 5165 struct vm_area_struct *vma = mmap_event->vma; 5166 struct file *file = vma->vm_file; 5167 int maj = 0, min = 0; 5168 u64 ino = 0, gen = 0; 5169 unsigned int size; 5170 char tmp[16]; 5171 char *buf = NULL; 5172 char *name; 5173 5174 if (file) { 5175 struct inode *inode; 5176 dev_t dev; 5177 5178 buf = kmalloc(PATH_MAX, GFP_KERNEL); 5179 if (!buf) { 5180 name = "//enomem"; 5181 goto cpy_name; 5182 } 5183 /* 5184 * d_path() works from the end of the rb backwards, so we 5185 * need to add enough zero bytes after the string to handle 5186 * the 64bit alignment we do later. 5187 */ 5188 name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64)); 5189 if (IS_ERR(name)) { 5190 name = "//toolong"; 5191 goto cpy_name; 5192 } 5193 inode = file_inode(vma->vm_file); 5194 dev = inode->i_sb->s_dev; 5195 ino = inode->i_ino; 5196 gen = inode->i_generation; 5197 maj = MAJOR(dev); 5198 min = MINOR(dev); 5199 goto got_name; 5200 } else { 5201 name = (char *)arch_vma_name(vma); 5202 if (name) 5203 goto cpy_name; 5204 5205 if (vma->vm_start <= vma->vm_mm->start_brk && 5206 vma->vm_end >= vma->vm_mm->brk) { 5207 name = "[heap]"; 5208 goto cpy_name; 5209 } 5210 if (vma->vm_start <= vma->vm_mm->start_stack && 5211 vma->vm_end >= vma->vm_mm->start_stack) { 5212 name = "[stack]"; 5213 goto cpy_name; 5214 } 5215 5216 name = "//anon"; 5217 goto cpy_name; 5218 } 5219 5220 cpy_name: 5221 strlcpy(tmp, name, sizeof(tmp)); 5222 name = tmp; 5223 got_name: 5224 /* 5225 * Since our buffer works in 8 byte units we need to align our string 5226 * size to a multiple of 8. 
However, we must guarantee the tail end is 5227 * zero'd out to avoid leaking random bits to userspace. 5228 */ 5229 size = strlen(name)+1; 5230 while (!IS_ALIGNED(size, sizeof(u64))) 5231 name[size++] = '\0'; 5232 5233 mmap_event->file_name = name; 5234 mmap_event->file_size = size; 5235 mmap_event->maj = maj; 5236 mmap_event->min = min; 5237 mmap_event->ino = ino; 5238 mmap_event->ino_generation = gen; 5239 5240 if (!(vma->vm_flags & VM_EXEC)) 5241 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; 5242 5243 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 5244 5245 perf_event_aux(perf_event_mmap_output, 5246 mmap_event, 5247 NULL); 5248 5249 kfree(buf); 5250 } 5251 5252 void perf_event_mmap(struct vm_area_struct *vma) 5253 { 5254 struct perf_mmap_event mmap_event; 5255 5256 if (!atomic_read(&nr_mmap_events)) 5257 return; 5258 5259 mmap_event = (struct perf_mmap_event){ 5260 .vma = vma, 5261 /* .file_name */ 5262 /* .file_size */ 5263 .event_id = { 5264 .header = { 5265 .type = PERF_RECORD_MMAP, 5266 .misc = PERF_RECORD_MISC_USER, 5267 /* .size */ 5268 }, 5269 /* .pid */ 5270 /* .tid */ 5271 .start = vma->vm_start, 5272 .len = vma->vm_end - vma->vm_start, 5273 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, 5274 }, 5275 /* .maj (attr_mmap2 only) */ 5276 /* .min (attr_mmap2 only) */ 5277 /* .ino (attr_mmap2 only) */ 5278 /* .ino_generation (attr_mmap2 only) */ 5279 }; 5280 5281 perf_event_mmap_event(&mmap_event); 5282 } 5283 5284 /* 5285 * IRQ throttle logging 5286 */ 5287 5288 static void perf_log_throttle(struct perf_event *event, int enable) 5289 { 5290 struct perf_output_handle handle; 5291 struct perf_sample_data sample; 5292 int ret; 5293 5294 struct { 5295 struct perf_event_header header; 5296 u64 time; 5297 u64 id; 5298 u64 stream_id; 5299 } throttle_event = { 5300 .header = { 5301 .type = PERF_RECORD_THROTTLE, 5302 .misc = 0, 5303 .size = sizeof(throttle_event), 5304 }, 5305 .time = perf_clock(), 5306 .id = primary_event_id(event), 5307 .stream_id = event->id, 5308 }; 5309 5310 if (enable) 5311 throttle_event.header.type = PERF_RECORD_UNTHROTTLE; 5312 5313 perf_event_header__init_id(&throttle_event.header, &sample, event); 5314 5315 ret = perf_output_begin(&handle, event, 5316 throttle_event.header.size); 5317 if (ret) 5318 return; 5319 5320 perf_output_put(&handle, throttle_event); 5321 perf_event__output_id_sample(event, &handle, &sample); 5322 perf_output_end(&handle); 5323 } 5324 5325 /* 5326 * Generic event overflow handling, sampling. 5327 */ 5328 5329 static int __perf_event_overflow(struct perf_event *event, 5330 int throttle, struct perf_sample_data *data, 5331 struct pt_regs *regs) 5332 { 5333 int events = atomic_read(&event->event_limit); 5334 struct hw_perf_event *hwc = &event->hw; 5335 u64 seq; 5336 int ret = 0; 5337 5338 /* 5339 * Non-sampling counters might still use the PMI to fold short 5340 * hardware counters, ignore those. 
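 * (e.g. a narrow hardware counter whose interrupt is used only to
 * accumulate into the 64-bit software count before it wraps.)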
5341 */ 5342 if (unlikely(!is_sampling_event(event))) 5343 return 0; 5344 5345 seq = __this_cpu_read(perf_throttled_seq); 5346 if (seq != hwc->interrupts_seq) { 5347 hwc->interrupts_seq = seq; 5348 hwc->interrupts = 1; 5349 } else { 5350 hwc->interrupts++; 5351 if (unlikely(throttle 5352 && hwc->interrupts >= max_samples_per_tick)) { 5353 __this_cpu_inc(perf_throttled_count); 5354 hwc->interrupts = MAX_INTERRUPTS; 5355 perf_log_throttle(event, 0); 5356 tick_nohz_full_kick(); 5357 ret = 1; 5358 } 5359 } 5360 5361 if (event->attr.freq) { 5362 u64 now = perf_clock(); 5363 s64 delta = now - hwc->freq_time_stamp; 5364 5365 hwc->freq_time_stamp = now; 5366 5367 if (delta > 0 && delta < 2*TICK_NSEC) 5368 perf_adjust_period(event, delta, hwc->last_period, true); 5369 } 5370 5371 /* 5372 * XXX event_limit might not quite work as expected on inherited 5373 * events 5374 */ 5375 5376 event->pending_kill = POLL_IN; 5377 if (events && atomic_dec_and_test(&event->event_limit)) { 5378 ret = 1; 5379 event->pending_kill = POLL_HUP; 5380 event->pending_disable = 1; 5381 irq_work_queue(&event->pending); 5382 } 5383 5384 if (event->overflow_handler) 5385 event->overflow_handler(event, data, regs); 5386 else 5387 perf_event_output(event, data, regs); 5388 5389 if (event->fasync && event->pending_kill) { 5390 event->pending_wakeup = 1; 5391 irq_work_queue(&event->pending); 5392 } 5393 5394 return ret; 5395 } 5396 5397 int perf_event_overflow(struct perf_event *event, 5398 struct perf_sample_data *data, 5399 struct pt_regs *regs) 5400 { 5401 return __perf_event_overflow(event, 1, data, regs); 5402 } 5403 5404 /* 5405 * Generic software event infrastructure 5406 */ 5407 5408 struct swevent_htable { 5409 struct swevent_hlist *swevent_hlist; 5410 struct mutex hlist_mutex; 5411 int hlist_refcount; 5412 5413 /* Recursion avoidance in each contexts */ 5414 int recursion[PERF_NR_CONTEXTS]; 5415 }; 5416 5417 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); 5418 5419 /* 5420 * We directly increment event->count and keep a second value in 5421 * event->hw.period_left to count intervals. This period event 5422 * is kept in the range [-sample_period, 0] so that we can use the 5423 * sign as trigger. 5424 */ 5425 5426 u64 perf_swevent_set_period(struct perf_event *event) 5427 { 5428 struct hw_perf_event *hwc = &event->hw; 5429 u64 period = hwc->last_period; 5430 u64 nr, offset; 5431 s64 old, val; 5432 5433 hwc->last_period = hwc->sample_period; 5434 5435 again: 5436 old = val = local64_read(&hwc->period_left); 5437 if (val < 0) 5438 return 0; 5439 5440 nr = div64_u64(period + val, period); 5441 offset = nr * period; 5442 val -= offset; 5443 if (local64_cmpxchg(&hwc->period_left, old, val) != old) 5444 goto again; 5445 5446 return nr; 5447 } 5448 5449 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, 5450 struct perf_sample_data *data, 5451 struct pt_regs *regs) 5452 { 5453 struct hw_perf_event *hwc = &event->hw; 5454 int throttle = 0; 5455 5456 if (!overflow) 5457 overflow = perf_swevent_set_period(event); 5458 5459 if (hwc->interrupts == MAX_INTERRUPTS) 5460 return; 5461 5462 for (; overflow; overflow--) { 5463 if (__perf_event_overflow(event, throttle, 5464 data, regs)) { 5465 /* 5466 * We inhibit the overflow from happening when 5467 * hwc->interrupts == MAX_INTERRUPTS. 
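 * A non-zero return from __perf_event_overflow() means we were
 * throttled (or hit the event limit), so stop processing the
 * remaining overflows.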
5468 */ 5469 break; 5470 } 5471 throttle = 1; 5472 } 5473 } 5474 5475 static void perf_swevent_event(struct perf_event *event, u64 nr, 5476 struct perf_sample_data *data, 5477 struct pt_regs *regs) 5478 { 5479 struct hw_perf_event *hwc = &event->hw; 5480 5481 local64_add(nr, &event->count); 5482 5483 if (!regs) 5484 return; 5485 5486 if (!is_sampling_event(event)) 5487 return; 5488 5489 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { 5490 data->period = nr; 5491 return perf_swevent_overflow(event, 1, data, regs); 5492 } else 5493 data->period = event->hw.last_period; 5494 5495 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) 5496 return perf_swevent_overflow(event, 1, data, regs); 5497 5498 if (local64_add_negative(nr, &hwc->period_left)) 5499 return; 5500 5501 perf_swevent_overflow(event, 0, data, regs); 5502 } 5503 5504 static int perf_exclude_event(struct perf_event *event, 5505 struct pt_regs *regs) 5506 { 5507 if (event->hw.state & PERF_HES_STOPPED) 5508 return 1; 5509 5510 if (regs) { 5511 if (event->attr.exclude_user && user_mode(regs)) 5512 return 1; 5513 5514 if (event->attr.exclude_kernel && !user_mode(regs)) 5515 return 1; 5516 } 5517 5518 return 0; 5519 } 5520 5521 static int perf_swevent_match(struct perf_event *event, 5522 enum perf_type_id type, 5523 u32 event_id, 5524 struct perf_sample_data *data, 5525 struct pt_regs *regs) 5526 { 5527 if (event->attr.type != type) 5528 return 0; 5529 5530 if (event->attr.config != event_id) 5531 return 0; 5532 5533 if (perf_exclude_event(event, regs)) 5534 return 0; 5535 5536 return 1; 5537 } 5538 5539 static inline u64 swevent_hash(u64 type, u32 event_id) 5540 { 5541 u64 val = event_id | (type << 32); 5542 5543 return hash_64(val, SWEVENT_HLIST_BITS); 5544 } 5545 5546 static inline struct hlist_head * 5547 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) 5548 { 5549 u64 hash = swevent_hash(type, event_id); 5550 5551 return &hlist->heads[hash]; 5552 } 5553 5554 /* For the read side: events when they trigger */ 5555 static inline struct hlist_head * 5556 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) 5557 { 5558 struct swevent_hlist *hlist; 5559 5560 hlist = rcu_dereference(swhash->swevent_hlist); 5561 if (!hlist) 5562 return NULL; 5563 5564 return __find_swevent_head(hlist, type, event_id); 5565 } 5566 5567 /* For the event head insertion and removal in the hlist */ 5568 static inline struct hlist_head * 5569 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) 5570 { 5571 struct swevent_hlist *hlist; 5572 u32 event_id = event->attr.config; 5573 u64 type = event->attr.type; 5574 5575 /* 5576 * Event scheduling is always serialized against hlist allocation 5577 * and release. Which makes the protected version suitable here. 5578 * The context lock guarantees that. 
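 * (Hence the lockdep_is_held(&event->ctx->lock) annotation below.)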
5579 */ 5580 hlist = rcu_dereference_protected(swhash->swevent_hlist, 5581 lockdep_is_held(&event->ctx->lock)); 5582 if (!hlist) 5583 return NULL; 5584 5585 return __find_swevent_head(hlist, type, event_id); 5586 } 5587 5588 static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 5589 u64 nr, 5590 struct perf_sample_data *data, 5591 struct pt_regs *regs) 5592 { 5593 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5594 struct perf_event *event; 5595 struct hlist_head *head; 5596 5597 rcu_read_lock(); 5598 head = find_swevent_head_rcu(swhash, type, event_id); 5599 if (!head) 5600 goto end; 5601 5602 hlist_for_each_entry_rcu(event, head, hlist_entry) { 5603 if (perf_swevent_match(event, type, event_id, data, regs)) 5604 perf_swevent_event(event, nr, data, regs); 5605 } 5606 end: 5607 rcu_read_unlock(); 5608 } 5609 5610 int perf_swevent_get_recursion_context(void) 5611 { 5612 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5613 5614 return get_recursion_context(swhash->recursion); 5615 } 5616 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); 5617 5618 inline void perf_swevent_put_recursion_context(int rctx) 5619 { 5620 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5621 5622 put_recursion_context(swhash->recursion, rctx); 5623 } 5624 5625 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 5626 { 5627 struct perf_sample_data data; 5628 int rctx; 5629 5630 preempt_disable_notrace(); 5631 rctx = perf_swevent_get_recursion_context(); 5632 if (rctx < 0) 5633 return; 5634 5635 perf_sample_data_init(&data, addr, 0); 5636 5637 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 5638 5639 perf_swevent_put_recursion_context(rctx); 5640 preempt_enable_notrace(); 5641 } 5642 5643 static void perf_swevent_read(struct perf_event *event) 5644 { 5645 } 5646 5647 static int perf_swevent_add(struct perf_event *event, int flags) 5648 { 5649 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5650 struct hw_perf_event *hwc = &event->hw; 5651 struct hlist_head *head; 5652 5653 if (is_sampling_event(event)) { 5654 hwc->last_period = hwc->sample_period; 5655 perf_swevent_set_period(event); 5656 } 5657 5658 hwc->state = !(flags & PERF_EF_START); 5659 5660 head = find_swevent_head(swhash, event); 5661 if (WARN_ON_ONCE(!head)) 5662 return -EINVAL; 5663 5664 hlist_add_head_rcu(&event->hlist_entry, head); 5665 5666 return 0; 5667 } 5668 5669 static void perf_swevent_del(struct perf_event *event, int flags) 5670 { 5671 hlist_del_rcu(&event->hlist_entry); 5672 } 5673 5674 static void perf_swevent_start(struct perf_event *event, int flags) 5675 { 5676 event->hw.state = 0; 5677 } 5678 5679 static void perf_swevent_stop(struct perf_event *event, int flags) 5680 { 5681 event->hw.state = PERF_HES_STOPPED; 5682 } 5683 5684 /* Deref the hlist from the update side */ 5685 static inline struct swevent_hlist * 5686 swevent_hlist_deref(struct swevent_htable *swhash) 5687 { 5688 return rcu_dereference_protected(swhash->swevent_hlist, 5689 lockdep_is_held(&swhash->hlist_mutex)); 5690 } 5691 5692 static void swevent_hlist_release(struct swevent_htable *swhash) 5693 { 5694 struct swevent_hlist *hlist = swevent_hlist_deref(swhash); 5695 5696 if (!hlist) 5697 return; 5698 5699 rcu_assign_pointer(swhash->swevent_hlist, NULL); 5700 kfree_rcu(hlist, rcu_head); 5701 } 5702 5703 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) 5704 { 5705 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 5706 5707 
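	/* Drop this CPU's reference; the hlist is released once the last user is gone. */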
mutex_lock(&swhash->hlist_mutex); 5708 5709 if (!--swhash->hlist_refcount) 5710 swevent_hlist_release(swhash); 5711 5712 mutex_unlock(&swhash->hlist_mutex); 5713 } 5714 5715 static void swevent_hlist_put(struct perf_event *event) 5716 { 5717 int cpu; 5718 5719 for_each_possible_cpu(cpu) 5720 swevent_hlist_put_cpu(event, cpu); 5721 } 5722 5723 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) 5724 { 5725 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 5726 int err = 0; 5727 5728 mutex_lock(&swhash->hlist_mutex); 5729 5730 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { 5731 struct swevent_hlist *hlist; 5732 5733 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); 5734 if (!hlist) { 5735 err = -ENOMEM; 5736 goto exit; 5737 } 5738 rcu_assign_pointer(swhash->swevent_hlist, hlist); 5739 } 5740 swhash->hlist_refcount++; 5741 exit: 5742 mutex_unlock(&swhash->hlist_mutex); 5743 5744 return err; 5745 } 5746 5747 static int swevent_hlist_get(struct perf_event *event) 5748 { 5749 int err; 5750 int cpu, failed_cpu; 5751 5752 get_online_cpus(); 5753 for_each_possible_cpu(cpu) { 5754 err = swevent_hlist_get_cpu(event, cpu); 5755 if (err) { 5756 failed_cpu = cpu; 5757 goto fail; 5758 } 5759 } 5760 put_online_cpus(); 5761 5762 return 0; 5763 fail: 5764 for_each_possible_cpu(cpu) { 5765 if (cpu == failed_cpu) 5766 break; 5767 swevent_hlist_put_cpu(event, cpu); 5768 } 5769 5770 put_online_cpus(); 5771 return err; 5772 } 5773 5774 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 5775 5776 static void sw_perf_event_destroy(struct perf_event *event) 5777 { 5778 u64 event_id = event->attr.config; 5779 5780 WARN_ON(event->parent); 5781 5782 static_key_slow_dec(&perf_swevent_enabled[event_id]); 5783 swevent_hlist_put(event); 5784 } 5785 5786 static int perf_swevent_init(struct perf_event *event) 5787 { 5788 u64 event_id = event->attr.config; 5789 5790 if (event->attr.type != PERF_TYPE_SOFTWARE) 5791 return -ENOENT; 5792 5793 /* 5794 * no branch sampling for software events 5795 */ 5796 if (has_branch_stack(event)) 5797 return -EOPNOTSUPP; 5798 5799 switch (event_id) { 5800 case PERF_COUNT_SW_CPU_CLOCK: 5801 case PERF_COUNT_SW_TASK_CLOCK: 5802 return -ENOENT; 5803 5804 default: 5805 break; 5806 } 5807 5808 if (event_id >= PERF_COUNT_SW_MAX) 5809 return -ENOENT; 5810 5811 if (!event->parent) { 5812 int err; 5813 5814 err = swevent_hlist_get(event); 5815 if (err) 5816 return err; 5817 5818 static_key_slow_inc(&perf_swevent_enabled[event_id]); 5819 event->destroy = sw_perf_event_destroy; 5820 } 5821 5822 return 0; 5823 } 5824 5825 static int perf_swevent_event_idx(struct perf_event *event) 5826 { 5827 return 0; 5828 } 5829 5830 static struct pmu perf_swevent = { 5831 .task_ctx_nr = perf_sw_context, 5832 5833 .event_init = perf_swevent_init, 5834 .add = perf_swevent_add, 5835 .del = perf_swevent_del, 5836 .start = perf_swevent_start, 5837 .stop = perf_swevent_stop, 5838 .read = perf_swevent_read, 5839 5840 .event_idx = perf_swevent_event_idx, 5841 }; 5842 5843 #ifdef CONFIG_EVENT_TRACING 5844 5845 static int perf_tp_filter_match(struct perf_event *event, 5846 struct perf_sample_data *data) 5847 { 5848 void *record = data->raw->data; 5849 5850 if (likely(!event->filter) || filter_match_preds(event->filter, record)) 5851 return 1; 5852 return 0; 5853 } 5854 5855 static int perf_tp_event_match(struct perf_event *event, 5856 struct perf_sample_data *data, 5857 struct pt_regs *regs) 5858 { 5859 if (event->hw.state & PERF_HES_STOPPED) 5860 return 0; 5861 /* 5862 * All tracepoints 
are from kernel-space. 5863 */ 5864 if (event->attr.exclude_kernel) 5865 return 0; 5866 5867 if (!perf_tp_filter_match(event, data)) 5868 return 0; 5869 5870 return 1; 5871 } 5872 5873 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, 5874 struct pt_regs *regs, struct hlist_head *head, int rctx, 5875 struct task_struct *task) 5876 { 5877 struct perf_sample_data data; 5878 struct perf_event *event; 5879 5880 struct perf_raw_record raw = { 5881 .size = entry_size, 5882 .data = record, 5883 }; 5884 5885 perf_sample_data_init(&data, addr, 0); 5886 data.raw = &raw; 5887 5888 hlist_for_each_entry_rcu(event, head, hlist_entry) { 5889 if (perf_tp_event_match(event, &data, regs)) 5890 perf_swevent_event(event, count, &data, regs); 5891 } 5892 5893 /* 5894 * If we got specified a target task, also iterate its context and 5895 * deliver this event there too. 5896 */ 5897 if (task && task != current) { 5898 struct perf_event_context *ctx; 5899 struct trace_entry *entry = record; 5900 5901 rcu_read_lock(); 5902 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); 5903 if (!ctx) 5904 goto unlock; 5905 5906 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 5907 if (event->attr.type != PERF_TYPE_TRACEPOINT) 5908 continue; 5909 if (event->attr.config != entry->type) 5910 continue; 5911 if (perf_tp_event_match(event, &data, regs)) 5912 perf_swevent_event(event, count, &data, regs); 5913 } 5914 unlock: 5915 rcu_read_unlock(); 5916 } 5917 5918 perf_swevent_put_recursion_context(rctx); 5919 } 5920 EXPORT_SYMBOL_GPL(perf_tp_event); 5921 5922 static void tp_perf_event_destroy(struct perf_event *event) 5923 { 5924 perf_trace_destroy(event); 5925 } 5926 5927 static int perf_tp_event_init(struct perf_event *event) 5928 { 5929 int err; 5930 5931 if (event->attr.type != PERF_TYPE_TRACEPOINT) 5932 return -ENOENT; 5933 5934 /* 5935 * no branch sampling for tracepoint events 5936 */ 5937 if (has_branch_stack(event)) 5938 return -EOPNOTSUPP; 5939 5940 err = perf_trace_init(event); 5941 if (err) 5942 return err; 5943 5944 event->destroy = tp_perf_event_destroy; 5945 5946 return 0; 5947 } 5948 5949 static struct pmu perf_tracepoint = { 5950 .task_ctx_nr = perf_sw_context, 5951 5952 .event_init = perf_tp_event_init, 5953 .add = perf_trace_add, 5954 .del = perf_trace_del, 5955 .start = perf_swevent_start, 5956 .stop = perf_swevent_stop, 5957 .read = perf_swevent_read, 5958 5959 .event_idx = perf_swevent_event_idx, 5960 }; 5961 5962 static inline void perf_tp_register(void) 5963 { 5964 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); 5965 } 5966 5967 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 5968 { 5969 char *filter_str; 5970 int ret; 5971 5972 if (event->attr.type != PERF_TYPE_TRACEPOINT) 5973 return -EINVAL; 5974 5975 filter_str = strndup_user(arg, PAGE_SIZE); 5976 if (IS_ERR(filter_str)) 5977 return PTR_ERR(filter_str); 5978 5979 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); 5980 5981 kfree(filter_str); 5982 return ret; 5983 } 5984 5985 static void perf_event_free_filter(struct perf_event *event) 5986 { 5987 ftrace_profile_free_filter(event); 5988 } 5989 5990 #else 5991 5992 static inline void perf_tp_register(void) 5993 { 5994 } 5995 5996 static int perf_event_set_filter(struct perf_event *event, void __user *arg) 5997 { 5998 return -ENOENT; 5999 } 6000 6001 static void perf_event_free_filter(struct perf_event *event) 6002 { 6003 } 6004 6005 #endif /* CONFIG_EVENT_TRACING */ 6006 6007 #ifdef 
CONFIG_HAVE_HW_BREAKPOINT 6008 void perf_bp_event(struct perf_event *bp, void *data) 6009 { 6010 struct perf_sample_data sample; 6011 struct pt_regs *regs = data; 6012 6013 perf_sample_data_init(&sample, bp->attr.bp_addr, 0); 6014 6015 if (!bp->hw.state && !perf_exclude_event(bp, regs)) 6016 perf_swevent_event(bp, 1, &sample, regs); 6017 } 6018 #endif 6019 6020 /* 6021 * hrtimer based swevent callback 6022 */ 6023 6024 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) 6025 { 6026 enum hrtimer_restart ret = HRTIMER_RESTART; 6027 struct perf_sample_data data; 6028 struct pt_regs *regs; 6029 struct perf_event *event; 6030 u64 period; 6031 6032 event = container_of(hrtimer, struct perf_event, hw.hrtimer); 6033 6034 if (event->state != PERF_EVENT_STATE_ACTIVE) 6035 return HRTIMER_NORESTART; 6036 6037 event->pmu->read(event); 6038 6039 perf_sample_data_init(&data, 0, event->hw.last_period); 6040 regs = get_irq_regs(); 6041 6042 if (regs && !perf_exclude_event(event, regs)) { 6043 if (!(event->attr.exclude_idle && is_idle_task(current))) 6044 if (__perf_event_overflow(event, 1, &data, regs)) 6045 ret = HRTIMER_NORESTART; 6046 } 6047 6048 period = max_t(u64, 10000, event->hw.sample_period); 6049 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 6050 6051 return ret; 6052 } 6053 6054 static void perf_swevent_start_hrtimer(struct perf_event *event) 6055 { 6056 struct hw_perf_event *hwc = &event->hw; 6057 s64 period; 6058 6059 if (!is_sampling_event(event)) 6060 return; 6061 6062 period = local64_read(&hwc->period_left); 6063 if (period) { 6064 if (period < 0) 6065 period = 10000; 6066 6067 local64_set(&hwc->period_left, 0); 6068 } else { 6069 period = max_t(u64, 10000, hwc->sample_period); 6070 } 6071 __hrtimer_start_range_ns(&hwc->hrtimer, 6072 ns_to_ktime(period), 0, 6073 HRTIMER_MODE_REL_PINNED, 0); 6074 } 6075 6076 static void perf_swevent_cancel_hrtimer(struct perf_event *event) 6077 { 6078 struct hw_perf_event *hwc = &event->hw; 6079 6080 if (is_sampling_event(event)) { 6081 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); 6082 local64_set(&hwc->period_left, ktime_to_ns(remaining)); 6083 6084 hrtimer_cancel(&hwc->hrtimer); 6085 } 6086 } 6087 6088 static void perf_swevent_init_hrtimer(struct perf_event *event) 6089 { 6090 struct hw_perf_event *hwc = &event->hw; 6091 6092 if (!is_sampling_event(event)) 6093 return; 6094 6095 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 6096 hwc->hrtimer.function = perf_swevent_hrtimer; 6097 6098 /* 6099 * Since hrtimers have a fixed rate, we can do a static freq->period 6100 * mapping and avoid the whole period adjust feedback stuff. 
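 * The requested frequency is converted once to a sample period below and
 * attr.freq is cleared, so the generic frequency adjustment code never
 * sees this event.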
6101 */ 6102 if (event->attr.freq) { 6103 long freq = event->attr.sample_freq; 6104 6105 event->attr.sample_period = NSEC_PER_SEC / freq; 6106 hwc->sample_period = event->attr.sample_period; 6107 local64_set(&hwc->period_left, hwc->sample_period); 6108 hwc->last_period = hwc->sample_period; 6109 event->attr.freq = 0; 6110 } 6111 } 6112 6113 /* 6114 * Software event: cpu wall time clock 6115 */ 6116 6117 static void cpu_clock_event_update(struct perf_event *event) 6118 { 6119 s64 prev; 6120 u64 now; 6121 6122 now = local_clock(); 6123 prev = local64_xchg(&event->hw.prev_count, now); 6124 local64_add(now - prev, &event->count); 6125 } 6126 6127 static void cpu_clock_event_start(struct perf_event *event, int flags) 6128 { 6129 local64_set(&event->hw.prev_count, local_clock()); 6130 perf_swevent_start_hrtimer(event); 6131 } 6132 6133 static void cpu_clock_event_stop(struct perf_event *event, int flags) 6134 { 6135 perf_swevent_cancel_hrtimer(event); 6136 cpu_clock_event_update(event); 6137 } 6138 6139 static int cpu_clock_event_add(struct perf_event *event, int flags) 6140 { 6141 if (flags & PERF_EF_START) 6142 cpu_clock_event_start(event, flags); 6143 6144 return 0; 6145 } 6146 6147 static void cpu_clock_event_del(struct perf_event *event, int flags) 6148 { 6149 cpu_clock_event_stop(event, flags); 6150 } 6151 6152 static void cpu_clock_event_read(struct perf_event *event) 6153 { 6154 cpu_clock_event_update(event); 6155 } 6156 6157 static int cpu_clock_event_init(struct perf_event *event) 6158 { 6159 if (event->attr.type != PERF_TYPE_SOFTWARE) 6160 return -ENOENT; 6161 6162 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) 6163 return -ENOENT; 6164 6165 /* 6166 * no branch sampling for software events 6167 */ 6168 if (has_branch_stack(event)) 6169 return -EOPNOTSUPP; 6170 6171 perf_swevent_init_hrtimer(event); 6172 6173 return 0; 6174 } 6175 6176 static struct pmu perf_cpu_clock = { 6177 .task_ctx_nr = perf_sw_context, 6178 6179 .event_init = cpu_clock_event_init, 6180 .add = cpu_clock_event_add, 6181 .del = cpu_clock_event_del, 6182 .start = cpu_clock_event_start, 6183 .stop = cpu_clock_event_stop, 6184 .read = cpu_clock_event_read, 6185 6186 .event_idx = perf_swevent_event_idx, 6187 }; 6188 6189 /* 6190 * Software event: task time clock 6191 */ 6192 6193 static void task_clock_event_update(struct perf_event *event, u64 now) 6194 { 6195 u64 prev; 6196 s64 delta; 6197 6198 prev = local64_xchg(&event->hw.prev_count, now); 6199 delta = now - prev; 6200 local64_add(delta, &event->count); 6201 } 6202 6203 static void task_clock_event_start(struct perf_event *event, int flags) 6204 { 6205 local64_set(&event->hw.prev_count, event->ctx->time); 6206 perf_swevent_start_hrtimer(event); 6207 } 6208 6209 static void task_clock_event_stop(struct perf_event *event, int flags) 6210 { 6211 perf_swevent_cancel_hrtimer(event); 6212 task_clock_event_update(event, event->ctx->time); 6213 } 6214 6215 static int task_clock_event_add(struct perf_event *event, int flags) 6216 { 6217 if (flags & PERF_EF_START) 6218 task_clock_event_start(event, flags); 6219 6220 return 0; 6221 } 6222 6223 static void task_clock_event_del(struct perf_event *event, int flags) 6224 { 6225 task_clock_event_stop(event, PERF_EF_UPDATE); 6226 } 6227 6228 static void task_clock_event_read(struct perf_event *event) 6229 { 6230 u64 now = perf_clock(); 6231 u64 delta = now - event->ctx->timestamp; 6232 u64 time = event->ctx->time + delta; 6233 6234 task_clock_event_update(event, time); 6235 } 6236 6237 static int task_clock_event_init(struct 
perf_event *event) 6238 { 6239 if (event->attr.type != PERF_TYPE_SOFTWARE) 6240 return -ENOENT; 6241 6242 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) 6243 return -ENOENT; 6244 6245 /* 6246 * no branch sampling for software events 6247 */ 6248 if (has_branch_stack(event)) 6249 return -EOPNOTSUPP; 6250 6251 perf_swevent_init_hrtimer(event); 6252 6253 return 0; 6254 } 6255 6256 static struct pmu perf_task_clock = { 6257 .task_ctx_nr = perf_sw_context, 6258 6259 .event_init = task_clock_event_init, 6260 .add = task_clock_event_add, 6261 .del = task_clock_event_del, 6262 .start = task_clock_event_start, 6263 .stop = task_clock_event_stop, 6264 .read = task_clock_event_read, 6265 6266 .event_idx = perf_swevent_event_idx, 6267 }; 6268 6269 static void perf_pmu_nop_void(struct pmu *pmu) 6270 { 6271 } 6272 6273 static int perf_pmu_nop_int(struct pmu *pmu) 6274 { 6275 return 0; 6276 } 6277 6278 static void perf_pmu_start_txn(struct pmu *pmu) 6279 { 6280 perf_pmu_disable(pmu); 6281 } 6282 6283 static int perf_pmu_commit_txn(struct pmu *pmu) 6284 { 6285 perf_pmu_enable(pmu); 6286 return 0; 6287 } 6288 6289 static void perf_pmu_cancel_txn(struct pmu *pmu) 6290 { 6291 perf_pmu_enable(pmu); 6292 } 6293 6294 static int perf_event_idx_default(struct perf_event *event) 6295 { 6296 return event->hw.idx + 1; 6297 } 6298 6299 /* 6300 * Ensures all contexts with the same task_ctx_nr have the same 6301 * pmu_cpu_context too. 6302 */ 6303 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) 6304 { 6305 struct pmu *pmu; 6306 6307 if (ctxn < 0) 6308 return NULL; 6309 6310 list_for_each_entry(pmu, &pmus, entry) { 6311 if (pmu->task_ctx_nr == ctxn) 6312 return pmu->pmu_cpu_context; 6313 } 6314 6315 return NULL; 6316 } 6317 6318 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) 6319 { 6320 int cpu; 6321 6322 for_each_possible_cpu(cpu) { 6323 struct perf_cpu_context *cpuctx; 6324 6325 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 6326 6327 if (cpuctx->unique_pmu == old_pmu) 6328 cpuctx->unique_pmu = pmu; 6329 } 6330 } 6331 6332 static void free_pmu_context(struct pmu *pmu) 6333 { 6334 struct pmu *i; 6335 6336 mutex_lock(&pmus_lock); 6337 /* 6338 * Like a real lame refcount. 
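 * If another pmu still shares this pmu_cpu_context we just re-point
 * unique_pmu and keep the per-cpu allocation; it is freed only when the
 * last sharer goes away.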
6339 */ 6340 list_for_each_entry(i, &pmus, entry) { 6341 if (i->pmu_cpu_context == pmu->pmu_cpu_context) { 6342 update_pmu_context(i, pmu); 6343 goto out; 6344 } 6345 } 6346 6347 free_percpu(pmu->pmu_cpu_context); 6348 out: 6349 mutex_unlock(&pmus_lock); 6350 } 6351 static struct idr pmu_idr; 6352 6353 static ssize_t 6354 type_show(struct device *dev, struct device_attribute *attr, char *page) 6355 { 6356 struct pmu *pmu = dev_get_drvdata(dev); 6357 6358 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); 6359 } 6360 static DEVICE_ATTR_RO(type); 6361 6362 static ssize_t 6363 perf_event_mux_interval_ms_show(struct device *dev, 6364 struct device_attribute *attr, 6365 char *page) 6366 { 6367 struct pmu *pmu = dev_get_drvdata(dev); 6368 6369 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); 6370 } 6371 6372 static ssize_t 6373 perf_event_mux_interval_ms_store(struct device *dev, 6374 struct device_attribute *attr, 6375 const char *buf, size_t count) 6376 { 6377 struct pmu *pmu = dev_get_drvdata(dev); 6378 int timer, cpu, ret; 6379 6380 ret = kstrtoint(buf, 0, &timer); 6381 if (ret) 6382 return ret; 6383 6384 if (timer < 1) 6385 return -EINVAL; 6386 6387 /* same value, noting to do */ 6388 if (timer == pmu->hrtimer_interval_ms) 6389 return count; 6390 6391 pmu->hrtimer_interval_ms = timer; 6392 6393 /* update all cpuctx for this PMU */ 6394 for_each_possible_cpu(cpu) { 6395 struct perf_cpu_context *cpuctx; 6396 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 6397 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); 6398 6399 if (hrtimer_active(&cpuctx->hrtimer)) 6400 hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval); 6401 } 6402 6403 return count; 6404 } 6405 static DEVICE_ATTR_RW(perf_event_mux_interval_ms); 6406 6407 static struct attribute *pmu_dev_attrs[] = { 6408 &dev_attr_type.attr, 6409 &dev_attr_perf_event_mux_interval_ms.attr, 6410 NULL, 6411 }; 6412 ATTRIBUTE_GROUPS(pmu_dev); 6413 6414 static int pmu_bus_running; 6415 static struct bus_type pmu_bus = { 6416 .name = "event_source", 6417 .dev_groups = pmu_dev_groups, 6418 }; 6419 6420 static void pmu_dev_release(struct device *dev) 6421 { 6422 kfree(dev); 6423 } 6424 6425 static int pmu_dev_alloc(struct pmu *pmu) 6426 { 6427 int ret = -ENOMEM; 6428 6429 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); 6430 if (!pmu->dev) 6431 goto out; 6432 6433 pmu->dev->groups = pmu->attr_groups; 6434 device_initialize(pmu->dev); 6435 ret = dev_set_name(pmu->dev, "%s", pmu->name); 6436 if (ret) 6437 goto free_dev; 6438 6439 dev_set_drvdata(pmu->dev, pmu); 6440 pmu->dev->bus = &pmu_bus; 6441 pmu->dev->release = pmu_dev_release; 6442 ret = device_add(pmu->dev); 6443 if (ret) 6444 goto free_dev; 6445 6446 out: 6447 return ret; 6448 6449 free_dev: 6450 put_device(pmu->dev); 6451 goto out; 6452 } 6453 6454 static struct lock_class_key cpuctx_mutex; 6455 static struct lock_class_key cpuctx_lock; 6456 6457 int perf_pmu_register(struct pmu *pmu, const char *name, int type) 6458 { 6459 int cpu, ret; 6460 6461 mutex_lock(&pmus_lock); 6462 ret = -ENOMEM; 6463 pmu->pmu_disable_count = alloc_percpu(int); 6464 if (!pmu->pmu_disable_count) 6465 goto unlock; 6466 6467 pmu->type = -1; 6468 if (!name) 6469 goto skip_type; 6470 pmu->name = name; 6471 6472 if (type < 0) { 6473 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL); 6474 if (type < 0) { 6475 ret = type; 6476 goto free_pdc; 6477 } 6478 } 6479 pmu->type = type; 6480 6481 if (pmu_bus_running) { 6482 ret = pmu_dev_alloc(pmu); 6483 if (ret) 6484 
			goto free_idr;
	}

skip_type:
	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
	if (pmu->pmu_cpu_context)
		goto got_cpu_context;

	ret = -ENOMEM;
	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
	if (!pmu->pmu_cpu_context)
		goto free_dev;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx);
		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
		cpuctx->ctx.type = cpu_context;
		cpuctx->ctx.pmu = pmu;

		__perf_cpu_hrtimer_init(cpuctx, cpu);

		INIT_LIST_HEAD(&cpuctx->rotation_list);
		cpuctx->unique_pmu = pmu;
	}

got_cpu_context:
	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use that to try and batch
			 * hardware accesses.
			 */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
		}
	}

	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

	if (!pmu->event_idx)
		pmu->event_idx = perf_event_idx_default;

	list_add_rcu(&pmu->entry, &pmus);
	ret = 0;
unlock:
	mutex_unlock(&pmus_lock);

	return ret;

free_dev:
	device_del(pmu->dev);
	put_device(pmu->dev);

free_idr:
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);

free_pdc:
	free_percpu(pmu->pmu_disable_count);
	goto unlock;
}
EXPORT_SYMBOL_GPL(perf_pmu_register);

void perf_pmu_unregister(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_del_rcu(&pmu->entry);
	mutex_unlock(&pmus_lock);

	/*
	 * We dereference the pmu list under both SRCU and regular RCU, so
	 * synchronize against both of those.
6569 */ 6570 synchronize_srcu(&pmus_srcu); 6571 synchronize_rcu(); 6572 6573 free_percpu(pmu->pmu_disable_count); 6574 if (pmu->type >= PERF_TYPE_MAX) 6575 idr_remove(&pmu_idr, pmu->type); 6576 device_del(pmu->dev); 6577 put_device(pmu->dev); 6578 free_pmu_context(pmu); 6579 } 6580 EXPORT_SYMBOL_GPL(perf_pmu_unregister); 6581 6582 struct pmu *perf_init_event(struct perf_event *event) 6583 { 6584 struct pmu *pmu = NULL; 6585 int idx; 6586 int ret; 6587 6588 idx = srcu_read_lock(&pmus_srcu); 6589 6590 rcu_read_lock(); 6591 pmu = idr_find(&pmu_idr, event->attr.type); 6592 rcu_read_unlock(); 6593 if (pmu) { 6594 if (!try_module_get(pmu->module)) { 6595 pmu = ERR_PTR(-ENODEV); 6596 goto unlock; 6597 } 6598 event->pmu = pmu; 6599 ret = pmu->event_init(event); 6600 if (ret) 6601 pmu = ERR_PTR(ret); 6602 goto unlock; 6603 } 6604 6605 list_for_each_entry_rcu(pmu, &pmus, entry) { 6606 if (!try_module_get(pmu->module)) { 6607 pmu = ERR_PTR(-ENODEV); 6608 goto unlock; 6609 } 6610 event->pmu = pmu; 6611 ret = pmu->event_init(event); 6612 if (!ret) 6613 goto unlock; 6614 6615 if (ret != -ENOENT) { 6616 pmu = ERR_PTR(ret); 6617 goto unlock; 6618 } 6619 } 6620 pmu = ERR_PTR(-ENOENT); 6621 unlock: 6622 srcu_read_unlock(&pmus_srcu, idx); 6623 6624 return pmu; 6625 } 6626 6627 static void account_event_cpu(struct perf_event *event, int cpu) 6628 { 6629 if (event->parent) 6630 return; 6631 6632 if (has_branch_stack(event)) { 6633 if (!(event->attach_state & PERF_ATTACH_TASK)) 6634 atomic_inc(&per_cpu(perf_branch_stack_events, cpu)); 6635 } 6636 if (is_cgroup_event(event)) 6637 atomic_inc(&per_cpu(perf_cgroup_events, cpu)); 6638 } 6639 6640 static void account_event(struct perf_event *event) 6641 { 6642 if (event->parent) 6643 return; 6644 6645 if (event->attach_state & PERF_ATTACH_TASK) 6646 static_key_slow_inc(&perf_sched_events.key); 6647 if (event->attr.mmap || event->attr.mmap_data) 6648 atomic_inc(&nr_mmap_events); 6649 if (event->attr.comm) 6650 atomic_inc(&nr_comm_events); 6651 if (event->attr.task) 6652 atomic_inc(&nr_task_events); 6653 if (event->attr.freq) { 6654 if (atomic_inc_return(&nr_freq_events) == 1) 6655 tick_nohz_full_kick_all(); 6656 } 6657 if (has_branch_stack(event)) 6658 static_key_slow_inc(&perf_sched_events.key); 6659 if (is_cgroup_event(event)) 6660 static_key_slow_inc(&perf_sched_events.key); 6661 6662 account_event_cpu(event, event->cpu); 6663 } 6664 6665 /* 6666 * Allocate and initialize a event structure 6667 */ 6668 static struct perf_event * 6669 perf_event_alloc(struct perf_event_attr *attr, int cpu, 6670 struct task_struct *task, 6671 struct perf_event *group_leader, 6672 struct perf_event *parent_event, 6673 perf_overflow_handler_t overflow_handler, 6674 void *context) 6675 { 6676 struct pmu *pmu; 6677 struct perf_event *event; 6678 struct hw_perf_event *hwc; 6679 long err = -EINVAL; 6680 6681 if ((unsigned)cpu >= nr_cpu_ids) { 6682 if (!task || cpu != -1) 6683 return ERR_PTR(-EINVAL); 6684 } 6685 6686 event = kzalloc(sizeof(*event), GFP_KERNEL); 6687 if (!event) 6688 return ERR_PTR(-ENOMEM); 6689 6690 /* 6691 * Single events are their own group leaders, with an 6692 * empty sibling list: 6693 */ 6694 if (!group_leader) 6695 group_leader = event; 6696 6697 mutex_init(&event->child_mutex); 6698 INIT_LIST_HEAD(&event->child_list); 6699 6700 INIT_LIST_HEAD(&event->group_entry); 6701 INIT_LIST_HEAD(&event->event_entry); 6702 INIT_LIST_HEAD(&event->sibling_list); 6703 INIT_LIST_HEAD(&event->rb_entry); 6704 INIT_LIST_HEAD(&event->active_entry); 6705 
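	/*
	 * Illustrative sketch (not from this file): how perf_init_event()
	 * above finds a home for a new event.  A minimal driver fills in a
	 * struct pmu and calls perf_pmu_register(); its event_init callback
	 * must return -ENOENT for attr.type values it does not own so that
	 * the list walk in perf_init_event() can move on to the next PMU.
	 * All names below (example_pmu, example_event_init, and the other
	 * example_* callbacks, which follow the struct pmu signatures) are
	 * hypothetical.
	 *
	 *	static int example_event_init(struct perf_event *event)
	 *	{
	 *		if (event->attr.type != event->pmu->type)
	 *			return -ENOENT;		(not ours, keep searching)
	 *		return 0;
	 *	}
	 *
	 *	static struct pmu example_pmu = {
	 *		.task_ctx_nr	= perf_sw_context,
	 *		.event_init	= example_event_init,
	 *		.add		= example_add,
	 *		.del		= example_del,
	 *		.start		= example_start,
	 *		.stop		= example_stop,
	 *		.read		= example_read,
	 *	};
	 *
	 *	ret = perf_pmu_register(&example_pmu, "example", -1);
	 */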
INIT_HLIST_NODE(&event->hlist_entry); 6706 6707 6708 init_waitqueue_head(&event->waitq); 6709 init_irq_work(&event->pending, perf_pending_event); 6710 6711 mutex_init(&event->mmap_mutex); 6712 6713 atomic_long_set(&event->refcount, 1); 6714 event->cpu = cpu; 6715 event->attr = *attr; 6716 event->group_leader = group_leader; 6717 event->pmu = NULL; 6718 event->oncpu = -1; 6719 6720 event->parent = parent_event; 6721 6722 event->ns = get_pid_ns(task_active_pid_ns(current)); 6723 event->id = atomic64_inc_return(&perf_event_id); 6724 6725 event->state = PERF_EVENT_STATE_INACTIVE; 6726 6727 if (task) { 6728 event->attach_state = PERF_ATTACH_TASK; 6729 6730 if (attr->type == PERF_TYPE_TRACEPOINT) 6731 event->hw.tp_target = task; 6732 #ifdef CONFIG_HAVE_HW_BREAKPOINT 6733 /* 6734 * hw_breakpoint is a bit difficult here.. 6735 */ 6736 else if (attr->type == PERF_TYPE_BREAKPOINT) 6737 event->hw.bp_target = task; 6738 #endif 6739 } 6740 6741 if (!overflow_handler && parent_event) { 6742 overflow_handler = parent_event->overflow_handler; 6743 context = parent_event->overflow_handler_context; 6744 } 6745 6746 event->overflow_handler = overflow_handler; 6747 event->overflow_handler_context = context; 6748 6749 perf_event__state_init(event); 6750 6751 pmu = NULL; 6752 6753 hwc = &event->hw; 6754 hwc->sample_period = attr->sample_period; 6755 if (attr->freq && attr->sample_freq) 6756 hwc->sample_period = 1; 6757 hwc->last_period = hwc->sample_period; 6758 6759 local64_set(&hwc->period_left, hwc->sample_period); 6760 6761 /* 6762 * we currently do not support PERF_FORMAT_GROUP on inherited events 6763 */ 6764 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 6765 goto err_ns; 6766 6767 pmu = perf_init_event(event); 6768 if (!pmu) 6769 goto err_ns; 6770 else if (IS_ERR(pmu)) { 6771 err = PTR_ERR(pmu); 6772 goto err_ns; 6773 } 6774 6775 if (!event->parent) { 6776 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { 6777 err = get_callchain_buffers(); 6778 if (err) 6779 goto err_pmu; 6780 } 6781 } 6782 6783 return event; 6784 6785 err_pmu: 6786 if (event->destroy) 6787 event->destroy(event); 6788 module_put(pmu->module); 6789 err_ns: 6790 if (event->ns) 6791 put_pid_ns(event->ns); 6792 kfree(event); 6793 6794 return ERR_PTR(err); 6795 } 6796 6797 static int perf_copy_attr(struct perf_event_attr __user *uattr, 6798 struct perf_event_attr *attr) 6799 { 6800 u32 size; 6801 int ret; 6802 6803 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) 6804 return -EFAULT; 6805 6806 /* 6807 * zero the full structure, so that a short copy will be nice. 6808 */ 6809 memset(attr, 0, sizeof(*attr)); 6810 6811 ret = get_user(size, &uattr->size); 6812 if (ret) 6813 return ret; 6814 6815 if (size > PAGE_SIZE) /* silly large */ 6816 goto err_size; 6817 6818 if (!size) /* abi compat */ 6819 size = PERF_ATTR_SIZE_VER0; 6820 6821 if (size < PERF_ATTR_SIZE_VER0) 6822 goto err_size; 6823 6824 /* 6825 * If we're handed a bigger struct than we know of, 6826 * ensure all the unknown bits are 0 - i.e. new 6827 * user-space does not rely on any kernel feature 6828 * extensions we dont know about yet. 
6829 */ 6830 if (size > sizeof(*attr)) { 6831 unsigned char __user *addr; 6832 unsigned char __user *end; 6833 unsigned char val; 6834 6835 addr = (void __user *)uattr + sizeof(*attr); 6836 end = (void __user *)uattr + size; 6837 6838 for (; addr < end; addr++) { 6839 ret = get_user(val, addr); 6840 if (ret) 6841 return ret; 6842 if (val) 6843 goto err_size; 6844 } 6845 size = sizeof(*attr); 6846 } 6847 6848 ret = copy_from_user(attr, uattr, size); 6849 if (ret) 6850 return -EFAULT; 6851 6852 /* disabled for now */ 6853 if (attr->mmap2) 6854 return -EINVAL; 6855 6856 if (attr->__reserved_1) 6857 return -EINVAL; 6858 6859 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) 6860 return -EINVAL; 6861 6862 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) 6863 return -EINVAL; 6864 6865 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { 6866 u64 mask = attr->branch_sample_type; 6867 6868 /* only using defined bits */ 6869 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) 6870 return -EINVAL; 6871 6872 /* at least one branch bit must be set */ 6873 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) 6874 return -EINVAL; 6875 6876 /* propagate priv level, when not set for branch */ 6877 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { 6878 6879 /* exclude_kernel checked on syscall entry */ 6880 if (!attr->exclude_kernel) 6881 mask |= PERF_SAMPLE_BRANCH_KERNEL; 6882 6883 if (!attr->exclude_user) 6884 mask |= PERF_SAMPLE_BRANCH_USER; 6885 6886 if (!attr->exclude_hv) 6887 mask |= PERF_SAMPLE_BRANCH_HV; 6888 /* 6889 * adjust user setting (for HW filter setup) 6890 */ 6891 attr->branch_sample_type = mask; 6892 } 6893 /* privileged levels capture (kernel, hv): check permissions */ 6894 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) 6895 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 6896 return -EACCES; 6897 } 6898 6899 if (attr->sample_type & PERF_SAMPLE_REGS_USER) { 6900 ret = perf_reg_validate(attr->sample_regs_user); 6901 if (ret) 6902 return ret; 6903 } 6904 6905 if (attr->sample_type & PERF_SAMPLE_STACK_USER) { 6906 if (!arch_perf_have_user_stack_dump()) 6907 return -ENOSYS; 6908 6909 /* 6910 * We have __u32 type for the size, but so far 6911 * we can only use __u16 as maximum due to the 6912 * __u16 sample size limit. 6913 */ 6914 if (attr->sample_stack_user >= USHRT_MAX) 6915 ret = -EINVAL; 6916 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) 6917 ret = -EINVAL; 6918 } 6919 6920 out: 6921 return ret; 6922 6923 err_size: 6924 put_user(sizeof(*attr), &uattr->size); 6925 ret = -E2BIG; 6926 goto out; 6927 } 6928 6929 static int 6930 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) 6931 { 6932 struct ring_buffer *rb = NULL, *old_rb = NULL; 6933 int ret = -EINVAL; 6934 6935 if (!output_event) 6936 goto set; 6937 6938 /* don't allow circular references */ 6939 if (event == output_event) 6940 goto out; 6941 6942 /* 6943 * Don't allow cross-cpu buffers 6944 */ 6945 if (output_event->cpu != event->cpu) 6946 goto out; 6947 6948 /* 6949 * If its not a per-cpu rb, it must be the same task. 
6950 */ 6951 if (output_event->cpu == -1 && output_event->ctx != event->ctx) 6952 goto out; 6953 6954 set: 6955 mutex_lock(&event->mmap_mutex); 6956 /* Can't redirect output if we've got an active mmap() */ 6957 if (atomic_read(&event->mmap_count)) 6958 goto unlock; 6959 6960 old_rb = event->rb; 6961 6962 if (output_event) { 6963 /* get the rb we want to redirect to */ 6964 rb = ring_buffer_get(output_event); 6965 if (!rb) 6966 goto unlock; 6967 } 6968 6969 if (old_rb) 6970 ring_buffer_detach(event, old_rb); 6971 6972 if (rb) 6973 ring_buffer_attach(event, rb); 6974 6975 rcu_assign_pointer(event->rb, rb); 6976 6977 if (old_rb) { 6978 ring_buffer_put(old_rb); 6979 /* 6980 * Since we detached before setting the new rb, so that we 6981 * could attach the new rb, we could have missed a wakeup. 6982 * Provide it now. 6983 */ 6984 wake_up_all(&event->waitq); 6985 } 6986 6987 ret = 0; 6988 unlock: 6989 mutex_unlock(&event->mmap_mutex); 6990 6991 out: 6992 return ret; 6993 } 6994 6995 /** 6996 * sys_perf_event_open - open a performance event, associate it to a task/cpu 6997 * 6998 * @attr_uptr: event_id type attributes for monitoring/sampling 6999 * @pid: target pid 7000 * @cpu: target cpu 7001 * @group_fd: group leader event fd 7002 */ 7003 SYSCALL_DEFINE5(perf_event_open, 7004 struct perf_event_attr __user *, attr_uptr, 7005 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 7006 { 7007 struct perf_event *group_leader = NULL, *output_event = NULL; 7008 struct perf_event *event, *sibling; 7009 struct perf_event_attr attr; 7010 struct perf_event_context *ctx; 7011 struct file *event_file = NULL; 7012 struct fd group = {NULL, 0}; 7013 struct task_struct *task = NULL; 7014 struct pmu *pmu; 7015 int event_fd; 7016 int move_group = 0; 7017 int err; 7018 int f_flags = O_RDWR; 7019 7020 /* for future expandability... */ 7021 if (flags & ~PERF_FLAG_ALL) 7022 return -EINVAL; 7023 7024 err = perf_copy_attr(attr_uptr, &attr); 7025 if (err) 7026 return err; 7027 7028 if (!attr.exclude_kernel) { 7029 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 7030 return -EACCES; 7031 } 7032 7033 if (attr.freq) { 7034 if (attr.sample_freq > sysctl_perf_event_sample_rate) 7035 return -EINVAL; 7036 } 7037 7038 /* 7039 * In cgroup mode, the pid argument is used to pass the fd 7040 * opened to the cgroup directory in cgroupfs. The cpu argument 7041 * designates the cpu on which to monitor threads from that 7042 * cgroup. 
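	 *
	 * Illustrative example (hypothetical values): with the cgroup
	 * directory opened as cgroup_fd, a caller monitoring that cgroup on
	 * CPU 3 would issue something like
	 *
	 *	fd = syscall(__NR_perf_event_open, &attr, cgroup_fd, 3,
	 *		     -1, PERF_FLAG_PID_CGROUP);
	 *
	 * i.e. pid carries the cgroup fd and cpu must name a real CPU, which
	 * is why pid == -1 or cpu == -1 is rejected just below.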
	 */
	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
		return -EINVAL;

	if (flags & PERF_FLAG_FD_CLOEXEC)
		f_flags |= O_CLOEXEC;

	event_fd = get_unused_fd_flags(f_flags);
	if (event_fd < 0)
		return event_fd;

	if (group_fd != -1) {
		err = perf_fget_light(group_fd, &group);
		if (err)
			goto err_fd;
		group_leader = group.file->private_data;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
		task = find_lively_task_by_vpid(pid);
		if (IS_ERR(task)) {
			err = PTR_ERR(task);
			goto err_group_fd;
		}
	}

	get_online_cpus();

	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
				 NULL, NULL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_task;
	}

	if (flags & PERF_FLAG_PID_CGROUP) {
		err = perf_cgroup_connect(pid, event, &attr, group_leader);
		if (err) {
			__free_event(event);
			goto err_task;
		}
	}

	account_event(event);

	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;

	if (group_leader &&
	    (is_software_event(event) != is_software_event(group_leader))) {
		if (is_software_event(event)) {
			/*
			 * If event and group_leader are not both a software
			 * event, and event is, then group leader is not.
			 *
			 * Allow the addition of software events to !software
			 * groups, this is safe because software events never
			 * fail to schedule.
			 */
			pmu = group_leader->pmu;
		} else if (is_software_event(group_leader) &&
			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
			/*
			 * In case the group is a pure software group, and we
			 * try to add a hardware event, move the whole group to
			 * the hardware context.
7116 */ 7117 move_group = 1; 7118 } 7119 } 7120 7121 /* 7122 * Get the target context (task or percpu): 7123 */ 7124 ctx = find_get_context(pmu, task, event->cpu); 7125 if (IS_ERR(ctx)) { 7126 err = PTR_ERR(ctx); 7127 goto err_alloc; 7128 } 7129 7130 if (task) { 7131 put_task_struct(task); 7132 task = NULL; 7133 } 7134 7135 /* 7136 * Look up the group leader (we will attach this event to it): 7137 */ 7138 if (group_leader) { 7139 err = -EINVAL; 7140 7141 /* 7142 * Do not allow a recursive hierarchy (this new sibling 7143 * becoming part of another group-sibling): 7144 */ 7145 if (group_leader->group_leader != group_leader) 7146 goto err_context; 7147 /* 7148 * Do not allow to attach to a group in a different 7149 * task or CPU context: 7150 */ 7151 if (move_group) { 7152 if (group_leader->ctx->type != ctx->type) 7153 goto err_context; 7154 } else { 7155 if (group_leader->ctx != ctx) 7156 goto err_context; 7157 } 7158 7159 /* 7160 * Only a group leader can be exclusive or pinned 7161 */ 7162 if (attr.exclusive || attr.pinned) 7163 goto err_context; 7164 } 7165 7166 if (output_event) { 7167 err = perf_event_set_output(event, output_event); 7168 if (err) 7169 goto err_context; 7170 } 7171 7172 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, 7173 f_flags); 7174 if (IS_ERR(event_file)) { 7175 err = PTR_ERR(event_file); 7176 goto err_context; 7177 } 7178 7179 if (move_group) { 7180 struct perf_event_context *gctx = group_leader->ctx; 7181 7182 mutex_lock(&gctx->mutex); 7183 perf_remove_from_context(group_leader); 7184 7185 /* 7186 * Removing from the context ends up with disabled 7187 * event. What we want here is event in the initial 7188 * startup state, ready to be add into new context. 7189 */ 7190 perf_event__state_init(group_leader); 7191 list_for_each_entry(sibling, &group_leader->sibling_list, 7192 group_entry) { 7193 perf_remove_from_context(sibling); 7194 perf_event__state_init(sibling); 7195 put_ctx(gctx); 7196 } 7197 mutex_unlock(&gctx->mutex); 7198 put_ctx(gctx); 7199 } 7200 7201 WARN_ON_ONCE(ctx->parent_ctx); 7202 mutex_lock(&ctx->mutex); 7203 7204 if (move_group) { 7205 synchronize_rcu(); 7206 perf_install_in_context(ctx, group_leader, event->cpu); 7207 get_ctx(ctx); 7208 list_for_each_entry(sibling, &group_leader->sibling_list, 7209 group_entry) { 7210 perf_install_in_context(ctx, sibling, event->cpu); 7211 get_ctx(ctx); 7212 } 7213 } 7214 7215 perf_install_in_context(ctx, event, event->cpu); 7216 perf_unpin_context(ctx); 7217 mutex_unlock(&ctx->mutex); 7218 7219 put_online_cpus(); 7220 7221 event->owner = current; 7222 7223 mutex_lock(¤t->perf_event_mutex); 7224 list_add_tail(&event->owner_entry, ¤t->perf_event_list); 7225 mutex_unlock(¤t->perf_event_mutex); 7226 7227 /* 7228 * Precalculate sample_data sizes 7229 */ 7230 perf_event__header_size(event); 7231 perf_event__id_header_size(event); 7232 7233 /* 7234 * Drop the reference on the group_event after placing the 7235 * new event on the sibling_list. This ensures destruction 7236 * of the group leader will find the pointer to itself in 7237 * perf_group_detach(). 
7238 */ 7239 fdput(group); 7240 fd_install(event_fd, event_file); 7241 return event_fd; 7242 7243 err_context: 7244 perf_unpin_context(ctx); 7245 put_ctx(ctx); 7246 err_alloc: 7247 free_event(event); 7248 err_task: 7249 put_online_cpus(); 7250 if (task) 7251 put_task_struct(task); 7252 err_group_fd: 7253 fdput(group); 7254 err_fd: 7255 put_unused_fd(event_fd); 7256 return err; 7257 } 7258 7259 /** 7260 * perf_event_create_kernel_counter 7261 * 7262 * @attr: attributes of the counter to create 7263 * @cpu: cpu in which the counter is bound 7264 * @task: task to profile (NULL for percpu) 7265 */ 7266 struct perf_event * 7267 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, 7268 struct task_struct *task, 7269 perf_overflow_handler_t overflow_handler, 7270 void *context) 7271 { 7272 struct perf_event_context *ctx; 7273 struct perf_event *event; 7274 int err; 7275 7276 /* 7277 * Get the target context (task or percpu): 7278 */ 7279 7280 event = perf_event_alloc(attr, cpu, task, NULL, NULL, 7281 overflow_handler, context); 7282 if (IS_ERR(event)) { 7283 err = PTR_ERR(event); 7284 goto err; 7285 } 7286 7287 account_event(event); 7288 7289 ctx = find_get_context(event->pmu, task, cpu); 7290 if (IS_ERR(ctx)) { 7291 err = PTR_ERR(ctx); 7292 goto err_free; 7293 } 7294 7295 WARN_ON_ONCE(ctx->parent_ctx); 7296 mutex_lock(&ctx->mutex); 7297 perf_install_in_context(ctx, event, cpu); 7298 perf_unpin_context(ctx); 7299 mutex_unlock(&ctx->mutex); 7300 7301 return event; 7302 7303 err_free: 7304 free_event(event); 7305 err: 7306 return ERR_PTR(err); 7307 } 7308 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); 7309 7310 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) 7311 { 7312 struct perf_event_context *src_ctx; 7313 struct perf_event_context *dst_ctx; 7314 struct perf_event *event, *tmp; 7315 LIST_HEAD(events); 7316 7317 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; 7318 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; 7319 7320 mutex_lock(&src_ctx->mutex); 7321 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, 7322 event_entry) { 7323 perf_remove_from_context(event); 7324 unaccount_event_cpu(event, src_cpu); 7325 put_ctx(src_ctx); 7326 list_add(&event->migrate_entry, &events); 7327 } 7328 mutex_unlock(&src_ctx->mutex); 7329 7330 synchronize_rcu(); 7331 7332 mutex_lock(&dst_ctx->mutex); 7333 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { 7334 list_del(&event->migrate_entry); 7335 if (event->state >= PERF_EVENT_STATE_OFF) 7336 event->state = PERF_EVENT_STATE_INACTIVE; 7337 account_event_cpu(event, dst_cpu); 7338 perf_install_in_context(dst_ctx, event, dst_cpu); 7339 get_ctx(dst_ctx); 7340 } 7341 mutex_unlock(&dst_ctx->mutex); 7342 } 7343 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); 7344 7345 static void sync_child_event(struct perf_event *child_event, 7346 struct task_struct *child) 7347 { 7348 struct perf_event *parent_event = child_event->parent; 7349 u64 child_val; 7350 7351 if (child_event->attr.inherit_stat) 7352 perf_event_read_event(child_event, child); 7353 7354 child_val = perf_event_count(child_event); 7355 7356 /* 7357 * Add back the child's count to the parent's count: 7358 */ 7359 atomic64_add(child_val, &parent_event->child_count); 7360 atomic64_add(child_event->total_time_enabled, 7361 &parent_event->child_total_time_enabled); 7362 atomic64_add(child_event->total_time_running, 7363 &parent_event->child_total_time_running); 7364 7365 /* 7366 * Remove this event from the parent's list 7367 */ 
7368 WARN_ON_ONCE(parent_event->ctx->parent_ctx); 7369 mutex_lock(&parent_event->child_mutex); 7370 list_del_init(&child_event->child_list); 7371 mutex_unlock(&parent_event->child_mutex); 7372 7373 /* 7374 * Release the parent event, if this was the last 7375 * reference to it. 7376 */ 7377 put_event(parent_event); 7378 } 7379 7380 static void 7381 __perf_event_exit_task(struct perf_event *child_event, 7382 struct perf_event_context *child_ctx, 7383 struct task_struct *child) 7384 { 7385 if (child_event->parent) { 7386 raw_spin_lock_irq(&child_ctx->lock); 7387 perf_group_detach(child_event); 7388 raw_spin_unlock_irq(&child_ctx->lock); 7389 } 7390 7391 perf_remove_from_context(child_event); 7392 7393 /* 7394 * It can happen that the parent exits first, and has events 7395 * that are still around due to the child reference. These 7396 * events need to be zapped. 7397 */ 7398 if (child_event->parent) { 7399 sync_child_event(child_event, child); 7400 free_event(child_event); 7401 } 7402 } 7403 7404 static void perf_event_exit_task_context(struct task_struct *child, int ctxn) 7405 { 7406 struct perf_event *child_event, *tmp; 7407 struct perf_event_context *child_ctx; 7408 unsigned long flags; 7409 7410 if (likely(!child->perf_event_ctxp[ctxn])) { 7411 perf_event_task(child, NULL, 0); 7412 return; 7413 } 7414 7415 local_irq_save(flags); 7416 /* 7417 * We can't reschedule here because interrupts are disabled, 7418 * and either child is current or it is a task that can't be 7419 * scheduled, so we are now safe from rescheduling changing 7420 * our context. 7421 */ 7422 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); 7423 7424 /* 7425 * Take the context lock here so that if find_get_context is 7426 * reading child->perf_event_ctxp, we wait until it has 7427 * incremented the context's refcount before we do put_ctx below. 7428 */ 7429 raw_spin_lock(&child_ctx->lock); 7430 task_ctx_sched_out(child_ctx); 7431 child->perf_event_ctxp[ctxn] = NULL; 7432 /* 7433 * If this context is a clone; unclone it so it can't get 7434 * swapped to another process while we're removing all 7435 * the events from it. 7436 */ 7437 unclone_ctx(child_ctx); 7438 update_context_time(child_ctx); 7439 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 7440 7441 /* 7442 * Report the task dead after unscheduling the events so that we 7443 * won't get any samples after PERF_RECORD_EXIT. We can however still 7444 * get a few PERF_RECORD_READ events. 7445 */ 7446 perf_event_task(child, child_ctx, 0); 7447 7448 /* 7449 * We can recurse on the same lock type through: 7450 * 7451 * __perf_event_exit_task() 7452 * sync_child_event() 7453 * put_event() 7454 * mutex_lock(&ctx->mutex) 7455 * 7456 * But since its the parent context it won't be the same instance. 7457 */ 7458 mutex_lock(&child_ctx->mutex); 7459 7460 again: 7461 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups, 7462 group_entry) 7463 __perf_event_exit_task(child_event, child_ctx, child); 7464 7465 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups, 7466 group_entry) 7467 __perf_event_exit_task(child_event, child_ctx, child); 7468 7469 /* 7470 * If the last event was a group event, it will have appended all 7471 * its siblings to the list, but we obtained 'tmp' before that which 7472 * will still point to the list head terminating the iteration. 
7473 */ 7474 if (!list_empty(&child_ctx->pinned_groups) || 7475 !list_empty(&child_ctx->flexible_groups)) 7476 goto again; 7477 7478 mutex_unlock(&child_ctx->mutex); 7479 7480 put_ctx(child_ctx); 7481 } 7482 7483 /* 7484 * When a child task exits, feed back event values to parent events. 7485 */ 7486 void perf_event_exit_task(struct task_struct *child) 7487 { 7488 struct perf_event *event, *tmp; 7489 int ctxn; 7490 7491 mutex_lock(&child->perf_event_mutex); 7492 list_for_each_entry_safe(event, tmp, &child->perf_event_list, 7493 owner_entry) { 7494 list_del_init(&event->owner_entry); 7495 7496 /* 7497 * Ensure the list deletion is visible before we clear 7498 * the owner, closes a race against perf_release() where 7499 * we need to serialize on the owner->perf_event_mutex. 7500 */ 7501 smp_wmb(); 7502 event->owner = NULL; 7503 } 7504 mutex_unlock(&child->perf_event_mutex); 7505 7506 for_each_task_context_nr(ctxn) 7507 perf_event_exit_task_context(child, ctxn); 7508 } 7509 7510 static void perf_free_event(struct perf_event *event, 7511 struct perf_event_context *ctx) 7512 { 7513 struct perf_event *parent = event->parent; 7514 7515 if (WARN_ON_ONCE(!parent)) 7516 return; 7517 7518 mutex_lock(&parent->child_mutex); 7519 list_del_init(&event->child_list); 7520 mutex_unlock(&parent->child_mutex); 7521 7522 put_event(parent); 7523 7524 perf_group_detach(event); 7525 list_del_event(event, ctx); 7526 free_event(event); 7527 } 7528 7529 /* 7530 * free an unexposed, unused context as created by inheritance by 7531 * perf_event_init_task below, used by fork() in case of fail. 7532 */ 7533 void perf_event_free_task(struct task_struct *task) 7534 { 7535 struct perf_event_context *ctx; 7536 struct perf_event *event, *tmp; 7537 int ctxn; 7538 7539 for_each_task_context_nr(ctxn) { 7540 ctx = task->perf_event_ctxp[ctxn]; 7541 if (!ctx) 7542 continue; 7543 7544 mutex_lock(&ctx->mutex); 7545 again: 7546 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, 7547 group_entry) 7548 perf_free_event(event, ctx); 7549 7550 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, 7551 group_entry) 7552 perf_free_event(event, ctx); 7553 7554 if (!list_empty(&ctx->pinned_groups) || 7555 !list_empty(&ctx->flexible_groups)) 7556 goto again; 7557 7558 mutex_unlock(&ctx->mutex); 7559 7560 put_ctx(ctx); 7561 } 7562 } 7563 7564 void perf_event_delayed_put(struct task_struct *task) 7565 { 7566 int ctxn; 7567 7568 for_each_task_context_nr(ctxn) 7569 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); 7570 } 7571 7572 /* 7573 * inherit a event from parent task to child task: 7574 */ 7575 static struct perf_event * 7576 inherit_event(struct perf_event *parent_event, 7577 struct task_struct *parent, 7578 struct perf_event_context *parent_ctx, 7579 struct task_struct *child, 7580 struct perf_event *group_leader, 7581 struct perf_event_context *child_ctx) 7582 { 7583 struct perf_event *child_event; 7584 unsigned long flags; 7585 7586 /* 7587 * Instead of creating recursive hierarchies of events, 7588 * we link inherited events back to the original parent, 7589 * which has a filp for sure, which we use as the reference 7590 * count: 7591 */ 7592 if (parent_event->parent) 7593 parent_event = parent_event->parent; 7594 7595 child_event = perf_event_alloc(&parent_event->attr, 7596 parent_event->cpu, 7597 child, 7598 group_leader, parent_event, 7599 NULL, NULL); 7600 if (IS_ERR(child_event)) 7601 return child_event; 7602 7603 if (!atomic_long_inc_not_zero(&parent_event->refcount)) { 7604 free_event(child_event); 7605 return NULL; 7606 } 
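	/*
	 * Illustrative note (not from this file): this path is reached when a
	 * task that has counters forks and those counters were opened with
	 * attr.inherit set.  A user-space sketch, with hypothetical values:
	 *
	 *	struct perf_event_attr attr = {
	 *		.type		= PERF_TYPE_HARDWARE,
	 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
	 *		.size		= sizeof(attr),
	 *		.disabled	= 1,
	 *		.inherit	= 1,		(count children too)
	 *	};
	 *	int fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
	 *
	 * Each child created after this point gets a child_event linked back
	 * to the original parent event, as set up below.
	 */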
7607 7608 get_ctx(child_ctx); 7609 7610 /* 7611 * Make the child state follow the state of the parent event, 7612 * not its attr.disabled bit. We hold the parent's mutex, 7613 * so we won't race with perf_event_{en, dis}able_family. 7614 */ 7615 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) 7616 child_event->state = PERF_EVENT_STATE_INACTIVE; 7617 else 7618 child_event->state = PERF_EVENT_STATE_OFF; 7619 7620 if (parent_event->attr.freq) { 7621 u64 sample_period = parent_event->hw.sample_period; 7622 struct hw_perf_event *hwc = &child_event->hw; 7623 7624 hwc->sample_period = sample_period; 7625 hwc->last_period = sample_period; 7626 7627 local64_set(&hwc->period_left, sample_period); 7628 } 7629 7630 child_event->ctx = child_ctx; 7631 child_event->overflow_handler = parent_event->overflow_handler; 7632 child_event->overflow_handler_context 7633 = parent_event->overflow_handler_context; 7634 7635 /* 7636 * Precalculate sample_data sizes 7637 */ 7638 perf_event__header_size(child_event); 7639 perf_event__id_header_size(child_event); 7640 7641 /* 7642 * Link it up in the child's context: 7643 */ 7644 raw_spin_lock_irqsave(&child_ctx->lock, flags); 7645 add_event_to_ctx(child_event, child_ctx); 7646 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 7647 7648 /* 7649 * Link this into the parent event's child list 7650 */ 7651 WARN_ON_ONCE(parent_event->ctx->parent_ctx); 7652 mutex_lock(&parent_event->child_mutex); 7653 list_add_tail(&child_event->child_list, &parent_event->child_list); 7654 mutex_unlock(&parent_event->child_mutex); 7655 7656 return child_event; 7657 } 7658 7659 static int inherit_group(struct perf_event *parent_event, 7660 struct task_struct *parent, 7661 struct perf_event_context *parent_ctx, 7662 struct task_struct *child, 7663 struct perf_event_context *child_ctx) 7664 { 7665 struct perf_event *leader; 7666 struct perf_event *sub; 7667 struct perf_event *child_ctr; 7668 7669 leader = inherit_event(parent_event, parent, parent_ctx, 7670 child, NULL, child_ctx); 7671 if (IS_ERR(leader)) 7672 return PTR_ERR(leader); 7673 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { 7674 child_ctr = inherit_event(sub, parent, parent_ctx, 7675 child, leader, child_ctx); 7676 if (IS_ERR(child_ctr)) 7677 return PTR_ERR(child_ctr); 7678 } 7679 return 0; 7680 } 7681 7682 static int 7683 inherit_task_group(struct perf_event *event, struct task_struct *parent, 7684 struct perf_event_context *parent_ctx, 7685 struct task_struct *child, int ctxn, 7686 int *inherited_all) 7687 { 7688 int ret; 7689 struct perf_event_context *child_ctx; 7690 7691 if (!event->attr.inherit) { 7692 *inherited_all = 0; 7693 return 0; 7694 } 7695 7696 child_ctx = child->perf_event_ctxp[ctxn]; 7697 if (!child_ctx) { 7698 /* 7699 * This is executed from the parent task context, so 7700 * inherit events that have been marked for cloning. 7701 * First allocate and initialize a context for the 7702 * child. 
7703 */ 7704 7705 child_ctx = alloc_perf_context(parent_ctx->pmu, child); 7706 if (!child_ctx) 7707 return -ENOMEM; 7708 7709 child->perf_event_ctxp[ctxn] = child_ctx; 7710 } 7711 7712 ret = inherit_group(event, parent, parent_ctx, 7713 child, child_ctx); 7714 7715 if (ret) 7716 *inherited_all = 0; 7717 7718 return ret; 7719 } 7720 7721 /* 7722 * Initialize the perf_event context in task_struct 7723 */ 7724 int perf_event_init_context(struct task_struct *child, int ctxn) 7725 { 7726 struct perf_event_context *child_ctx, *parent_ctx; 7727 struct perf_event_context *cloned_ctx; 7728 struct perf_event *event; 7729 struct task_struct *parent = current; 7730 int inherited_all = 1; 7731 unsigned long flags; 7732 int ret = 0; 7733 7734 if (likely(!parent->perf_event_ctxp[ctxn])) 7735 return 0; 7736 7737 /* 7738 * If the parent's context is a clone, pin it so it won't get 7739 * swapped under us. 7740 */ 7741 parent_ctx = perf_pin_task_context(parent, ctxn); 7742 7743 /* 7744 * No need to check if parent_ctx != NULL here; since we saw 7745 * it non-NULL earlier, the only reason for it to become NULL 7746 * is if we exit, and since we're currently in the middle of 7747 * a fork we can't be exiting at the same time. 7748 */ 7749 7750 /* 7751 * Lock the parent list. No need to lock the child - not PID 7752 * hashed yet and not running, so nobody can access it. 7753 */ 7754 mutex_lock(&parent_ctx->mutex); 7755 7756 /* 7757 * We dont have to disable NMIs - we are only looking at 7758 * the list, not manipulating it: 7759 */ 7760 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { 7761 ret = inherit_task_group(event, parent, parent_ctx, 7762 child, ctxn, &inherited_all); 7763 if (ret) 7764 break; 7765 } 7766 7767 /* 7768 * We can't hold ctx->lock when iterating the ->flexible_group list due 7769 * to allocations, but we need to prevent rotation because 7770 * rotate_ctx() will change the list from interrupt context. 7771 */ 7772 raw_spin_lock_irqsave(&parent_ctx->lock, flags); 7773 parent_ctx->rotate_disable = 1; 7774 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); 7775 7776 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { 7777 ret = inherit_task_group(event, parent, parent_ctx, 7778 child, ctxn, &inherited_all); 7779 if (ret) 7780 break; 7781 } 7782 7783 raw_spin_lock_irqsave(&parent_ctx->lock, flags); 7784 parent_ctx->rotate_disable = 0; 7785 7786 child_ctx = child->perf_event_ctxp[ctxn]; 7787 7788 if (child_ctx && inherited_all) { 7789 /* 7790 * Mark the child context as a clone of the parent 7791 * context, or of whatever the parent is a clone of. 7792 * 7793 * Note that if the parent is a clone, the holding of 7794 * parent_ctx->lock avoids it from being uncloned. 
7795 */ 7796 cloned_ctx = parent_ctx->parent_ctx; 7797 if (cloned_ctx) { 7798 child_ctx->parent_ctx = cloned_ctx; 7799 child_ctx->parent_gen = parent_ctx->parent_gen; 7800 } else { 7801 child_ctx->parent_ctx = parent_ctx; 7802 child_ctx->parent_gen = parent_ctx->generation; 7803 } 7804 get_ctx(child_ctx->parent_ctx); 7805 } 7806 7807 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); 7808 mutex_unlock(&parent_ctx->mutex); 7809 7810 perf_unpin_context(parent_ctx); 7811 put_ctx(parent_ctx); 7812 7813 return ret; 7814 } 7815 7816 /* 7817 * Initialize the perf_event context in task_struct 7818 */ 7819 int perf_event_init_task(struct task_struct *child) 7820 { 7821 int ctxn, ret; 7822 7823 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp)); 7824 mutex_init(&child->perf_event_mutex); 7825 INIT_LIST_HEAD(&child->perf_event_list); 7826 7827 for_each_task_context_nr(ctxn) { 7828 ret = perf_event_init_context(child, ctxn); 7829 if (ret) 7830 return ret; 7831 } 7832 7833 return 0; 7834 } 7835 7836 static void __init perf_event_init_all_cpus(void) 7837 { 7838 struct swevent_htable *swhash; 7839 int cpu; 7840 7841 for_each_possible_cpu(cpu) { 7842 swhash = &per_cpu(swevent_htable, cpu); 7843 mutex_init(&swhash->hlist_mutex); 7844 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu)); 7845 } 7846 } 7847 7848 static void perf_event_init_cpu(int cpu) 7849 { 7850 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 7851 7852 mutex_lock(&swhash->hlist_mutex); 7853 if (swhash->hlist_refcount > 0) { 7854 struct swevent_hlist *hlist; 7855 7856 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); 7857 WARN_ON(!hlist); 7858 rcu_assign_pointer(swhash->swevent_hlist, hlist); 7859 } 7860 mutex_unlock(&swhash->hlist_mutex); 7861 } 7862 7863 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC 7864 static void perf_pmu_rotate_stop(struct pmu *pmu) 7865 { 7866 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 7867 7868 WARN_ON(!irqs_disabled()); 7869 7870 list_del_init(&cpuctx->rotation_list); 7871 } 7872 7873 static void __perf_event_exit_context(void *__info) 7874 { 7875 struct perf_event_context *ctx = __info; 7876 struct perf_event *event; 7877 7878 perf_pmu_rotate_stop(ctx->pmu); 7879 7880 rcu_read_lock(); 7881 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) 7882 __perf_remove_from_context(event); 7883 rcu_read_unlock(); 7884 } 7885 7886 static void perf_event_exit_cpu_context(int cpu) 7887 { 7888 struct perf_event_context *ctx; 7889 struct pmu *pmu; 7890 int idx; 7891 7892 idx = srcu_read_lock(&pmus_srcu); 7893 list_for_each_entry_rcu(pmu, &pmus, entry) { 7894 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; 7895 7896 mutex_lock(&ctx->mutex); 7897 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); 7898 mutex_unlock(&ctx->mutex); 7899 } 7900 srcu_read_unlock(&pmus_srcu, idx); 7901 } 7902 7903 static void perf_event_exit_cpu(int cpu) 7904 { 7905 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 7906 7907 perf_event_exit_cpu_context(cpu); 7908 7909 mutex_lock(&swhash->hlist_mutex); 7910 swevent_hlist_release(swhash); 7911 mutex_unlock(&swhash->hlist_mutex); 7912 } 7913 #else 7914 static inline void perf_event_exit_cpu(int cpu) { } 7915 #endif 7916 7917 static int 7918 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) 7919 { 7920 int cpu; 7921 7922 for_each_online_cpu(cpu) 7923 perf_event_exit_cpu(cpu); 7924 7925 return NOTIFY_OK; 7926 } 7927 7928 /* 7929 * Run the perf reboot notifier at the 
very last possible moment so that 7930 * the generic watchdog code runs as long as possible. 7931 */ 7932 static struct notifier_block perf_reboot_notifier = { 7933 .notifier_call = perf_reboot, 7934 .priority = INT_MIN, 7935 }; 7936 7937 static int 7938 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) 7939 { 7940 unsigned int cpu = (long)hcpu; 7941 7942 switch (action & ~CPU_TASKS_FROZEN) { 7943 7944 case CPU_UP_PREPARE: 7945 case CPU_DOWN_FAILED: 7946 perf_event_init_cpu(cpu); 7947 break; 7948 7949 case CPU_UP_CANCELED: 7950 case CPU_DOWN_PREPARE: 7951 perf_event_exit_cpu(cpu); 7952 break; 7953 default: 7954 break; 7955 } 7956 7957 return NOTIFY_OK; 7958 } 7959 7960 void __init perf_event_init(void) 7961 { 7962 int ret; 7963 7964 idr_init(&pmu_idr); 7965 7966 perf_event_init_all_cpus(); 7967 init_srcu_struct(&pmus_srcu); 7968 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE); 7969 perf_pmu_register(&perf_cpu_clock, NULL, -1); 7970 perf_pmu_register(&perf_task_clock, NULL, -1); 7971 perf_tp_register(); 7972 perf_cpu_notifier(perf_cpu_notify); 7973 register_reboot_notifier(&perf_reboot_notifier); 7974 7975 ret = init_hw_breakpoint(); 7976 WARN(ret, "hw_breakpoint initialization failed with: %d", ret); 7977 7978 /* do not patch jump label more than once per second */ 7979 jump_label_rate_limit(&perf_sched_events, HZ); 7980 7981 /* 7982 * Build time assertion that we keep the data_head at the intended 7983 * location. IOW, validation we got the __reserved[] size right. 7984 */ 7985 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head)) 7986 != 1024); 7987 } 7988 7989 static int __init perf_event_sysfs_init(void) 7990 { 7991 struct pmu *pmu; 7992 int ret; 7993 7994 mutex_lock(&pmus_lock); 7995 7996 ret = bus_register(&pmu_bus); 7997 if (ret) 7998 goto unlock; 7999 8000 list_for_each_entry(pmu, &pmus, entry) { 8001 if (!pmu->name || pmu->type < 0) 8002 continue; 8003 8004 ret = pmu_dev_alloc(pmu); 8005 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); 8006 } 8007 pmu_bus_running = 1; 8008 ret = 0; 8009 8010 unlock: 8011 mutex_unlock(&pmus_lock); 8012 8013 return ret; 8014 } 8015 device_initcall(perf_event_sysfs_init); 8016 8017 #ifdef CONFIG_CGROUP_PERF 8018 static struct cgroup_subsys_state * 8019 perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 8020 { 8021 struct perf_cgroup *jc; 8022 8023 jc = kzalloc(sizeof(*jc), GFP_KERNEL); 8024 if (!jc) 8025 return ERR_PTR(-ENOMEM); 8026 8027 jc->info = alloc_percpu(struct perf_cgroup_info); 8028 if (!jc->info) { 8029 kfree(jc); 8030 return ERR_PTR(-ENOMEM); 8031 } 8032 8033 return &jc->css; 8034 } 8035 8036 static void perf_cgroup_css_free(struct cgroup_subsys_state *css) 8037 { 8038 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css); 8039 8040 free_percpu(jc->info); 8041 kfree(jc); 8042 } 8043 8044 static int __perf_cgroup_move(void *info) 8045 { 8046 struct task_struct *task = info; 8047 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); 8048 return 0; 8049 } 8050 8051 static void perf_cgroup_attach(struct cgroup_subsys_state *css, 8052 struct cgroup_taskset *tset) 8053 { 8054 struct task_struct *task; 8055 8056 cgroup_taskset_for_each(task, tset) 8057 task_function_call(task, __perf_cgroup_move, task); 8058 } 8059 8060 static void perf_cgroup_exit(struct cgroup_subsys_state *css, 8061 struct cgroup_subsys_state *old_css, 8062 struct task_struct *task) 8063 { 8064 /* 8065 * cgroup_exit() is called in the copy_process() failure path. 
	 * Ignore this case since the task hasn't run yet; this avoids
	 * trying to poke a half-freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.exit		= perf_cgroup_exit,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */
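
/*
 * Illustrative sketch (not part of this file): a kernel-side user of the
 * exported perf_event_create_kernel_counter() interface, in the style of the
 * hw_breakpoint and watchdog code.  All names and values here
 * (example_overflow, the sample_period, ...) are assumptions for the example
 * only.
 *
 *	static void example_overflow(struct perf_event *event,
 *				     struct perf_sample_data *data,
 *				     struct pt_regs *regs)
 *	{
 *		(called from NMI/IRQ context when the period expires)
 *	}
 *
 *	static struct perf_event *example_setup(int cpu)
 *	{
 *		struct perf_event_attr attr = {
 *			.type		= PERF_TYPE_SOFTWARE,
 *			.config		= PERF_COUNT_SW_CPU_CLOCK,
 *			.size		= sizeof(attr),
 *			.sample_period	= NSEC_PER_SEC,
 *			.pinned		= 1,
 *		};
 *
 *		return perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						example_overflow, NULL);
 *	}
 *
 * The returned event is torn down with perf_event_release_kernel() once the
 * caller is done with it.
 */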