// SPDX-License-Identifier: GPL-2.0-only
#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

static int armpmu_count_irq_users(const int irq);

struct pmu_irq_ops {
	void (*enable_pmuirq)(unsigned int irq);
	void (*disable_pmuirq)(unsigned int irq);
	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
};

static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
{
	free_irq(irq, per_cpu_ptr(devid, cpu));
}

static const struct pmu_irq_ops pmuirq_ops = {
	.enable_pmuirq = enable_irq,
	.disable_pmuirq = disable_irq_nosync,
	.free_pmuirq = armpmu_free_pmuirq
};

static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
{
	free_nmi(irq, per_cpu_ptr(devid, cpu));
}

static const struct pmu_irq_ops pmunmi_ops = {
	.enable_pmuirq = enable_nmi,
	.disable_pmuirq = disable_nmi_nosync,
	.free_pmuirq = armpmu_free_pmunmi
};

static void armpmu_enable_percpu_pmuirq(unsigned int irq)
{
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
				      void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) == 1)
		free_percpu_irq(irq, devid);
}

static const struct pmu_irq_ops percpu_pmuirq_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
	.disable_pmuirq = disable_percpu_irq,
	.free_pmuirq = armpmu_free_percpu_pmuirq
};

static void armpmu_enable_percpu_pmunmi(unsigned int irq)
{
	if (!prepare_percpu_nmi(irq))
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

static void armpmu_disable_percpu_pmunmi(unsigned int irq)
{
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
}

static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
				      void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) == 1)
		free_percpu_nmi(irq, devid);
}

static const struct pmu_irq_ops percpu_pmunmi_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmunmi,
	.disable_pmuirq = armpmu_disable_percpu_pmunmi,
	.free_pmuirq = armpmu_free_percpu_pmunmi
};

static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);

static bool has_nmi;

static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
	if (event->hw.flags & ARMPMU_EVT_64BIT)
		return GENMASK_ULL(63, 0);
	else if (event->hw.flags & ARMPMU_EVT_63BIT)
		return GENMASK_ULL(62, 0);
	else if (event->hw.flags & ARMPMU_EVT_47BIT)
		return GENMASK_ULL(46, 0);
	else
		return GENMASK_ULL(31, 0);
}

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
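	/*
	 * Note: perf encodes PERF_TYPE_HW_CACHE config values as
	 * (type) | (op << 8) | (result << 16); the three byte-wide fields
	 * are unpacked and bounds-checked below before indexing into the
	 * driver-supplied cache_map.
	 */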
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	if (!cache_map)
		return -ENOENT;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	if (!event_map)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	u64 max_period;
	int ret = 0;

	max_period = arm_pmu_event_max_period(event);
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
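	 *
	 * For example, with a 32-bit counter max_period is 0xffffffff and
	 * "left" is clamped to 0x7fffffff, so the start value programmed
	 * below ((u64)-left) is at least 0x80000000: after the overflow
	 * interrupt fires, the counter has at least half its range to run
	 * before it could reach the start value again and confuse the
	 * delta calculation in armpmu_event_update().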
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & max_period);

	perf_event_update_userpage(event);

	return ret;
}

u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	u64 max_period = arm_pmu_event_max_period(event);

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	armpmu->clear_event_idx(hw_events, event);
	perf_event_update_userpage(event);
	/* Clear the allocated counter */
	hwc->idx = -1;
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	if (event == leader)
		return 0;

	for_each_sibling_event(sibling, leader) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * We request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	if (WARN_ON_ONCE(!armpmu))
		return IRQ_NONE;

	start_clock = sched_clock();
	ret = armpmu->handle_irq(armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping, ret;

	hwc->flags = 0;
	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if (armpmu->set_event_filter) {
		ret = armpmu->set_event_filter(hwc, &event->attr);
		if (ret)
			return ret;
	}

	/*
	 * Store the event encoding into the config_base field.
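	 *
	 * The mapping comes from armpmu_map_event() above: for PERF_TYPE_RAW
	 * (and events opened against this PMU's own type) it is the raw
	 * config masked by raw_event_mask, otherwise it is the value looked
	 * up in the driver's hardware/cache event maps.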
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return validate_group(event);
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	bool enabled = !bitmap_empty(hw_events->used_mask, ARMPMU_MAX_HWEVENTS);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
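 *
 * For example, on a big.LITTLE system the "big" and "LITTLE" clusters are
 * registered as separate arm_pmu instances, each with its own
 * supported_cpus mask, so an event counted on one cluster is never
 * scheduled onto the other.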
 */
static bool armpmu_filter(struct pmu *pmu, int cpu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static ssize_t cpus_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR_RO(cpus);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static const struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

static int armpmu_count_irq_users(const int irq)
{
	int cpu, count = 0;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) == irq)
			count++;
	}

	return count;
}

static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
{
	const struct pmu_irq_ops *ops = NULL;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) != irq)
			continue;

		ops = per_cpu(cpu_irq_ops, cpu);
		if (ops)
			break;
	}

	return ops;
}

void armpmu_free_irq(int irq, int cpu)
{
	if (per_cpu(cpu_irq, cpu) == 0)
		return;
	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
		return;

	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);

	per_cpu(cpu_irq, cpu) = 0;
	per_cpu(cpu_irq_ops, cpu) = NULL;
}

int armpmu_request_irq(int irq, int cpu)
{
	int err = 0;
	const irq_handler_t handler = armpmu_dispatch_irq;
	const struct pmu_irq_ops *irq_ops;

	if (!irq)
		return 0;

	if (!irq_is_percpu_devid(irq)) {
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		irq_flags = IRQF_PERCPU |
			    IRQF_NOBALANCING | IRQF_NO_AUTOEN |
			    IRQF_NO_THREAD;

		err = request_nmi(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&cpu_armpmu, cpu));

		/* If we cannot get an NMI, fall back to a normal interrupt */
		if (err) {
			err = request_irq(irq, handler, irq_flags, "arm-pmu",
					  per_cpu_ptr(&cpu_armpmu, cpu));
			irq_ops = &pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &pmunmi_ops;
		}
	} else if (armpmu_count_irq_users(irq) == 0) {
		err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);

		/* If we cannot get an NMI, fall back to a normal interrupt */
		if (err) {
			err = request_percpu_irq(irq, handler, "arm-pmu",
						 &cpu_armpmu);
			irq_ops = &percpu_pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &percpu_pmunmi_ops;
		}
	} else {
		/* The per-CPU devid irq was already requested by another CPU */
		irq_ops = armpmu_find_irq_ops(irq);

		if (WARN_ON(!irq_ops))
			err = -EINVAL;
	}

	if (err)
		goto err_out;

	per_cpu(cpu_irq, cpu) = irq;
	per_cpu(cpu_irq_ops, cpu) = irq_ops;
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	return per_cpu(hw_events->irq, cpu);
}

bool arm_pmu_irq_is_nmi(void)
{
	return has_nmi;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	per_cpu(cpu_armpmu, cpu) = pmu;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq)
		per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq)
		per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);

	per_cpu(cpu_armpmu, cpu) = NULL;

	return 0;
}

#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for_each_set_bit(idx, armpmu->cntr_mask, ARMPMU_MAX_HWEVENTS) {
		event = hw_events->events[idx];
		if (!event)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 */
			armpmu_start(event, PERF_EF_RELOAD);
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	bool enabled = !bitmap_empty(hw_events->used_mask, ARMPMU_MAX_HWEVENTS);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
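	 *
	 * As with CPU hotplug above, the PMU registers may hold UNKNOWN
	 * values after the core leaves a low-power state, so they must be
	 * reinitialised before anything is programmed.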
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		cpu_pm_pmu_setup(armpmu, cmd);
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

struct arm_pmu *armpmu_alloc(void)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		goto out;

	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, GFP_KERNEL);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable = armpmu_enable,
		.pmu_disable = armpmu_disable,
		.event_init = armpmu_event_init,
		.add = armpmu_add,
		.del = armpmu_del,
		.start = armpmu_start,
		.stop = armpmu_stop,
		.read = armpmu_read,
		.filter = armpmu_filter,
		.attr_groups = pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE) so
		 * PERF_PMU_CAP_EXTENDED_HW_TYPE is required to open
		 * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE events on a
		 * specific PMU.
		 */
		.capabilities = PERF_PMU_CAP_EXTENDED_REGS |
				PERF_PMU_CAP_EXTENDED_HW_TYPE,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	if (!pmu->set_event_filter)
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	pr_info("enabled with %s PMU driver, %d (%*pb) counters available%s\n",
		pmu->name, bitmap_weight(pmu->cntr_mask, ARMPMU_MAX_HWEVENTS),
		ARMPMU_MAX_HWEVENTS, &pmu->cntr_mask,
		has_nmi ? ", using NMIs" : "");

	kvm_host_pmu_init(pmu);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);
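
/*
 * Illustrative sketch, not part of the driver: judging from the callbacks
 * exercised in this file, a back-end is expected to obtain an arm_pmu via
 * armpmu_alloc(), fill in the operations used above (handle_irq,
 * enable/disable, start/stop, read_counter/write_counter,
 * get_event_idx/clear_event_idx, map_event, and optionally reset and
 * set_event_filter), populate name, supported_cpus and cntr_mask, wire up
 * the per-CPU interrupts with armpmu_request_irq(), and finally call
 * armpmu_register(). On failure, the interrupts are released with
 * armpmu_free_irq() and the PMU freed with armpmu_free().
 */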