// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
#include "uncore.h"
#include "uncore_discovery.h"

/* Module knob to opt out of the self-describing discovery tables. */
static bool uncore_no_discover;
module_param(uncore_no_discover, bool, 0);
MODULE_PARM_DESC(uncore_no_discover, "Don't enable the Intel uncore PerfMon discovery mechanism "
				     "(default: enable the discovery mechanism).");
/* NULL-terminated placeholder so the *_uncores lists below are always valid. */
struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* The PCI driver for the device which the uncore doesn't own. */
struct pci_driver *uncore_pci_sub_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
int __uncore_max_dies;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_DESCRIPTION("Support for Intel uncore performance events");
MODULE_LICENSE("GPL");

/*
 * Translate a PCI bus to the logical die id it belongs to, using the
 * pci2phy map built elsewhere in this driver.  Returns -1 when the
 * segment (or bus) has no mapping.
 */
int uncore_pcibus_to_dieid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int die_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			die_id = map->pbus_to_dieid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return die_id;
}

/*
 * Return the PCI segment (domain) number of the first bus that maps to
 * @die, or -EINVAL if no bus belongs to that die.
 */
int uncore_die_to_segment(int die)
{
	struct pci_bus *bus = NULL;

	/* Find first pci bus which attributes to specified die. */
	while ((bus = pci_find_next_bus(bus)) &&
	       (die != uncore_pcibus_to_dieid(bus)))
		;

	return bus ? pci_domain_nr(bus) : -EINVAL;
}

/*
 * Map a PCI device to a logical die id by matching the NUMA node of its
 * bus against an initialized CPU on that bus.  Returns -1 when no such
 * CPU is found.
 */
int uncore_device_to_die(struct pci_dev *dev)
{
	int node = pcibus_to_node(dev->bus);
	int cpu;

	for_each_cpu(cpu, cpumask_of_pcibus(dev->bus)) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && cpu_to_node(cpu) == node)
			return c->topo.logical_die_id;
	}

	return -1;
}

/* Release every entry of the pci bus -> die map list. */
static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

/*
 * Find the pci2phy map for @segment, allocating a fresh one (with all
 * 256 bus slots set to -1) if it does not exist yet.  Caller must hold
 * pci2phy_map_lock; the lock is dropped around the allocation, hence
 * the re-lookup loop to handle a racing insertion.
 */
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc_obj(struct pci2phy_map);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_dieid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	/* Lost the race: free the unused allocation (kfree(NULL) is a no-op). */
	kfree(alloc);
	return map;
}

/* sysfs "events" attribute show handler: print the event's config string. */
ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

/* Look up the per-die box of @pmu that covers @cpu; NULL if out of range. */
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	unsigned int dieid = topology_logical_die_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for non
	 * existent mappings in the topology
 map.
	 */
	return dieid < uncore_max_dies() ? pmu->boxes[dieid] : NULL;
}

/* Read the raw counter value of @event from its MSR. */
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrq(event->hw.event_base, count);

	return count;
}

/* Tear down an MMIO box: unmap its register window if it was mapped. */
void uncore_mmio_exit_box(struct intel_uncore_box *box)
{
	if (box->io_addr)
		iounmap(box->io_addr);
}

/*
 * Read an MMIO counter.  Returns 0 when the box is not mapped or the
 * event's offset is outside the valid MMIO window.
 */
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	if (!box->io_addr)
		return 0;

	if (!uncore_mmio_is_valid_offset(box, event->hw.event_base))
		return 0;

	return readq(box->io_addr + event->hw.event_base);
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	/* Drop the reference taken on the shared extra reg by the get path. */
	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

/* Snapshot the config of shared extra reg @idx under its lock. */
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

/*
 * Bind @event to counter @idx of the box: record the index/tag and
 * resolve the control and counter register addresses for it.
 */
static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	/* Bump the tag so a later reschedule can detect counter reuse. */
	hwc->last_tag = ++box->tags[idx];

	if (uncore_pmc_fixed(hwc->idx)) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	/* Discovery-table based units resolve their registers themselves. */
	if (intel_generic_uncore_assign_hw_event(event, box))
		return;

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

/*
 * Fold the counter delta since the last read into event->count,
 * sign-extending through the counter's actual bit width via the
 * shift-up/shift-down trick.
 */
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (uncore_pmc_freerunning(event->hw.idx))
		shift = 64 - uncore_freerunning_bits(box, event);
	else if (uncore_pmc_fixed(event->hw.idx))
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	/* Retry if someone else updated prev_count under us. */
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP, is broken
 * for SandyBridge. So we use hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	/* Stop polling once the box is idle or the timer migrated CPUs. */
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_setup(&box->hrtimer, uncore_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
}

/*
 * Allocate and initialize a box on @node, including the trailing array
 * of num_shared_regs extra-reg slots (flexible tail of the struct).
 */
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs ;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->dieid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

/* True when @event belongs to the PMU backing @box. */
static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}

/*
 * Append @leader (and, if @dogrp, its schedulable siblings) to the
 * box's event_list.  Returns the new total event count, or -EINVAL if
 * the box's counters (plus optional fixed counter) would be exceeded.
 */
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

/*
 * Resolve the counter constraint for @event: type-specific hook first,
 * then the fixed-event constraint, then the type's static constraint
 * table, falling back to the fully unconstrained mask.
 */
static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if (constraint_match(c, event->hw.config))
				return c;
		}
	}

	return &type->unconstrainted;
}

/* Release any type-specific constraint state taken for @event. */
static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

/*
 * Schedule @n collected events onto counters, writing the chosen
 * indices into @assign (when non-NULL).  Returns 0 on success,
 * -EINVAL when no valid assignment exists.
 */
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	/* Gather constraints and track min/max constraint weights. */
	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	/* Validation-only run or failure: put back all constraints. */
	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}

/* perf ->start() callback: begin counting @event on its assigned counter. */
void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	/*
	 * Free running counter is read-only and always active.
	 * Use the current counter value as start point.
	 * There is no overflow interrupt for free running counter.
	 * Use hrtimer to periodically poll the counter to avoid overflow.
	 */
	if (uncore_pmc_freerunning(event->hw.idx)) {
		list_add_tail(&event->active_entry, &box->active_list);
		local64_set(&event->hw.prev_count,
			    uncore_read_counter(box, event));
		if (box->n_active++ == 0)
			uncore_pmu_start_hrtimer(box);
		return;
	}

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	/* Snapshot the counter so the first update computes a clean delta. */
	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	/* First active event on the box arms the polling hrtimer. */
	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

/* perf ->stop() callback: stop counting and optionally drain the delta. */
void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	/* Cannot disable free running counter which is read-only */
	if (uncore_pmc_freerunning(hwc->idx)) {
		list_del(&event->active_entry);
		if (--box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
		uncore_perf_event_update(box, event);
		return;
	}

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of a event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

/*
 * perf ->add() callback: collect @event into the box, reschedule all
 * events onto counters, and start it if PERF_EF_START is set.
 */
int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	/*
	 * The free funning counter is assigned in event_init().
	 * The free running counter event and free running counter
	 * are 1:1 mapped. It doesn't need to be tracked in event_list.
	 */
	if (uncore_pmc_freerunning(hwc->idx)) {
		if (flags & PERF_EF_START)
			uncore_pmu_event_start(event, 0);
		return 0;
	}

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		/* Counter and tag unchanged: event can keep running. */
		if (hwc->idx == assign[i] &&
			hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
			hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

/* perf ->del() callback: stop @event and drop it from the box's list. */
void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	/*
	 * The event for free running counter is not tracked by event_list.
	 * It doesn't need to force event->hw.idx = -1 to reassign the counter.
	 * Because the event and the free running counter are 1:1 mapped.
	 */
	if (uncore_pmc_freerunning(event->hw.idx))
		return;

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			/* Compact the list over the removed slot. */
			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

/* perf ->read() callback: refresh event->count from the hardware counter. */
void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	/* The free running counter is always active. */
	if (uncore_pmc_freerunning(event->hw.idx))
		return 0;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	/* assign == NULL: dry-run scheduling only, constraints are put back. */
	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

/*
 * perf ->event_init() callback: validate the event attributes, bind the
 * event to the box covering its CPU and precompute its hw config.
 */
static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (!pmu->registered)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else if (is_freerunning_event(event)) {
		hwc->config = event->attr.config;
		if (!check_valid_freerunning_event(box, event))
			return -EINVAL;
		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
		/*
		 * The free running counter event and free running counter
		 * are always 1:1 mapped.
		 * The free running counter is always active.
		 * Assign the free running counter here.
		 */
		event->hw.event_base = uncore_freerunning_counter(box, event);
	} else {
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	/* Group members must be validated against the whole group. */
	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

/* perf ->pmu_enable() callback: enable the box on the current CPU. */
static void uncore_pmu_enable(struct pmu *pmu)
{
	struct intel_uncore_pmu *uncore_pmu;
	struct intel_uncore_box *box;

	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);

	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
	if (!box)
		return;

	if (uncore_pmu->type->ops->enable_box)
		uncore_pmu->type->ops->enable_box(box);
}

/* perf ->pmu_disable() callback: disable the box on the current CPU. */
static void uncore_pmu_disable(struct pmu *pmu)
{
	struct intel_uncore_pmu *uncore_pmu;
	struct intel_uncore_box *box;

	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);

	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
	if (!box)
		return;

	if (uncore_pmu->type->ops->disable_box)
		uncore_pmu->type->ops->disable_box(box);
}

/* sysfs "cpumask" show handler: the CPUs that collect this PMU's events. */
static ssize_t uncore_get_attr_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct intel_uncore_pmu *pmu = container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);

	return cpumap_print_to_pagebuf(true, buf, &pmu->cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

/*
 * Box id for the PMU name: from the discovery table when available,
 * otherwise the plain pmu index.
 */
static inline int uncore_get_box_id(struct intel_uncore_type *type,
				    struct intel_uncore_pmu *pmu)
{
	if (type->boxes)
		return
 intel_uncore_find_discovery_unit_id(type->boxes, -1, pmu->pmu_idx);

	return pmu->pmu_idx;
}

/* Build the generic "uncore_type_X[_Y]" alias name for @pmu into @pmu_name. */
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu)
{
	struct intel_uncore_type *type = pmu->type;

	if (type->num_boxes == 1)
		sprintf(pmu_name, "uncore_type_%u", type->type_id);
	else {
		sprintf(pmu_name, "uncore_type_%u_%d",
			type->type_id, uncore_get_box_id(type, pmu));
	}
}

/* Compose pmu->name from the type name (or the generic alias). */
static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
{
	struct intel_uncore_type *type = pmu->type;

	/*
	 * No uncore block name in discovery table.
	 * Use uncore_type_&typeid_&boxid as name.
	 */
	if (!type->name) {
		uncore_get_alias_name(pmu->name, pmu);
		return;
	}

	if (type->num_boxes == 1) {
		if (strlen(type->name) > 0)
			sprintf(pmu->name, "uncore_%s", type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		/*
		 * Use the box ID from the discovery table if applicable.
		 */
		sprintf(pmu->name, "uncore_%s_%d", type->name,
			uncore_get_box_id(type, pmu));
	}
}

/*
 * Fill in the struct pmu callbacks (unless the type supplies its own
 * template) and register the PMU with the perf core.
 */
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.pmu_enable	= uncore_pmu_enable,
			.pmu_disable	= uncore_pmu_disable,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
			.module		= THIS_MODULE,
			.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
			.attr_update	= pmu->type->attr_update,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
		pmu->pmu.attr_update = pmu->type->attr_update;
	}

	uncore_get_pmu_name(pmu);

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

/* Undo uncore_pmu_register(); safe to call on a never-registered pmu. */
static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

/* Free the per-die boxes of @pmu and the boxes pointer array itself. */
static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int die;

	for (die = 0; die < uncore_max_dies(); die++)
		kfree(pmu->boxes[die]);
	kfree(pmu->boxes);
}

/*
 * Tear down one uncore type: unregister its PMUs, free the boxes and
 * the generated events attribute group.
 */
static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (type->cleanup_mapping)
		type->cleanup_mapping(type);

	if (type->cleanup_extra_boxes)
		type->cleanup_extra_boxes(type);

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}

	kfree(type->events_group);
	type->events_group = NULL;
}

/* Tear down every type in the NULL-terminated @types list. */
static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

/*
 * Initialize one uncore type: allocate its per-box PMUs (each with a
 * per-die box pointer array), build the default constraint and the
 * "events" sysfs attribute group from the event descriptors.
 */
static int __init uncore_type_init(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmus;
	size_t size;
	int i, j;

	pmus = kzalloc_objs(*pmus, type->num_boxes);
	if (!pmus)
		return -ENOMEM;

	size = uncore_max_dies() * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].pmu_idx	= i;
		pmus[i].type	= type;
		pmus[i].boxes	= kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			goto err;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				0, type->num_counters, 0, 0);

	if (type->event_descs) {
		struct {
			struct attribute_group group;
			struct attribute *attrs[];
		} *attr_group;
		/* Count the NULL-name-terminated event descriptors. */
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc_flex(*attr_group, attrs, i + 1);
		if (!attr_group)
			goto err;

		attr_group->group.name = "events";
		attr_group->group.attrs = attr_group->attrs;

		for (j = 0; j < i; j++)
			attr_group->attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = &attr_group->group;
	}

	type->pmu_group = &uncore_pmu_attr_group;

	if (type->set_mapping)
		type->set_mapping(type);

	return 0;

err:
	for (i = 0; i < type->num_boxes; i++)
		kfree(pmus[i].boxes);
	kfree(pmus);

	return -ENOMEM;
}

/* Initialize every type in @types; stops and reports the first failure. */
static int __init
uncore_types_init(struct intel_uncore_type **types)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Get the die information of a PCI device.
 * @pdev: The PCI device.
1063 * @die: The die id which the device maps to. 1064 */ 1065 static int uncore_pci_get_dev_die_info(struct pci_dev *pdev, int *die) 1066 { 1067 *die = uncore_pcibus_to_dieid(pdev->bus); 1068 if (*die < 0) 1069 return -EINVAL; 1070 1071 return 0; 1072 } 1073 1074 static struct intel_uncore_pmu * 1075 uncore_pci_find_dev_pmu_from_types(struct pci_dev *pdev) 1076 { 1077 struct intel_uncore_type **types = uncore_pci_uncores; 1078 struct intel_uncore_discovery_unit *unit; 1079 struct intel_uncore_type *type; 1080 struct rb_node *node; 1081 1082 for (; *types; types++) { 1083 type = *types; 1084 1085 for (node = rb_first(type->boxes); node; node = rb_next(node)) { 1086 unit = rb_entry(node, struct intel_uncore_discovery_unit, node); 1087 if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(unit->addr) && 1088 pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(unit->addr) && 1089 pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(unit->addr)) 1090 return &type->pmus[unit->pmu_idx]; 1091 } 1092 } 1093 1094 return NULL; 1095 } 1096 1097 /* 1098 * Find the PMU of a PCI device. 1099 * @pdev: The PCI device. 1100 * @ids: The ID table of the available PCI devices with a PMU. 1101 * If NULL, search the whole uncore_pci_uncores. 
1102 */ 1103 static struct intel_uncore_pmu * 1104 uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids) 1105 { 1106 struct intel_uncore_pmu *pmu = NULL; 1107 struct intel_uncore_type *type; 1108 kernel_ulong_t data; 1109 unsigned int devfn; 1110 1111 if (!ids) 1112 return uncore_pci_find_dev_pmu_from_types(pdev); 1113 1114 while (ids && ids->vendor) { 1115 if ((ids->vendor == pdev->vendor) && 1116 (ids->device == pdev->device)) { 1117 data = ids->driver_data; 1118 devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(data), 1119 UNCORE_PCI_DEV_FUNC(data)); 1120 if (devfn == pdev->devfn) { 1121 type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(data)]; 1122 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(data)]; 1123 break; 1124 } 1125 } 1126 ids++; 1127 } 1128 return pmu; 1129 } 1130 1131 /* 1132 * Register the PMU for a PCI device 1133 * @pdev: The PCI device. 1134 * @type: The corresponding PMU type of the device. 1135 * @pmu: The corresponding PMU of the device. 1136 * @die: The die id which the device maps to. 
1137 */ 1138 static int uncore_pci_pmu_register(struct pci_dev *pdev, 1139 struct intel_uncore_type *type, 1140 struct intel_uncore_pmu *pmu, 1141 int die) 1142 { 1143 struct intel_uncore_box *box; 1144 int ret; 1145 1146 if (WARN_ON_ONCE(pmu->boxes[die] != NULL)) 1147 return -EINVAL; 1148 1149 box = uncore_alloc_box(type, NUMA_NO_NODE); 1150 if (!box) 1151 return -ENOMEM; 1152 1153 atomic_inc(&box->refcnt); 1154 box->dieid = die; 1155 box->pci_dev = pdev; 1156 box->pmu = pmu; 1157 uncore_box_init(box); 1158 1159 pmu->boxes[die] = box; 1160 if (atomic_inc_return(&pmu->activeboxes) > 1) 1161 return 0; 1162 1163 /* First active box registers the pmu */ 1164 ret = uncore_pmu_register(pmu); 1165 if (ret) { 1166 pmu->boxes[die] = NULL; 1167 uncore_box_exit(box); 1168 kfree(box); 1169 } 1170 return ret; 1171 } 1172 1173 /* 1174 * add a pci uncore device 1175 */ 1176 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1177 { 1178 struct intel_uncore_type *type; 1179 struct intel_uncore_pmu *pmu = NULL; 1180 int die, ret; 1181 1182 ret = uncore_pci_get_dev_die_info(pdev, &die); 1183 if (ret) 1184 return ret; 1185 1186 if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) { 1187 int idx = UNCORE_PCI_DEV_IDX(id->driver_data); 1188 1189 uncore_extra_pci_dev[die].dev[idx] = pdev; 1190 pci_set_drvdata(pdev, NULL); 1191 return 0; 1192 } 1193 1194 type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; 1195 1196 /* 1197 * Some platforms, e.g. Knights Landing, use a common PCI device ID 1198 * for multiple instances of an uncore PMU device type. We should check 1199 * PCI slot and func to indicate the uncore box. 
 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = to_pci_driver(pdev->dev.driver);

		/* Resolve the PMU via the dev/func encoded in the id table. */
		pmu = uncore_pci_find_dev_pmu(pdev, pci_drv->id_table);
		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * for performance monitoring unit with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	ret = uncore_pci_pmu_register(pdev, type, pmu, die);

	pci_set_drvdata(pdev, pmu->boxes[die]);

	return ret;
}

/*
 * Unregister the PMU of a PCI device
 * @pmu: The corresponding PMU is unregistered.
 * @die: The die id which the device maps to.
 */
static void uncore_pci_pmu_unregister(struct intel_uncore_pmu *pmu, int die)
{
	struct intel_uncore_box *box = pmu->boxes[die];

	pmu->boxes[die] = NULL;
	/* The last active box also tears down the PMU itself. */
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

/* PCI remove callback: drop the box (or extra-device slot) for @pdev. */
static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, die;

	if (uncore_pci_get_dev_die_info(pdev, &die))
		return;

	box = pci_get_drvdata(pdev);
	if (!box) {
		/*
		 * No drvdata: this was an "extra" device tracked in
		 * uncore_extra_pci_dev. Just clear its slot.
		 */
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[die].dev[i] == pdev) {
				uncore_extra_pci_dev[die].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;

	pci_set_drvdata(pdev, NULL);

	uncore_pci_pmu_unregister(pmu, die);
}

/*
 * Common PCI bus notifier: tears down the PMU of a device matched by
 * @ids (or by the discovery tables when @ids is NULL).
 */
static int uncore_bus_notify(struct notifier_block *nb,
			     unsigned long action, void *data,
			     const struct pci_device_id *ids)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_uncore_pmu *pmu;
	int die;

	/* Unregister the PMU
when the device is going to be deleted. */
	if (action != BUS_NOTIFY_DEL_DEVICE)
		return NOTIFY_DONE;

	pmu = uncore_pci_find_dev_pmu(pdev, ids);
	if (!pmu)
		return NOTIFY_DONE;

	if (uncore_pci_get_dev_die_info(pdev, &die))
		return NOTIFY_DONE;

	uncore_pci_pmu_unregister(pmu, die);

	return NOTIFY_OK;
}

/* Notifier for devices handled by the sub driver's id table. */
static int uncore_pci_sub_bus_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	return uncore_bus_notify(nb, action, data,
				 uncore_pci_sub_driver->id_table);
}

static struct notifier_block uncore_pci_sub_notifier = {
	.notifier_call = uncore_pci_sub_bus_notify,
};

/*
 * Register PMUs for devices listed in the sub driver's id table (devices
 * the uncore driver does not own), and hook a bus notifier so they are
 * torn down on device removal.
 */
static void uncore_pci_sub_driver_init(void)
{
	const struct pci_device_id *ids = uncore_pci_sub_driver->id_table;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct pci_dev *pci_sub_dev;
	bool notify = false;
	unsigned int devfn;
	int die;

	while (ids && ids->vendor) {
		pci_sub_dev = NULL;
		type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(ids->driver_data)];
		/*
		 * Search the available device, and register the
		 * corresponding PMU.
 */
		while ((pci_sub_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
						     ids->device, pci_sub_dev))) {
			/* Only the dev/func encoded in driver_data matches. */
			devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
					  UNCORE_PCI_DEV_FUNC(ids->driver_data));
			if (devfn != pci_sub_dev->devfn)
				continue;

			pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];

			if (uncore_pci_get_dev_die_info(pci_sub_dev, &die))
				continue;

			if (!uncore_pci_pmu_register(pci_sub_dev, type, pmu,
						     die))
				notify = true;
		}
		ids++;
	}

	/* Only keep the notifier if at least one PMU was registered. */
	if (notify && bus_register_notifier(&pci_bus_type, &uncore_pci_sub_notifier))
		notify = false;

	/* Nothing registered: drop the sub driver so exit skips teardown. */
	if (!notify)
		uncore_pci_sub_driver = NULL;
}

/* Notifier for discovery-based devices (no id table to match against). */
static int uncore_pci_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	return uncore_bus_notify(nb, action, data, NULL);
}

static struct notifier_block uncore_pci_notifier = {
	.notifier_call = uncore_pci_bus_notify,
};


/*
 * Register PMUs for all PCI units found via the discovery tables
 * (used when there is no dedicated uncore PCI driver).
 */
static void uncore_pci_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_pci_uncores;
	struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct rb_node *node;
	struct pci_dev *pdev;

	for (; *types; types++) {
		type = *types;

		for (node = rb_first(type->boxes); node; node = rb_next(node)) {
			unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
			pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(unit->addr),
							   UNCORE_DISCOVERY_PCI_BUS(unit->addr),
							   UNCORE_DISCOVERY_PCI_DEVFN(unit->addr));

			if (!pdev)
				continue;
			pmu = &type->pmus[unit->pmu_idx];
			uncore_pci_pmu_register(pdev, type, pmu, unit->die);
		}
	}

	bus_register_notifier(&pci_bus_type, &uncore_pci_notifier);
}

/* Set up all PCI-based uncore PMUs; undone by uncore_pci_exit(). */
static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size =
uncore_max_dies() * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores);
	if (ret)
		goto errtype;

	if (uncore_pci_driver) {
		/* A dedicated driver exists: hook probe/remove and register it. */
		uncore_pci_driver->probe = uncore_pci_probe;
		uncore_pci_driver->remove = uncore_pci_remove;

		ret = pci_register_driver(uncore_pci_driver);
		if (ret)
			goto errtype;
	} else
		/* No driver: register PMUs straight from the discovery tables. */
		uncore_pci_pmus_register();

	if (uncore_pci_sub_driver)
		uncore_pci_sub_driver_init();

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

/* Tear down everything uncore_pci_init() set up, in reverse order. */
static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		if (uncore_pci_sub_driver)
			bus_unregister_notifier(&pci_bus_type, &uncore_pci_sub_notifier);
		if (uncore_pci_driver)
			pci_unregister_driver(uncore_pci_driver);
		else
			bus_unregister_notifier(&pci_bus_type, &uncore_pci_notifier);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}

/*
 * Returns true if a discovery unit exists for (die, pmu_idx), or if the
 * type has no discovery table at all (non-discovery platforms).
 */
static bool uncore_die_has_box(struct intel_uncore_type *type,
			       int die, unsigned int pmu_idx)
{
	if (!type->boxes)
		return true;

	if (intel_uncore_find_discovery_unit_id(type->boxes, die, pmu_idx) < 0)
		return false;

	return true;
}

/*
 * Move event-collection duty for @type's boxes on one die from @old_cpu
 * to @new_cpu (either may be -1 for "none").
 */
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, die;

	die = topology_logical_die_id(old_cpu < 0 ?
new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[die];
		if (!box)
			continue;

		if (old_cpu < 0) {
			/* First owner for this box: just assign the new cpu. */
			WARN_ON_ONCE(box->cpu != -1);
			if (uncore_die_has_box(type, die, pmu->pmu_idx)) {
				box->cpu = new_cpu;
				cpumask_set_cpu(new_cpu, &pmu->cpu_mask);
			}
			continue;
		}

		WARN_ON_ONCE(box->cpu != -1 && box->cpu != old_cpu);
		box->cpu = -1;
		cpumask_clear_cpu(old_cpu, &pmu->cpu_mask);
		if (new_cpu < 0)
			continue;

		if (!uncore_die_has_box(type, die, pmu->pmu_idx))
			continue;
		/* Migrate active events before handing the box over. */
		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
		cpumask_set_cpu(new_cpu, &pmu->cpu_mask);
	}
}

/* Apply uncore_change_type_ctx() across a NULL-terminated type array. */
static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

/* Drop one reference on every active box of die @id; exit on last ref. */
static void uncore_box_unref(struct intel_uncore_type **types, int id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[id];
			if (box && box->cpu >= 0 && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
}

/* CPU hotplug offline callback: hand collection duty to another cpu. */
static int uncore_event_cpu_offline(unsigned int cpu)
{
	int die, target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		goto unref;
	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_mmio_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
	/* Clear the references */
	die = topology_logical_die_id(cpu);
	uncore_box_unref(uncore_msr_uncores, die);
	uncore_box_unref(uncore_mmio_uncores, die);
	return 0;
}

/*
 * Allocate any missing boxes for @die. Two-phase: allocate everything
 * first, then install, so a mid-way allocation failure leaves the pmus
 * untouched.
 */
static int allocate_boxes(struct intel_uncore_type **types,
			  unsigned int die, unsigned int cpu)
{
	struct intel_uncore_box *box, *tmp;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	LIST_HEAD(allocated);
	int i;

	/* Try to allocate all required boxes */
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[die])
				continue;
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				goto cleanup;
			box->pmu = pmu;
			box->dieid = die;
			list_add(&box->active_list, &allocated);
		}
	}
	/* Install them in the pmus */
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		box->pmu->boxes[die] = box;
	}
	return 0;

cleanup:
	/* Free everything allocated so far; nothing was installed yet. */
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		kfree(box);
	}
	return -ENOMEM;
}

/* Take one reference on every active box of die @id; init on first ref. */
static int uncore_box_ref(struct intel_uncore_type **types,
			  int id, unsigned int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, ret;

	ret = allocate_boxes(types, id, cpu);
	if (ret)
		return ret;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[id];
			if (box && box->cpu >= 0 && atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}
	return 0;
}

/* CPU hotplug online callback: take refs and maybe become the collector. */
static int uncore_event_cpu_online(unsigned int cpu)
{
	int die, target, msr_ret, mmio_ret;

	die = topology_logical_die_id(cpu);
	msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
	mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
	/* Fail only if neither the MSR nor the MMIO boxes could be set up. */
	if (msr_ret && mmio_ret)
		return -ENOMEM;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	if (!msr_ret)
		uncore_change_context(uncore_msr_uncores, -1, cpu);
	if (!mmio_ret)
		uncore_change_context(uncore_mmio_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
	return 0;
}

/* Register one perf PMU per box of @type; stops at the first failure. */
static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

/* Register the PMUs of all MSR-based uncore types. */
static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

/* Set up the MSR-based uncore PMUs; resets to empty_uncore on failure. */
static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

/* Set up the MMIO-based uncore PMUs; resets to empty_uncore on failure. */
static int __init uncore_mmio_init(void)
{
	struct intel_uncore_type **types = uncore_mmio_uncores;
	int ret;

	ret = uncore_types_init(types);
	if
 (ret)
		goto err;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			goto err;
	}
	return 0;
err:
	uncore_types_exit(uncore_mmio_uncores);
	uncore_mmio_uncores = empty_uncore;
	return ret;
}

/*
 * Global discovery-domain init: unfreeze all counters behind the MMIO
 * global control register at physical address @ctl.
 */
static int uncore_mmio_global_init(u64 ctl)
{
	void __iomem *io_addr;

	io_addr = ioremap(ctl, sizeof(ctl));
	if (!io_addr)
		return -ENOMEM;

	/* Clear freeze bit (0) to enable all counters. */
	writel(0, io_addr);

	iounmap(io_addr);
	return 0;
}

/* Per-platform init descriptors, matched via intel_uncore_match below. */

static const struct uncore_plat_init nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct uncore_plat_init snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct uncore_plat_init ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct uncore_plat_init hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct uncore_plat_init bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct uncore_plat_init snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct uncore_plat_init nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct uncore_plat_init ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct uncore_plat_init hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct uncore_plat_init bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct uncore_plat_init knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct uncore_plat_init skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct uncore_plat_init skx_uncore_init __initconst = {
	.cpu_init = skx_uncore_cpu_init,
	.pci_init = skx_uncore_pci_init,
};

static const struct uncore_plat_init icl_uncore_init __initconst = {
	.cpu_init = icl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct uncore_plat_init tgl_uncore_init __initconst = {
	.cpu_init = tgl_uncore_cpu_init,
	.mmio_init = tgl_uncore_mmio_init,
};

static const struct uncore_plat_init tgl_l_uncore_init __initconst = {
	.cpu_init = tgl_uncore_cpu_init,
	.mmio_init = tgl_l_uncore_mmio_init,
};

static const struct uncore_plat_init rkl_uncore_init __initconst = {
	.cpu_init = tgl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct uncore_plat_init adl_uncore_init __initconst = {
	.cpu_init = adl_uncore_cpu_init,
	.mmio_init = adl_uncore_mmio_init,
};

static const struct uncore_plat_init mtl_uncore_init __initconst = {
	.cpu_init = mtl_uncore_cpu_init,
	.mmio_init = adl_uncore_mmio_init,
};

static const struct uncore_plat_init lnl_uncore_init __initconst = {
	.cpu_init = lnl_uncore_cpu_init,
	.mmio_init = lnl_uncore_mmio_init,
};

/* PTL discovers its units via an MSR-based discovery table (domain 0). */
static const struct uncore_plat_init ptl_uncore_init __initconst = {
	.cpu_init = ptl_uncore_cpu_init,
	.mmio_init = ptl_uncore_mmio_init,
	.domain[0].discovery_base =
UNCORE_DISCOVERY_MSR,
	.domain[0].global_init = uncore_mmio_global_init,
};

static const struct uncore_plat_init nvl_uncore_init __initconst = {
	.cpu_init = nvl_uncore_cpu_init,
	.mmio_init = ptl_uncore_mmio_init,
	.domain[0].discovery_base = PACKAGE_UNCORE_DISCOVERY_MSR,
	.domain[0].global_init = uncore_mmio_global_init,
};

static const struct uncore_plat_init icx_uncore_init __initconst = {
	.cpu_init = icx_uncore_cpu_init,
	.pci_init = icx_uncore_pci_init,
	.mmio_init = icx_uncore_mmio_init,
};

static const struct uncore_plat_init snr_uncore_init __initconst = {
	.cpu_init = snr_uncore_cpu_init,
	.pci_init = snr_uncore_pci_init,
	.mmio_init = snr_uncore_mmio_init,
};

/* SPR uses a PCI-device based discovery table with an ignore list. */
static const struct uncore_plat_init spr_uncore_init __initconst = {
	.cpu_init = spr_uncore_cpu_init,
	.pci_init = spr_uncore_pci_init,
	.mmio_init = spr_uncore_mmio_init,
	.domain[0].base_is_pci = true,
	.domain[0].discovery_base = UNCORE_DISCOVERY_TABLE_DEVICE,
	.domain[0].units_ignore = spr_uncore_units_ignore,
};

static const struct uncore_plat_init gnr_uncore_init __initconst = {
	.cpu_init = gnr_uncore_cpu_init,
	.pci_init = gnr_uncore_pci_init,
	.mmio_init = gnr_uncore_mmio_init,
	.domain[0].base_is_pci = true,
	.domain[0].discovery_base = UNCORE_DISCOVERY_TABLE_DEVICE,
	.domain[0].units_ignore = gnr_uncore_units_ignore,
};

/* DMR has two discovery domains: a PCI-based one and an MSR-based one. */
static const struct uncore_plat_init dmr_uncore_init __initconst = {
	.pci_init = dmr_uncore_pci_init,
	.mmio_init = dmr_uncore_mmio_init,
	.domain[0].base_is_pci = true,
	.domain[0].discovery_base = DMR_UNCORE_DISCOVERY_TABLE_DEVICE,
	.domain[0].units_ignore = dmr_uncore_imh_units_ignore,
	.domain[1].discovery_base = CBB_UNCORE_DISCOVERY_MSR,
	.domain[1].units_ignore = dmr_uncore_cbb_units_ignore,
	.domain[1].global_init = uncore_mmio_global_init,
};

/* Fallback for unknown models that still expose discovery tables. */
static const struct uncore_plat_init generic_uncore_init __initconst = {
	.cpu_init = intel_uncore_generic_uncore_cpu_init,
	.pci_init = intel_uncore_generic_uncore_pci_init,
	.mmio_init = intel_uncore_generic_uncore_mmio_init,
	.domain[0].base_is_pci = true,
	.domain[0].discovery_base = PCI_ANY_ID,
	.domain[1].discovery_base = UNCORE_DISCOVERY_MSR,
};

/* CPU model -> platform init descriptor. */
static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_MATCH_VFM(INTEL_NEHALEM_EP,		&nhm_uncore_init),
	X86_MATCH_VFM(INTEL_NEHALEM,		&nhm_uncore_init),
	X86_MATCH_VFM(INTEL_WESTMERE,		&nhm_uncore_init),
	X86_MATCH_VFM(INTEL_WESTMERE_EP,	&nhm_uncore_init),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE,	&snb_uncore_init),
	X86_MATCH_VFM(INTEL_IVYBRIDGE,		&ivb_uncore_init),
	X86_MATCH_VFM(INTEL_HASWELL,		&hsw_uncore_init),
	X86_MATCH_VFM(INTEL_HASWELL_L,		&hsw_uncore_init),
	X86_MATCH_VFM(INTEL_HASWELL_G,		&hsw_uncore_init),
	X86_MATCH_VFM(INTEL_BROADWELL,		&bdw_uncore_init),
	X86_MATCH_VFM(INTEL_BROADWELL_G,	&bdw_uncore_init),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE_X,	&snbep_uncore_init),
	X86_MATCH_VFM(INTEL_NEHALEM_EX,		&nhmex_uncore_init),
	X86_MATCH_VFM(INTEL_WESTMERE_EX,	&nhmex_uncore_init),
	X86_MATCH_VFM(INTEL_IVYBRIDGE_X,	&ivbep_uncore_init),
	X86_MATCH_VFM(INTEL_HASWELL_X,		&hswep_uncore_init),
	X86_MATCH_VFM(INTEL_BROADWELL_X,	&bdx_uncore_init),
	X86_MATCH_VFM(INTEL_BROADWELL_D,	&bdx_uncore_init),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNL,	&knl_uncore_init),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNM,	&knl_uncore_init),
	X86_MATCH_VFM(INTEL_SKYLAKE,		&skl_uncore_init),
	X86_MATCH_VFM(INTEL_SKYLAKE_L,		&skl_uncore_init),
	X86_MATCH_VFM(INTEL_SKYLAKE_X,		&skx_uncore_init),
	X86_MATCH_VFM(INTEL_KABYLAKE_L,		&skl_uncore_init),
	X86_MATCH_VFM(INTEL_KABYLAKE,		&skl_uncore_init),
	X86_MATCH_VFM(INTEL_COMETLAKE_L,	&skl_uncore_init),
	X86_MATCH_VFM(INTEL_COMETLAKE,		&skl_uncore_init),
	X86_MATCH_VFM(INTEL_ICELAKE_L,		&icl_uncore_init),
	X86_MATCH_VFM(INTEL_ICELAKE_NNPI,	&icl_uncore_init),
	X86_MATCH_VFM(INTEL_ICELAKE,		&icl_uncore_init),
	X86_MATCH_VFM(INTEL_ICELAKE_D,		&icx_uncore_init),
	X86_MATCH_VFM(INTEL_ICELAKE_X,		&icx_uncore_init),
	X86_MATCH_VFM(INTEL_TIGERLAKE_L,	&tgl_l_uncore_init),
	X86_MATCH_VFM(INTEL_TIGERLAKE,		&tgl_uncore_init),
	X86_MATCH_VFM(INTEL_ROCKETLAKE,		&rkl_uncore_init),
	X86_MATCH_VFM(INTEL_ALDERLAKE,		&adl_uncore_init),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L,	&adl_uncore_init),
	X86_MATCH_VFM(INTEL_RAPTORLAKE,		&adl_uncore_init),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_P,	&adl_uncore_init),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_S,	&adl_uncore_init),
	X86_MATCH_VFM(INTEL_METEORLAKE,		&mtl_uncore_init),
	X86_MATCH_VFM(INTEL_METEORLAKE_L,	&mtl_uncore_init),
	X86_MATCH_VFM(INTEL_ARROWLAKE,		&mtl_uncore_init),
	X86_MATCH_VFM(INTEL_ARROWLAKE_U,	&mtl_uncore_init),
	X86_MATCH_VFM(INTEL_ARROWLAKE_H,	&mtl_uncore_init),
	X86_MATCH_VFM(INTEL_LUNARLAKE_M,	&lnl_uncore_init),
	X86_MATCH_VFM(INTEL_PANTHERLAKE_L,	&ptl_uncore_init),
	X86_MATCH_VFM(INTEL_WILDCATLAKE_L,	&ptl_uncore_init),
	X86_MATCH_VFM(INTEL_NOVALAKE,		&nvl_uncore_init),
	X86_MATCH_VFM(INTEL_NOVALAKE_L,		&nvl_uncore_init),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X,	&spr_uncore_init),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	&spr_uncore_init),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,	&gnr_uncore_init),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_D,	&gnr_uncore_init),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_D,	&snr_uncore_init),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,	&adl_uncore_init),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X,	&gnr_uncore_init),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,	&gnr_uncore_init),
	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X,	&gnr_uncore_init),
	X86_MATCH_VFM(INTEL_DIAMONDRAPIDS_X,	&dmr_uncore_init),
	{},
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);

/* True if any discovery domain is configured for this platform. */
static bool
uncore_use_discovery(struct uncore_plat_init *config)
{
	for (int i = 0; i < UNCORE_DISCOVERY_DOMAINS; i++) {
		if (config->domain[i].discovery_base)
			return true;
	}

	return false;
}

/* Module entry point: pick the platform config and bring up all PMUs. */
static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct uncore_plat_init *uncore_init;
	int pret = 0, cret = 0, mret = 0, ret;

	/* Uncore PMUs are not usable inside a guest. */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	__uncore_max_dies =
		topology_max_packages() * topology_max_dies_per_package();

	id = x86_match_cpu(intel_uncore_match);
	if (!id) {
		/* Unknown model: try the generic discovery-based support. */
		uncore_init = (struct uncore_plat_init *)&generic_uncore_init;
		if (uncore_no_discover || !uncore_discovery(uncore_init))
			return -ENODEV;
	} else {
		uncore_init = (struct uncore_plat_init *)id->driver_data;
		/* Honor uncore_no_discover on discovery-dependent platforms. */
		if (uncore_no_discover && uncore_use_discovery(uncore_init))
			return -ENODEV;
		if (uncore_use_discovery(uncore_init) &&
		    !uncore_discovery(uncore_init))
			return -ENODEV;
	}

	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (uncore_init->mmio_init) {
		uncore_init->mmio_init();
		mret = uncore_mmio_init();
	}

	/* Succeed as long as at least one of PCI/MSR/MMIO came up. */
	if (cret && pret && mret) {
		ret = -ENODEV;
		goto free_discovery;
	}

	/* Install hotplug callbacks to setup the targets for each package */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
				"perf/x86/intel/uncore:online",
				uncore_event_cpu_online,
				uncore_event_cpu_offline);
	if (ret)
		goto err;
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_types_exit(uncore_mmio_uncores);
	uncore_pci_exit();
free_discovery:
	intel_uncore_clear_discovery_tables();
	return ret;
}
module_init(intel_uncore_init);

/* Module exit: remove hotplug state first, then unwind all PMU setup. */
static void __exit intel_uncore_exit(void)
{
	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
	uncore_types_exit(uncore_msr_uncores);
	uncore_types_exit(uncore_mmio_uncores);
	uncore_pci_exit();
	intel_uncore_clear_discovery_tables();
}
module_exit(intel_uncore_exit);