// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
#include "uncore.h"
#include "uncore_discovery.h"

/* When set, skip the PerfMon discovery mechanism at init time. */
static bool uncore_no_discover;
module_param(uncore_no_discover, bool, 0);
MODULE_PARM_DESC(uncore_no_discover, "Don't enable the Intel uncore PerfMon discovery mechanism "
				     "(default: enable the discovery mechanism).");
struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* The PCI driver for the device which the uncore doesn't own. */
struct pci_driver *uncore_pci_sub_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
int __uncore_max_dies;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_DESCRIPTION("Support for Intel uncore performance events");
MODULE_LICENSE("GPL");

/*
 * Look up the die id a PCI bus maps to via the pci2phy map.
 * Returns -1 when the bus's segment (or bus number) has no mapping.
 */
int uncore_pcibus_to_dieid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int die_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			die_id = map->pbus_to_dieid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return die_id;
}

int uncore_die_to_segment(int die)
{
	struct pci_bus
*bus = NULL;

	/* Find first pci bus which attributes to specified die. */
	while ((bus = pci_find_next_bus(bus)) &&
	       (die != uncore_pcibus_to_dieid(bus)))
		;

	return bus ? pci_domain_nr(bus) : -EINVAL;
}

/* Note: This API can only be used when NUMA information is available. */
int uncore_device_to_die(struct pci_dev *dev)
{
	int node = pcibus_to_node(dev->bus);
	int cpu;

	/* Any initialized CPU on the device's node carries the logical die id. */
	for_each_cpu(cpu, cpumask_of_pcibus(dev->bus)) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && cpu_to_node(cpu) == node)
			return c->topo.logical_die_id;
	}

	return -1;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

/*
 * Find (or lazily allocate) the pci2phy map entry for a PCI segment.
 * Caller must hold pci2phy_map_lock.  The lock is dropped and re-taken
 * around the allocation, hence the re-lookup afterwards in case another
 * CPU inserted the entry meanwhile.
 */
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc_obj(struct pci2phy_map);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	/* All 256 possible bus numbers start out unmapped (-1). */
	for (i = 0; i < 256; i++)
		map->pbus_to_dieid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	/* Drops the allocation if we lost the insert race; kfree(NULL) is a no-op. */
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	unsigned int dieid = topology_logical_die_id(cpu);

	/*
	 * The unsigned check also
catches the '-1' return value for non
	 * existent mappings in the topology map.
	 */
	return dieid < uncore_max_dies() ? pmu->boxes[dieid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrq(event->hw.event_base, count);

	return count;
}

void uncore_mmio_exit_box(struct intel_uncore_box *box)
{
	if (box->io_addr)
		iounmap(box->io_addr);
}

u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	if (!box->io_addr)
		return 0;

	/* Reject counter offsets that fall outside the box's MMIO window. */
	if (!uncore_mmio_is_valid_offset(box, event->hw.event_base))
		return 0;

	return readq(box->io_addr + event->hw.event_base);
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
*/
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	/* Take the shared reg if it is free, or already carries our config. */
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	/* Shared reg busy with a conflicting config: no counter can be used. */
	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put constraint if extra reg was actually allocated. Also
	 * takes care of event which do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
*/
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

/* Record the ctl/ctr base addresses for the counter selected by @idx. */
static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (uncore_pmc_fixed(hwc->idx)) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	/* Boxes from the discovery table resolve their own base addresses. */
	if (intel_generic_uncore_assign_hw_event(event, box))
		return;

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (uncore_pmc_freerunning(event->hw.idx))
		shift = 64 - uncore_freerunning_bits(box, event);
	else if (uncore_pmc_fixed(event->hw.idx))
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	/* Shift up then back down to handle wraparound of a partial-width counter. */
	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP, is broken
 * for SandyBridge.
So we use hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	/* Stop polling when the box is idle or now managed by another CPU. */
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_setup(&box->hrtimer, uncore_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
}

/* Allocate a box (plus its trailing shared-reg array) on @node and init it. */
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->dieid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);
return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}

/*
 * Append events to box->event_list starting at the current n_events mark;
 * with @dogrp the leader's siblings are collected too.  Returns the new
 * event count, or -EINVAL when the box's counters would be exceeded.
 */
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	/* A type-specific hook takes precedence over the static tables. */
	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if (constraint_match(c, event->hw.config))
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

static int uncore_assign_events(struct
intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	/* Gather constraints and track min/max constraint weights. */
	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	/* Validation-only run (!assign) or failed scheduling: drop constraints. */
	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}

void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	/*
	 * Free running counter is read-only and always active.
	 * Use the current counter value as start point.
	 * There is no overflow interrupt for free running counter.
	 * Use hrtimer to periodically poll the counter to avoid overflow.
*/
	if (uncore_pmc_freerunning(event->hw.idx)) {
		list_add_tail(&event->active_entry, &box->active_list);
		local64_set(&event->hw.prev_count,
			    uncore_read_counter(box, event));
		if (box->n_active++ == 0)
			uncore_pmu_start_hrtimer(box);
		return;
	}

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	/* First active event on the box starts the polling timer. */
	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	/* Cannot disable free running counter which is read-only */
	if (uncore_pmc_freerunning(hwc->idx)) {
		list_del(&event->active_entry);
		if (--box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
		uncore_perf_event_update(box, event);
		return;
	}

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		/* Last active event on the box stops the polling timer. */
		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of a event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	/*
	 * The free
running counter is assigned in event_init().
	 * The free running counter event and free running counter
	 * are 1:1 mapped. It doesn't need to be tracked in event_list.
	 */
	if (uncore_pmc_freerunning(hwc->idx)) {
		if (flags & PERF_EF_START)
			uncore_pmu_event_start(event, 0);
		return 0;
	}

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/* Schedule all collected events onto counters. */
	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	/*
	 * The event for free running counter is not tracked by event_list.
	 * It doesn't need to force event->hw.idx = -1 to reassign the counter.
	 * Because the event and the free running counter are 1:1 mapped.
*/
	if (uncore_pmc_freerunning(event->hw.idx))
		return;

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			/* Compact the list over the removed slot. */
			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	/* The free running counter is always active. */
	if (uncore_pmc_freerunning(event->hw.idx))
		return 0;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	/* assign == NULL: dry-run scheduling only, no state is retained. */
	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu =
uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (!pmu->registered)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else if (is_freerunning_event(event)) {
		hwc->config = event->attr.config;
		if (!check_valid_freerunning_event(box, event))
			return -EINVAL;
		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
		/*
		 * The free running counter event and free running counter
		 * are always 1:1 mapped.
		 * The free running counter is always active.
		 * Assign the free running counter here.
*/
		event->hw.event_base = uncore_freerunning_counter(box, event);
	} else {
		/* Mask off bits outside the type's valid event encoding. */
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static void uncore_pmu_enable(struct pmu *pmu)
{
	struct intel_uncore_pmu *uncore_pmu;
	struct intel_uncore_box *box;

	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);

	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
	if (!box)
		return;

	if (uncore_pmu->type->ops->enable_box)
		uncore_pmu->type->ops->enable_box(box);
}

static void uncore_pmu_disable(struct pmu *pmu)
{
	struct intel_uncore_pmu *uncore_pmu;
	struct intel_uncore_box *box;

	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);

	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
	if (!box)
		return;

	if (uncore_pmu->type->ops->disable_box)
		uncore_pmu->type->ops->disable_box(box);
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct intel_uncore_pmu *pmu = container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);

	return cpumap_print_to_pagebuf(true, buf, &pmu->cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

/* Box id comes from the discovery table when present, else the pmu index. */
static inline int uncore_get_box_id(struct intel_uncore_type *type,
				    struct intel_uncore_pmu *pmu)
{
	if (type->boxes)
		return
intel_uncore_find_discovery_unit_id(type->boxes, -1, pmu->pmu_idx);

	return pmu->pmu_idx;
}

void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu)
{
	struct intel_uncore_type *type = pmu->type;

	if (type->num_boxes == 1)
		sprintf(pmu_name, "uncore_type_%u", type->type_id);
	else {
		sprintf(pmu_name, "uncore_type_%u_%d",
			type->type_id, uncore_get_box_id(type, pmu));
	}
}

static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
{
	struct intel_uncore_type *type = pmu->type;

	/*
	 * No uncore block name in discovery table.
	 * Use uncore_type_&typeid_&boxid as name.
	 */
	if (!type->name) {
		uncore_get_alias_name(pmu->name, pmu);
		return;
	}

	if (type->num_boxes == 1) {
		if (strlen(type->name) > 0)
			sprintf(pmu->name, "uncore_%s", type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		/*
		 * Use the box ID from the discovery table if applicable.
*/
		sprintf(pmu->name, "uncore_%s_%d", type->name,
			uncore_get_box_id(type, pmu));
	}
}

static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	/* Types without a custom pmu template get the generic callbacks. */
	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.pmu_enable	= uncore_pmu_enable,
			.pmu_disable	= uncore_pmu_disable,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
			.module		= THIS_MODULE,
			.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
			.attr_update	= pmu->type->attr_update,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
		pmu->pmu.attr_update = pmu->type->attr_update;
	}

	uncore_get_pmu_name(pmu);

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int die;

	for (die = 0; die < uncore_max_dies(); die++)
		kfree(pmu->boxes[die]);
	kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (type->cleanup_mapping)
		type->cleanup_mapping(type);

	if (type->cleanup_extra_boxes)
		type->cleanup_extra_boxes(type);

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}

	kfree(type->events_group);
	type->events_group = NULL;
}

static
void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

static int __init uncore_type_init(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmus;
	size_t size;
	int i, j;

	pmus = kzalloc_objs(*pmus, type->num_boxes);
	if (!pmus)
		return -ENOMEM;

	/* Each pmu gets an array of per-die box pointers. */
	size = uncore_max_dies() * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		pmus[i].boxes = kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			goto err;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				0, type->num_counters, 0, 0);

	if (type->event_descs) {
		struct {
			struct attribute_group group;
			struct attribute *attrs[];
		} *attr_group;
		/* Count the name-terminated event descriptor array. */
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc_flex(*attr_group, attrs, i + 1);
		if (!attr_group)
			goto err;

		attr_group->group.name = "events";
		attr_group->group.attrs = attr_group->attrs;

		for (j = 0; j < i; j++)
			attr_group->attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = &attr_group->group;
	}

	type->pmu_group = &uncore_pmu_attr_group;

	if (type->set_mapping)
		type->set_mapping(type);

	return 0;

err:
	for (i = 0; i < type->num_boxes; i++)
		kfree(pmus[i].boxes);
	kfree(pmus);

	return -ENOMEM;
}

static int __init
uncore_types_init(struct intel_uncore_type **types)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Get the die information of a PCI device.
 * @pdev: The PCI device.
* @die: The die id which the device maps to.
 */
static int uncore_pci_get_dev_die_info(struct pci_dev *pdev, int *die)
{
	*die = uncore_pcibus_to_dieid(pdev->bus);
	if (*die < 0)
		return -EINVAL;

	return 0;
}

/*
 * Walk every discovered unit of every PCI uncore type and return the PMU
 * whose discovery address matches @pdev's domain/bus/devfn, or NULL.
 */
static struct intel_uncore_pmu *
uncore_pci_find_dev_pmu_from_types(struct pci_dev *pdev)
{
	struct intel_uncore_type **types = uncore_pci_uncores;
	struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type;
	struct rb_node *node;

	for (; *types; types++) {
		type = *types;

		for (node = rb_first(type->boxes); node; node = rb_next(node)) {
			unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
			if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(unit->addr) &&
			    pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(unit->addr) &&
			    pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(unit->addr))
				return &type->pmus[unit->pmu_idx];
		}
	}

	return NULL;
}

/*
 * Find the PMU of a PCI device.
 * @pdev: The PCI device.
 * @ids: The ID table of the available PCI devices with a PMU.
 *       If NULL, search the whole uncore_pci_uncores.
*/
static struct intel_uncore_pmu *
uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids)
{
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_type *type;
	kernel_ulong_t data;
	unsigned int devfn;

	/* No ID table: fall back to the discovery-table lookup. */
	if (!ids)
		return uncore_pci_find_dev_pmu_from_types(pdev);

	while (ids && ids->vendor) {
		if ((ids->vendor == pdev->vendor) &&
		    (ids->device == pdev->device)) {
			/* driver_data encodes type/index/dev/func - see UNCORE_PCI_DEV_*. */
			data = ids->driver_data;
			devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(data),
					  UNCORE_PCI_DEV_FUNC(data));
			if (devfn == pdev->devfn) {
				type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(data)];
				pmu = &type->pmus[UNCORE_PCI_DEV_IDX(data)];
				break;
			}
		}
		ids++;
	}
	return pmu;
}

/*
 * Register the PMU for a PCI device
 * @pdev: The PCI device.
 * @type: The corresponding PMU type of the device.
 * @pmu: The corresponding PMU of the device.
 * @die: The die id which the device maps to.
*/
static int uncore_pci_pmu_register(struct pci_dev *pdev,
				   struct intel_uncore_type *type,
				   struct intel_uncore_pmu *pmu,
				   int die)
{
	struct intel_uncore_box *box;
	int ret;

	if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	atomic_inc(&box->refcnt);
	box->dieid = die;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);

	pmu->boxes[die] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		/* Roll back the box that was just installed. */
		pmu->boxes[die] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	int die, ret;

	ret = uncore_pci_get_dev_die_info(pdev, &die);
	if (ret)
		return ret;

	/* Devices the uncore doesn't own are just stashed for later use. */
	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[die].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. We should check
	 * PCI slot and func to indicate the uncore box.
	 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = to_pci_driver(pdev->dev.driver);

		pmu = uncore_pci_find_dev_pmu(pdev, pci_drv->id_table);
		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * for performance monitoring unit with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	ret = uncore_pci_pmu_register(pdev, type, pmu, die);

	/* On failure pmu->boxes[die] is NULL, so drvdata stays NULL too. */
	pci_set_drvdata(pdev, pmu->boxes[die]);

	return ret;
}

/*
 * Unregister the PMU of a PCI device
 * @pmu: The corresponding PMU is unregistered.
 * @die: The die id which the device maps to.
 */
static void uncore_pci_pmu_unregister(struct intel_uncore_pmu *pmu, int die)
{
	struct intel_uncore_box *box = pmu->boxes[die];

	pmu->boxes[die] = NULL;
	/* Last active box tears down the perf PMU registration. */
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

/* PCI driver remove callback: undo what uncore_pci_probe() set up. */
static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, die;

	if (uncore_pci_get_dev_die_info(pdev, &die))
		return;

	box = pci_get_drvdata(pdev);
	if (!box) {
		/*
		 * No drvdata means this was an "extra" device recorded in
		 * uncore_pci_probe(); just clear its slot.
		 */
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[die].dev[i] == pdev) {
				uncore_extra_pci_dev[die].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;

	pci_set_drvdata(pdev, NULL);

	uncore_pci_pmu_unregister(pmu, die);
}

/*
 * Common bus notifier body, shared by the main and sub driver notifiers;
 * @ids selects which ID table to match the device against (NULL means
 * use the discovery tables).
 */
static int uncore_bus_notify(struct notifier_block *nb,
			     unsigned long action, void *data,
			     const struct pci_device_id *ids)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_uncore_pmu *pmu;
	int die;

	/* Unregister the PMU
	   when the device is going to be deleted. */
	if (action != BUS_NOTIFY_DEL_DEVICE)
		return NOTIFY_DONE;

	pmu = uncore_pci_find_dev_pmu(pdev, ids);
	if (!pmu)
		return NOTIFY_DONE;

	if (uncore_pci_get_dev_die_info(pdev, &die))
		return NOTIFY_DONE;

	uncore_pci_pmu_unregister(pmu, die);

	return NOTIFY_OK;
}

/* Notifier for devices matched via the sub driver's ID table. */
static int uncore_pci_sub_bus_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	return uncore_bus_notify(nb, action, data,
				 uncore_pci_sub_driver->id_table);
}

static struct notifier_block uncore_pci_sub_notifier = {
	.notifier_call = uncore_pci_sub_bus_notify,
};

/*
 * Register PMUs for PCI devices which the uncore driver does not own
 * (they are bound to another driver); removal is tracked via the bus
 * notifier registered at the end.
 */
static void uncore_pci_sub_driver_init(void)
{
	const struct pci_device_id *ids = uncore_pci_sub_driver->id_table;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct pci_dev *pci_sub_dev;
	bool notify = false;
	unsigned int devfn;
	int die;

	while (ids && ids->vendor) {
		pci_sub_dev = NULL;
		type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(ids->driver_data)];
		/*
		 * Search the available device, and register the
		 * corresponding PMU.
		 */
		while ((pci_sub_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
						     ids->device, pci_sub_dev))) {
			/* Match the dev/func encoded in driver_data. */
			devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
					  UNCORE_PCI_DEV_FUNC(ids->driver_data));
			if (devfn != pci_sub_dev->devfn)
				continue;

			pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];

			if (uncore_pci_get_dev_die_info(pci_sub_dev, &die))
				continue;

			if (!uncore_pci_pmu_register(pci_sub_dev, type, pmu,
						     die))
				notify = true;
		}
		ids++;
	}

	/* Only keep the notifier if something was actually registered. */
	if (notify && bus_register_notifier(&pci_bus_type, &uncore_pci_sub_notifier))
		notify = false;

	/* Nothing registered: drop the sub driver so exit skips teardown. */
	if (!notify)
		uncore_pci_sub_driver = NULL;
}

/* Notifier for devices found via the discovery tables (no ID table). */
static int uncore_pci_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	return uncore_bus_notify(nb, action, data, NULL);
}

static struct notifier_block uncore_pci_notifier = {
	.notifier_call = uncore_pci_bus_notify,
};


/*
 * Register PMUs for all PCI uncore units found by the discovery
 * mechanism (used when there is no uncore_pci_driver to bind).
 */
static void uncore_pci_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_pci_uncores;
	struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct rb_node *node;
	struct pci_dev *pdev;

	for (; *types; types++) {
		type = *types;

		/* Walk the discovery units of this type (rb-tree keyed boxes). */
		for (node = rb_first(type->boxes); node; node = rb_next(node)) {
			unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
			pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(unit->addr),
							   UNCORE_DISCOVERY_PCI_BUS(unit->addr),
							   UNCORE_DISCOVERY_PCI_DEVFN(unit->addr));

			if (!pdev)
				continue;
			pmu = &type->pmus[unit->pmu_idx];
			uncore_pci_pmu_register(pdev, type, pmu, unit->die);
		}
	}

	/* Track hot-removal of these devices via the bus notifier. */
	bus_register_notifier(&pci_bus_type, &uncore_pci_notifier);
}

/* Set up all PCI-based uncore PMUs; called once from intel_uncore_init(). */
static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size =
	       uncore_max_dies() * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores);
	if (ret)
		goto errtype;

	if (uncore_pci_driver) {
		/* Platform provides a PCI driver: bind probe/remove to it. */
		uncore_pci_driver->probe = uncore_pci_probe;
		uncore_pci_driver->remove = uncore_pci_remove;

		ret = pci_register_driver(uncore_pci_driver);
		if (ret)
			goto errtype;
	} else
		/* Discovery-based platforms register PMUs directly. */
		uncore_pci_pmus_register();

	if (uncore_pci_sub_driver)
		uncore_pci_sub_driver_init();

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

/* Tear down everything uncore_pci_init() set up, in reverse order. */
static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		if (uncore_pci_sub_driver)
			bus_unregister_notifier(&pci_bus_type, &uncore_pci_sub_notifier);
		if (uncore_pci_driver)
			pci_unregister_driver(uncore_pci_driver);
		else
			bus_unregister_notifier(&pci_bus_type, &uncore_pci_notifier);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}

/*
 * Whether a unit of this PMU type exists on @die. Types without
 * discovery data (!type->boxes) are assumed present everywhere.
 */
static bool uncore_die_has_box(struct intel_uncore_type *type,
			       int die, unsigned int pmu_idx)
{
	if (!type->boxes)
		return true;

	if (intel_uncore_find_discovery_unit_id(type->boxes, die, pmu_idx) < 0)
		return false;

	return true;
}

/*
 * Move event collection for all boxes of @type from @old_cpu to
 * @new_cpu (either may be -1 for "none").
 */
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, die;

	die = topology_logical_die_id(old_cpu < 0 ?
				      new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[die];
		if (!box)
			continue;

		if (old_cpu < 0) {
			/* First assignment: box must not have an owner yet. */
			WARN_ON_ONCE(box->cpu != -1);
			if (uncore_die_has_box(type, die, pmu->pmu_idx)) {
				box->cpu = new_cpu;
				cpumask_set_cpu(new_cpu, &pmu->cpu_mask);
			}
			continue;
		}

		WARN_ON_ONCE(box->cpu != -1 && box->cpu != old_cpu);
		box->cpu = -1;
		cpumask_clear_cpu(old_cpu, &pmu->cpu_mask);
		if (new_cpu < 0)
			continue;

		if (!uncore_die_has_box(type, die, pmu->pmu_idx))
			continue;
		/* Migrate active events before handing the box over. */
		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
		cpumask_set_cpu(new_cpu, &pmu->cpu_mask);
	}
}

/* Apply uncore_change_type_ctx() to a NULL-terminated array of types. */
static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

/* Drop a reference on every box of die @id; last ref tears the box down. */
static void uncore_box_unref(struct intel_uncore_type **types, int id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[id];
			if (box && box->cpu >= 0 && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
}

/* CPU hotplug offline callback: re-home or drop this CPU's uncore duty. */
static int uncore_event_cpu_offline(unsigned int cpu)
{
	int die, target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		goto unref;
	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_mmio_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
	/* Clear the references */
	die = topology_logical_die_id(cpu);
	uncore_box_unref(uncore_msr_uncores, die);
	uncore_box_unref(uncore_mmio_uncores, die);
	return 0;
}

/*
 * Allocate the boxes still missing for @die. All-or-nothing: on any
 * allocation failure everything allocated here is freed and nothing
 * is installed.
 */
static int allocate_boxes(struct intel_uncore_type **types,
			  unsigned int die, unsigned int cpu)
{
	struct intel_uncore_box *box, *tmp;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	LIST_HEAD(allocated);
	int i;

	/* Try to allocate all required boxes */
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[die])
				continue;
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				goto cleanup;
			box->pmu = pmu;
			box->dieid = die;
			list_add(&box->active_list, &allocated);
		}
	}
	/* Install them in the pmus */
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		box->pmu->boxes[die] = box;
	}
	return 0;

cleanup:
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		kfree(box);
	}
	return -ENOMEM;
}

/*
 * Ensure boxes exist for die @id and take a reference on each;
 * the first reference initializes the box hardware.
 */
static int uncore_box_ref(struct intel_uncore_type **types,
			  int id, unsigned int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, ret;

	ret = allocate_boxes(types, id, cpu);
	if (ret)
		return ret;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[id];
			if (box && box->cpu >= 0 && atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}
	return 0;
}

/* CPU hotplug online callback: set up boxes and possibly adopt the die. */
static int uncore_event_cpu_online(unsigned int cpu)
{
	int die, target, msr_ret, mmio_ret;

	die = topology_logical_die_id(cpu);
	msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
	mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
	/* Fail only if both MSR and MMIO setup failed. */
	if (msr_ret && mmio_ret)
		return -ENOMEM;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	if (!msr_ret)
		uncore_change_context(uncore_msr_uncores, -1, cpu);
	if (!mmio_ret)
		uncore_change_context(uncore_mmio_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
	return 0;
}

/* Register every per-box PMU of one uncore type; stop on first error. */
static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

/* Register all MSR-based uncore PMU types. */
static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

/* Initialize and register MSR-based uncore support. */
static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

/* Initialize and register MMIO-based uncore support. */
static int __init uncore_mmio_init(void)
{
	struct intel_uncore_type **types = uncore_mmio_uncores;
	int ret;

	ret = uncore_types_init(types);
	if
	   (ret)
		goto err;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			goto err;
	}
	return 0;
err:
	uncore_types_exit(uncore_mmio_uncores);
	uncore_mmio_uncores = empty_uncore;
	return ret;
}

/*
 * Unfreeze the global uncore counters behind the MMIO control
 * register at physical address @ctl.
 */
static int uncore_mmio_global_init(u64 ctl)
{
	void __iomem *io_addr;

	io_addr = ioremap(ctl, sizeof(ctl));
	if (!io_addr)
		return -ENOMEM;

	/* Clear freeze bit (0) to enable all counters. */
	writel(0, io_addr);

	iounmap(io_addr);
	return 0;
}

/*
 * Per-platform init descriptors, selected through intel_uncore_match[]
 * below and consumed by intel_uncore_init().
 */
static const struct uncore_plat_init nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct uncore_plat_init snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct uncore_plat_init ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct uncore_plat_init hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct uncore_plat_init bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct uncore_plat_init snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct uncore_plat_init nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct uncore_plat_init ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct uncore_plat_init hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct uncore_plat_init bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct uncore_plat_init knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct uncore_plat_init skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct uncore_plat_init skx_uncore_init __initconst = {
	.cpu_init = skx_uncore_cpu_init,
	.pci_init = skx_uncore_pci_init,
};

static const struct uncore_plat_init icl_uncore_init __initconst = {
	.cpu_init = icl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct uncore_plat_init tgl_uncore_init __initconst = {
	.cpu_init = tgl_uncore_cpu_init,
	.mmio_init = tgl_uncore_mmio_init,
};

static const struct uncore_plat_init tgl_l_uncore_init __initconst = {
	.cpu_init = tgl_uncore_cpu_init,
	.mmio_init = tgl_l_uncore_mmio_init,
};

static const struct uncore_plat_init rkl_uncore_init __initconst = {
	.cpu_init = tgl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct uncore_plat_init adl_uncore_init __initconst = {
	.cpu_init = adl_uncore_cpu_init,
	.mmio_init = adl_uncore_mmio_init,
};

static const struct uncore_plat_init mtl_uncore_init __initconst = {
	.cpu_init = mtl_uncore_cpu_init,
	.mmio_init = adl_uncore_mmio_init,
};

static const struct uncore_plat_init lnl_uncore_init __initconst = {
	.cpu_init = lnl_uncore_cpu_init,
	.mmio_init = lnl_uncore_mmio_init,
};

/* Platforms below also use the discovery mechanism (domain[] fields). */
static const struct uncore_plat_init ptl_uncore_init __initconst = {
	.cpu_init = ptl_uncore_cpu_init,
	.mmio_init = ptl_uncore_mmio_init,
	.domain[0].discovery_base = UNCORE_DISCOVERY_MSR,
	.domain[0].global_init = uncore_mmio_global_init,
};

static const struct uncore_plat_init nvl_uncore_init __initconst = {
	.cpu_init = nvl_uncore_cpu_init,
	.mmio_init = ptl_uncore_mmio_init,
	.domain[0].discovery_base = PACKAGE_UNCORE_DISCOVERY_MSR,
	.domain[0].global_init = uncore_mmio_global_init,
};

static const struct uncore_plat_init icx_uncore_init __initconst = {
	.cpu_init = icx_uncore_cpu_init,
	.pci_init = icx_uncore_pci_init,
	.mmio_init = icx_uncore_mmio_init,
};

static const struct uncore_plat_init snr_uncore_init __initconst = {
	.cpu_init = snr_uncore_cpu_init,
	.pci_init = snr_uncore_pci_init,
	.mmio_init = snr_uncore_mmio_init,
};

static const struct uncore_plat_init spr_uncore_init __initconst = {
	.cpu_init = spr_uncore_cpu_init,
	.pci_init = spr_uncore_pci_init,
	.mmio_init = spr_uncore_mmio_init,
	.domain[0].base_is_pci = true,
	.domain[0].discovery_base = UNCORE_DISCOVERY_TABLE_DEVICE,
	.domain[0].units_ignore = spr_uncore_units_ignore,
};

static const struct uncore_plat_init gnr_uncore_init __initconst = {
	.cpu_init = gnr_uncore_cpu_init,
	.pci_init = gnr_uncore_pci_init,
	.mmio_init = gnr_uncore_mmio_init,
	.domain[0].base_is_pci = true,
	.domain[0].discovery_base = UNCORE_DISCOVERY_TABLE_DEVICE,
	.domain[0].units_ignore = gnr_uncore_units_ignore,
};

static const struct uncore_plat_init dmr_uncore_init __initconst = {
	.pci_init = dmr_uncore_pci_init,
	.mmio_init = dmr_uncore_mmio_init,
	.domain[0].base_is_pci = true,
	.domain[0].discovery_base = DMR_UNCORE_DISCOVERY_TABLE_DEVICE,
	.domain[0].units_ignore = dmr_uncore_imh_units_ignore,
	.domain[1].discovery_base = CBB_UNCORE_DISCOVERY_MSR,
	.domain[1].units_ignore = dmr_uncore_cbb_units_ignore,
	.domain[1].global_init = uncore_mmio_global_init,
};

/* Fallback descriptor for CPUs not listed in intel_uncore_match[]. */
static const struct uncore_plat_init generic_uncore_init __initconst = {
	.cpu_init = intel_uncore_generic_uncore_cpu_init,
	.pci_init = intel_uncore_generic_uncore_pci_init,
	.mmio_init = intel_uncore_generic_uncore_mmio_init,
	.domain[0].base_is_pci = true,
	.domain[0].discovery_base = PCI_ANY_ID,
	.domain[1].discovery_base = UNCORE_DISCOVERY_MSR,
};

/* CPU model -> platform init descriptor table. */
static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_MATCH_VFM(INTEL_NEHALEM_EP,		&nhm_uncore_init),
	X86_MATCH_VFM(INTEL_NEHALEM,		&nhm_uncore_init),
	X86_MATCH_VFM(INTEL_WESTMERE,		&nhm_uncore_init),
	X86_MATCH_VFM(INTEL_WESTMERE_EP,	&nhm_uncore_init),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE,	&snb_uncore_init),
	X86_MATCH_VFM(INTEL_IVYBRIDGE,		&ivb_uncore_init),
	X86_MATCH_VFM(INTEL_HASWELL,		&hsw_uncore_init),
	X86_MATCH_VFM(INTEL_HASWELL_L,		&hsw_uncore_init),
	X86_MATCH_VFM(INTEL_HASWELL_G,		&hsw_uncore_init),
	X86_MATCH_VFM(INTEL_BROADWELL,		&bdw_uncore_init),
	X86_MATCH_VFM(INTEL_BROADWELL_G,	&bdw_uncore_init),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE_X,	&snbep_uncore_init),
	X86_MATCH_VFM(INTEL_NEHALEM_EX,		&nhmex_uncore_init),
	X86_MATCH_VFM(INTEL_WESTMERE_EX,	&nhmex_uncore_init),
	X86_MATCH_VFM(INTEL_IVYBRIDGE_X,	&ivbep_uncore_init),
	X86_MATCH_VFM(INTEL_HASWELL_X,		&hswep_uncore_init),
	X86_MATCH_VFM(INTEL_BROADWELL_X,	&bdx_uncore_init),
	X86_MATCH_VFM(INTEL_BROADWELL_D,	&bdx_uncore_init),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNL,	&knl_uncore_init),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNM,	&knl_uncore_init),
	X86_MATCH_VFM(INTEL_SKYLAKE,		&skl_uncore_init),
	X86_MATCH_VFM(INTEL_SKYLAKE_L,		&skl_uncore_init),
	X86_MATCH_VFM(INTEL_SKYLAKE_X,		&skx_uncore_init),
	X86_MATCH_VFM(INTEL_KABYLAKE_L,		&skl_uncore_init),
	X86_MATCH_VFM(INTEL_KABYLAKE,		&skl_uncore_init),
	X86_MATCH_VFM(INTEL_COMETLAKE_L,	&skl_uncore_init),
	X86_MATCH_VFM(INTEL_COMETLAKE,		&skl_uncore_init),
	X86_MATCH_VFM(INTEL_ICELAKE_L,		&icl_uncore_init),
	X86_MATCH_VFM(INTEL_ICELAKE_NNPI,	&icl_uncore_init),
	X86_MATCH_VFM(INTEL_ICELAKE,		&icl_uncore_init),
	X86_MATCH_VFM(INTEL_ICELAKE_D,		&icx_uncore_init),
	X86_MATCH_VFM(INTEL_ICELAKE_X,		&icx_uncore_init),
	X86_MATCH_VFM(INTEL_TIGERLAKE_L,	&tgl_l_uncore_init),
	X86_MATCH_VFM(INTEL_TIGERLAKE,		&tgl_uncore_init),
	X86_MATCH_VFM(INTEL_ROCKETLAKE,		&rkl_uncore_init),
	X86_MATCH_VFM(INTEL_ALDERLAKE,		&adl_uncore_init),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L,	&adl_uncore_init),
	X86_MATCH_VFM(INTEL_RAPTORLAKE,		&adl_uncore_init),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_P,	&adl_uncore_init),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_S,	&adl_uncore_init),
	X86_MATCH_VFM(INTEL_METEORLAKE,		&mtl_uncore_init),
	X86_MATCH_VFM(INTEL_METEORLAKE_L,	&mtl_uncore_init),
	X86_MATCH_VFM(INTEL_ARROWLAKE,		&mtl_uncore_init),
	X86_MATCH_VFM(INTEL_ARROWLAKE_U,	&mtl_uncore_init),
	X86_MATCH_VFM(INTEL_ARROWLAKE_H,	&mtl_uncore_init),
	X86_MATCH_VFM(INTEL_LUNARLAKE_M,	&lnl_uncore_init),
	X86_MATCH_VFM(INTEL_PANTHERLAKE_L,	&ptl_uncore_init),
	X86_MATCH_VFM(INTEL_WILDCATLAKE_L,	&ptl_uncore_init),
	X86_MATCH_VFM(INTEL_NOVALAKE,		&nvl_uncore_init),
	X86_MATCH_VFM(INTEL_NOVALAKE_L,		&nvl_uncore_init),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X,	&spr_uncore_init),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	&spr_uncore_init),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,	&gnr_uncore_init),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_D,	&gnr_uncore_init),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_D,	&snr_uncore_init),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,	&adl_uncore_init),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X,	&gnr_uncore_init),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,	&gnr_uncore_init),
	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X,	&gnr_uncore_init),
	X86_MATCH_VFM(INTEL_DIAMONDRAPIDS_X,	&dmr_uncore_init),
	{},
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);

/* True if any discovery domain is configured in @config. */
static bool uncore_use_discovery(struct uncore_plat_init *config)
{
	for (int i = 0; i < UNCORE_DISCOVERY_DOMAINS; i++) {
		if (config->domain[i].discovery_base)
			return true;
	}

	return false;
}

/* Module entry point: pick the platform descriptor and bring up all PMUs. */
static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct uncore_plat_init *uncore_init;
	int pret = 0, cret = 0, mret = 0, ret;

	/* Uncore counters are not virtualized; bail out under a hypervisor. */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	__uncore_max_dies =
		topology_max_packages() * topology_max_dies_per_package();

	id = x86_match_cpu(intel_uncore_match);
	if (!id) {
		/* Unknown model: rely purely on the discovery mechanism. */
		uncore_init = (struct uncore_plat_init *)&generic_uncore_init;
		if (uncore_no_discover || !uncore_discovery(uncore_init))
			return -ENODEV;
	} else {
		uncore_init = (struct uncore_plat_init *)id->driver_data;
		if (uncore_no_discover && uncore_use_discovery(uncore_init))
			return -ENODEV;
		if (uncore_use_discovery(uncore_init) &&
		    !uncore_discovery(uncore_init))
			return -ENODEV;
	}

	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (uncore_init->mmio_init) {
		uncore_init->mmio_init();
		mret = uncore_mmio_init();
	}

	/* Succeed if at least one of PCI/MSR/MMIO uncore came up. */
	if (cret && pret && mret) {
		ret = -ENODEV;
		goto free_discovery;
	}

	/* Install hotplug callbacks to setup the targets for each package */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
				"perf/x86/intel/uncore:online",
				uncore_event_cpu_online,
				uncore_event_cpu_offline);
	if (ret)
		goto err;
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_types_exit(uncore_mmio_uncores);
	uncore_pci_exit();
free_discovery:
	intel_uncore_clear_discovery_tables();
	return ret;
}
module_init(intel_uncore_init);

/* Module exit: remove hotplug state first, then tear down all PMU types. */
static void __exit intel_uncore_exit(void)
{
	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
	uncore_types_exit(uncore_msr_uncores);
	uncore_types_exit(uncore_mmio_uncores);
	uncore_pci_exit();
	intel_uncore_clear_discovery_tables();
}
module_exit(intel_uncore_exit);