/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <asm/intel-family.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL	(5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					 UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					 UNCORE_PMC_IDX_MAX_FREERUNNING)

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4
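
/*
 * Illustrative (made-up) example of the dev/func/type/idx packing above:
 * UNCORE_PCI_DEV_FULL_DATA(0x14, 0x0, 0x3, 0x1) yields 0x14000301, so
 * UNCORE_PCI_DEV_DEV() recovers 0x14, UNCORE_PCI_DEV_FUNC() 0x0,
 * UNCORE_PCI_DEV_TYPE() 0x3 and UNCORE_PCI_DEV_IDX() 0x1.  A type of
 * UNCORE_EXTRA_PCI_DEV marks a PCI device that is tracked only as a helper
 * (see struct pci_extra_dev below) rather than as a PMON box of its own.
 */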

#define UNCORE_EVENT_CONSTRAINT(c, n)	EVENT_CONSTRAINT(c, n, 0xff)
#define UNCORE_EVENT_CONSTRAINT_RANGE(c, e, n)	\
				EVENT_CONSTRAINT_RANGE(c, e, n, 0xff)

#define UNCORE_IGNORE_END		-1

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
struct intel_uncore_topology;

struct uncore_discovery_domain {
	/* MSR address or PCI device used as the discovery base */
	u32 discovery_base;
	bool base_is_pci;
	int (*global_init)(u64 ctl);

	/* Units in the discovery table that should be ignored */
	int *units_ignore;
};

#define UNCORE_DISCOVERY_DOMAINS	2
struct uncore_plat_init {
	void (*cpu_init)(void);
	int (*pci_init)(void);
	void (*mmio_init)(void);

	struct uncore_discovery_domain domain[UNCORE_DISCOVERY_DOMAINS];
};

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int num_freerunning_types;
	int type_id;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	union {
		unsigned msr_offset;
		unsigned mmio_offset;
	};
	unsigned mmio_map_size;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	union {
		u64 *msr_offsets;
		u64 *pci_offsets;
		u64 *mmio_offsets;
	};
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	const struct attribute_group **attr_update;
	struct pmu *pmu; /* for custom pmu ops */
	struct rb_root *boxes;
	/*
	 * The uncore PMU stores the relevant platform topology configuration
	 * here to identify which platform component each PMON block of this
	 * type is supposed to monitor.
	 */
	struct intel_uncore_topology **topology;
	/*
	 * Optional callbacks for managing the mapping of uncore units to PMONs
	 */
	int (*get_topology)(struct intel_uncore_type *type);
	void (*set_mapping)(struct intel_uncore_type *type);
	void (*cleanup_mapping)(struct intel_uncore_type *type);
	/*
	 * Optional callback for extra uncore units cleanup
	 */
	void (*cleanup_extra_boxes)(struct intel_uncore_type *type);
};

#define pmu_group	attr_groups[0]
#define format_group	attr_groups[1]
#define events_group	attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};
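
/*
 * A minimal, purely illustrative sketch of an MSR-based implementation of
 * the callbacks above.  The names and the enable bit are hypothetical; real
 * implementations live in uncore_snb.c/uncore_snbep.c and use the platform's
 * actual control-register layout:
 *
 *	static void foo_uncore_msr_enable_event(struct intel_uncore_box *box,
 *						struct perf_event *event)
 *	{
 *		// hw.config was validated by the hw_config callback and
 *		// hw.config_base was resolved when the event was assigned
 *		wrmsrl(event->hw.config_base,
 *		       event->hw.config | FOO_PMON_CTL_EN);
 *	}
 *
 *	static u64 foo_uncore_msr_read_counter(struct intel_uncore_box *box,
 *					       struct perf_event *event)
 *	{
 *		u64 count;
 *
 *		rdmsrl(event->hw.event_base, count);
 *		return count;
 *	}
 */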

struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;
	bool registered;
	atomic_t activeboxes;
	cpumask_t cpu_mask;
	struct intel_uncore_type *type;
	struct intel_uncore_box **boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int dieid;	/* Logical die ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void __iomem *io_addr;
	struct intel_uncore_extra_reg shared_regs[];
};

/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0	0xf70
#define CFL_UNC_CBO_7_PER_CTR0		0xf76

#define UNCORE_BOX_FLAG_INITIATED	0
/* event config registers are 8 bytes apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8	1
/* CFL 8th CBOX has different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2

struct uncore_event_desc {
	struct device_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;
	unsigned int num_counters;
	unsigned int bits;
	unsigned *box_offsets;
};

struct uncore_iio_topology {
	int pci_bus_no;
	int segment;
};

struct uncore_upi_topology {
	int die_to;
	int pmu_idx_to;
	int enabled;
};

struct intel_uncore_topology {
	int pmu_idx;
	union {
		void *untyped;
		struct uncore_iio_topology *iio;
		struct uncore_upi_topology *upi;
	};
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_dieid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
int uncore_die_to_segment(int die);
int uncore_device_to_die(struct pci_dev *dev);

ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf);

static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
	return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
}

#define to_device_attribute(n)	container_of(n, struct device_attribute, attr)
#define to_dev_ext_attribute(n)	container_of(n, struct dev_ext_attribute, attr)
#define attr_to_ext_attr(n)	to_dev_ext_attribute(to_device_attribute(n))

extern int __uncore_max_dies;
#define uncore_max_dies()	(__uncore_max_dies)

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct device *dev,		\
				      struct device_attribute *attr,	\
				      char *page)			\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
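
/*
 * Typical usage of the two helpers above in a platform file (the bit ranges
 * and the event encoding shown are illustrative, not tied to a platform):
 *
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 *	DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
 *
 * creates sysfs "format" attributes that describe the event encoding, while
 *
 *	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x01,umask=0x00"),
 *
 * in an event_descs[] array exposes a named event alias under "events/".
 */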

static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
					       unsigned long offset)
{
	if (offset < box->pmu->type->mmio_map_size)
		return true;

	pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n",
		     offset, box->pmu->type->name);

	return false;
}

static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl +
	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}


/*
 * In the uncore document, there is no event-code assigned to free running
 * counters. Some events need to be defined to indicate the free running
 * counters. The events are encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, which is the same as
 * the fixed counters.
 *
 * The umask-code is used to distinguish a fixed counter from a free running
 * counter, and to distinguish the different types of free running counters.
 * - For fixed counters, the umask-code is 0x0X.
 *   X indicates the index of the fixed counter, which starts from 0.
 * - For free running counters, the umask-code uses the rest of the space.
 *   It bears the format 0xXY.
 *   X stands for the type of free running counter, which starts from 1.
 *   Y stands for the index of free running counters of the same type, which
 *   starts from 0.
 *
 * For example, there are three types of IIO free running counters on Skylake
 * server: IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters.
 * The event-code for all the free running counters is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask-code starts from 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of the BANDWIDTH counters. BANDWIDTH is
 * the second type, whose umask-code starts from 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}
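
/*
 * Worked example of the decoding above, using the SKX IIO 'bw_in_port2'
 * event from the comment (event=0xff, umask=0x22, i.e. config = 0x22ff):
 *
 *	uncore_freerunning_type(0x22ff) = (((0x22 - 0x10) >> 4) & 0xf) = 1
 *	uncore_freerunning_idx(0x22ff)  = (0x22 & 0xf)                 = 2
 *
 * i.e. the second free running type (BANDWIDTH), third counter of that type.
 * Both results index into the type's freerunning[] descriptors below.
 */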

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       (pmu->type->freerunning[type].box_offsets ?
		pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
		pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

/* Check and reject invalid config */
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	if (is_freerunning_event(event))
		return 0;

	return -EINVAL;
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->dieid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);

extern struct intel_uncore_type *empty_uncore[];
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern struct pci_driver *uncore_pci_sub_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;
extern int spr_uncore_units_ignore[];
extern int gnr_uncore_units_ignore[];
extern int dmr_uncore_imh_units_ignore[];
extern int dmr_uncore_cbb_units_ignore[];

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void adl_uncore_cpu_init(void);
void lnl_uncore_cpu_init(void);
void mtl_uncore_cpu_init(void);
void ptl_uncore_cpu_init(void);
void nvl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
void adl_uncore_mmio_init(void);
void lnl_uncore_mmio_init(void);
void ptl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);
int spr_uncore_pci_init(void);
void spr_uncore_cpu_init(void);
void spr_uncore_mmio_init(void);
int gnr_uncore_pci_init(void);
void gnr_uncore_cpu_init(void);
void gnr_uncore_mmio_init(void);
int dmr_uncore_pci_init(void);
void dmr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);
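
/*
 * For reference, a purely hypothetical MSR-based PMON type would be wired up
 * roughly like this in one of the platform files listed above (every name
 * and register offset below is made up; see uncore_snb.c/uncore_snbep.c for
 * real examples):
 *
 *	static struct intel_uncore_type foo_uncore_cbox = {
 *		.name		= "cbox",
 *		.num_counters	= 2,
 *		.num_boxes	= 4,
 *		.perf_ctr_bits	= 44,
 *		.perf_ctr	= 0x700,	// first counter MSR
 *		.event_ctl	= 0x710,	// first control MSR
 *		.event_mask	= 0xff,		// valid config bits
 *		.msr_offset	= 0x20,		// MSR stride between boxes
 *		.ops		= &foo_uncore_msr_ops,
 *		.format_group	= &foo_uncore_format_group,
 *	};
 *
 * uncore_msr_event_ctl()/uncore_msr_perf_ctr() above then resolve a given
 * (box, counter index) pair to the right MSR from these fields.
 */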