/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <asm/intel-family.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL	(5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					 UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					 UNCORE_PMC_IDX_MAX_FREERUNNING)

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4

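/*
 * Illustration of the packing above, with arbitrary example values:
 * UNCORE_PCI_DEV_FULL_DATA(0x1f, 2, 3, 0) yields 0x1f020300, which
 * UNCORE_PCI_DEV_DEV(), _FUNC(), _TYPE() and _IDX() decode back to
 * device 0x1f, function 2, type 3 and index 0 respectively.
 */
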
#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

#define UNCORE_IGNORE_END		-1

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
struct intel_uncore_topology;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int num_freerunning_types;
	int type_id;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	union {
		unsigned msr_offset;
		unsigned mmio_offset;
	};
	unsigned mmio_map_size;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	union {
		u64 *msr_offsets;
		u64 *pci_offsets;
		u64 *mmio_offsets;
	};
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	const struct attribute_group **attr_update;
	struct pmu *pmu; /* for custom pmu ops */
	struct rb_root *boxes;
	/*
	 * The uncore PMU stores the relevant platform topology configuration
	 * here to identify which platform component each PMON block of that
	 * type is supposed to monitor.
	 */
	struct intel_uncore_topology **topology;
	/*
	 * Optional callbacks for managing mapping of Uncore units to PMONs
	 */
	int (*get_topology)(struct intel_uncore_type *type);
	void (*set_mapping)(struct intel_uncore_type *type);
	void (*cleanup_mapping)(struct intel_uncore_type *type);
	/*
	 * Optional callbacks for extra uncore units cleanup
	 */
	void (*cleanup_extra_boxes)(struct intel_uncore_type *type);
};

#define pmu_group	attr_groups[0]
#define format_group	attr_groups[1]
#define events_group	attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

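/*
 * A box type wires these callbacks to its MSR, PCI or MMIO accessors.
 * A minimal MSR-based sketch (the callback names are hypothetical; only
 * uncore_msr_read_counter() is a real helper declared later in this file):
 *
 *	static struct intel_uncore_ops example_msr_ops = {
 *		.init_box	= example_msr_init_box,
 *		.disable_event	= example_msr_disable_event,
 *		.enable_event	= example_msr_enable_event,
 *		.read_counter	= uncore_msr_read_counter,
 *	};
 */
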
struct intel_uncore_pmu {
	struct pmu			pmu;
	char				name[UNCORE_PMU_NAME_LEN];
	int				pmu_idx;
	int				func_id;
	bool				registered;
	atomic_t			activeboxes;
	cpumask_t			cpu_mask;
	struct intel_uncore_type	*type;
	struct intel_uncore_box		**boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int dieid;	/* Logical die ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void __iomem *io_addr;
	struct intel_uncore_extra_reg shared_regs[];
};

/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0		0xf70
#define CFL_UNC_CBO_7_PER_CTR0			0xf76

#define UNCORE_BOX_FLAG_INITIATED		0
/* event config registers are 8 bytes apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8		1
/* CFL 8th CBOX has different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2

struct uncore_event_desc {
	struct device_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;
	unsigned int num_counters;
	unsigned int bits;
	unsigned *box_offsets;
};

struct uncore_iio_topology {
	int pci_bus_no;
	int segment;
};

struct uncore_upi_topology {
	int die_to;
	int pmu_idx_to;
	int enabled;
};

struct intel_uncore_topology {
	int pmu_idx;
	union {
		void *untyped;
		struct uncore_iio_topology *iio;
		struct uncore_upi_topology *upi;
	};
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_dieid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
int uncore_die_to_segment(int die);
int uncore_device_to_die(struct pci_dev *dev);

ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf);

static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
	return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
}

#define to_device_attribute(n)	container_of(n, struct device_attribute, attr)
#define to_dev_ext_attribute(n)	container_of(n, struct dev_ext_attribute, attr)
#define attr_to_ext_attr(n)	to_dev_ext_attribute(to_device_attribute(n))

extern int __uncore_max_dies;
#define uncore_max_dies()	(__uncore_max_dies)

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct device *dev,		\
				      struct device_attribute *attr,	\
				      char *page)			\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)

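/*
 * Typical (illustrative) uses of the two macros above in an uncore driver,
 * with hypothetical attribute names and bit ranges; event-description tables
 * are terminated by an all-zero entry:
 *
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 *	DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
 *
 *	static struct uncore_event_desc example_uncore_events[] = {
 *		INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
 *		{ },
 *	};
 */
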
static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
					       unsigned long offset)
{
	if (offset < box->pmu->type->mmio_map_size)
		return true;

	pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n",
		     offset, box->pmu->type->name);

	return false;
}

static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl +
	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

/*
 * In the uncore document, there is no event-code assigned to free running
 * counters. Some events need to be defined to indicate the free running
 * counters. The events are encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, which is the same as
 * the fixed counters.
 *
 * The umask-code is used to distinguish a fixed counter from a free running
 * counter, and different types of free running counters.
 * - For fixed counters, the umask-code is 0x0X.
 *   X indicates the index of the fixed counter, which starts from 0.
 * - For free running counters, the umask-code uses the rest of the space.
 *   It bears the format 0xXY.
 *   X stands for the type of free running counter, which starts from 1.
 *   Y stands for the index of free running counters of the same type, which
 *   starts from 0.
 *
 * For example, there are three types of IIO free running counters on Skylake
 * server: IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters.
 * The event-code for all the free running counters is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask-code starts from 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of the BANDWIDTH counters. BANDWIDTH is
 * the second type, whose umask-code starts from 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}

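/*
 * Worked example, following the Skylake server encoding described above:
 * 'bw_in_port2' is event=0xff,umask=0x22, i.e. config = 0x22ff, so
 * uncore_freerunning_type() returns ((0x22 - 0x10) >> 4) & 0xf = 1 (the
 * BANDWIDTH entry of the freerunning[] array) and uncore_freerunning_idx()
 * returns 0x22 & 0xf = 2 (the third counter of that type).
 */
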
static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       (pmu->type->freerunning[type].box_offsets ?
		pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
		pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

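/*
 * The generic accessors below pick the PCI/MMIO register layout when the box
 * has a PCI device or an MMIO mapping (box->pci_dev or box->io_addr is set)
 * and fall back to the MSR layout otherwise.
 */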
static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

/* Check and reject invalid config */
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	if (is_freerunning_event(event))
		return 0;

	return -EINVAL;
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->dieid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);

extern struct intel_uncore_type *empty_uncore[];
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern struct pci_driver *uncore_pci_sub_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;
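/* Lists of uncore unit types to ignore, terminated by UNCORE_IGNORE_END */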
extern int spr_uncore_units_ignore[];
extern int gnr_uncore_units_ignore[];

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void adl_uncore_cpu_init(void);
void lnl_uncore_cpu_init(void);
void mtl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
void adl_uncore_mmio_init(void);
void lnl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);
int spr_uncore_pci_init(void);
void spr_uncore_cpu_init(void);
void spr_uncore_mmio_init(void);
int gnr_uncore_pci_init(void);
void gnr_uncore_cpu_init(void);
void gnr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);