/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
#define PCI_DEVICE_ID_INTEL_SKL_IMC	0x191f
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x190c

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK		0x000000ff
#define SNB_UNC_CTL_UMASK_MASK		0x0000ff00
#define SNB_UNC_CTL_EDGE_DET		(1 << 18)
#define SNB_UNC_CTL_EN			(1 << 22)
#define SNB_UNC_CTL_INVERT		(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK		0x1f000000
#define NHM_UNC_CTL_CMASK_MASK		0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN	(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK		(SNB_UNC_CTL_EV_SEL_MASK | \
					 SNB_UNC_CTL_UMASK_MASK | \
					 SNB_UNC_CTL_EDGE_DET | \
					 SNB_UNC_CTL_INVERT | \
					 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK		(SNB_UNC_CTL_EV_SEL_MASK | \
					 SNB_UNC_CTL_UMASK_MASK | \
					 SNB_UNC_CTL_EDGE_DET | \
					 SNB_UNC_CTL_INVERT | \
					 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL		0x391
#define SNB_UNC_FIXED_CTR_CTRL		0x394
#define SNB_UNC_FIXED_CTR		0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL	((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN		(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0	0x700
#define SNB_UNC_CBO_0_PER_CTR0		0x706
#define SNB_UNC_CBO_MSR_OFFSET		0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0		0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0		0x3b2
#define SNB_UNC_ARB_MSR_OFFSET		0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL		0x391
#define NHM_UNC_FIXED_CTR		0x394
#define NHM_UNC_FIXED_CTR_CTRL		0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL	((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC	(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0		0x3c0
#define NHM_UNC_UNCORE_PMC0		0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL		0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL	((1 << 5) - 1)

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		/* fixed counter: no event select, only the enable bit */
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}
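
/*
 * Illustrative sketch (not driver code): the generic uncore code derives the
 * per-box MSR addresses from the box index and ->msr_offset, so for Cbo N:
 *
 *	event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0 + N * SNB_UNC_CBO_MSR_OFFSET;
 *	perf_ctr  = SNB_UNC_CBO_0_PER_CTR0    + N * SNB_UNC_CBO_MSR_OFFSET;
 */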

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static struct attribute_group snb_uncore_format_group = {
	.name = "format",
	.attrs = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
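
/*
 * Note (informational): the Skylake client Cbo below reuses the SNB counter
 * and control MSR layout; only the number of Cbo slices (up to five) and the
 * global control MSR (0xe01) differ.
 */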

static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
	iounmap(box->io_addr);
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}
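
/*
 * Illustrative note: each data_reads/data_writes increment corresponds to one
 * 64-byte cache line, which is why the events above export a scale of
 * 6.103515625e-5 (= 64 / 2^20) so that perf reports the totals in MiB:
 *
 *	MiB = count * 64 / (1024 * 1024) = count * 6.103515625e-5
 */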

/*
 * Custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with generic uncore
 * logic. This also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
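
/*
 * Illustrative note: ->start() above snapshots the free running counter into
 * prev_count, and the periodic hrtimer plus the PERF_EF_UPDATE path in
 * ->stop() fold deltas via uncore_perf_event_update(), so the 32-bit MMIO
 * counters are accumulated before they can wrap.
 */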

static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	box->n_events++;

	return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			--box->n_events;
			break;
		}
	}
}

int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= snb_uncore_imc_event_add,
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
	.read		= uncore_pmu_event_read,
};
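
/*
 * Example usage (illustrative; assumes the usual uncore PMU naming, i.e. the
 * IMC shows up as "uncore_imc" under /sys/bus/event_source/devices/):
 *
 *	perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ sleep 1
 */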

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= snb_uncore_imc_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters	= 2,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 32,
	.fixed_ctr	= SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.perf_ctr	= SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask	= SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};

#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	{ /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
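
/*
 * Illustrative note: imc_uncore_find_dev() walks desktop_imc_pci_ids in order
 * and returns the pci_driver of the first IMC device that is actually present
 * (snb_pci2phy_map_init() only succeeds when the PCI device exists), so the
 * per-generation *_uncore_pci_init() wrappers can all share one probe path.
 */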

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",	/* empty name: the PMU is exposed as plain "uncore" */
	.num_counters	= 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */