// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"
#include "uncore_discovery.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC		0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC		0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC		0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC		0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC		0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC		0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC		0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC		0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC		0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC		0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC		0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC		0x191f
#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC		0x1918
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC		0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC		0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC		0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC		0x591f
#define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC		0x5910
#define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC		0x5918
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC		0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC		0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC		0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC	0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC	0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC	0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC	0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC	0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC	0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC	0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC	0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC	0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC	0x3e32
#define PCI_DEVICE_ID_INTEL_AML_YD_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_AML_YQ_IMC		0x590d
#define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC	0x3e34
#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC		0x3e35
#define PCI_DEVICE_ID_INTEL_CML_H1_IMC		0x9b44
#define PCI_DEVICE_ID_INTEL_CML_H2_IMC		0x9b54
#define PCI_DEVICE_ID_INTEL_CML_H3_IMC		0x9b64
#define PCI_DEVICE_ID_INTEL_CML_U1_IMC		0x9b51
#define PCI_DEVICE_ID_INTEL_CML_U2_IMC		0x9b61
#define PCI_DEVICE_ID_INTEL_CML_U3_IMC		0x9b71
#define PCI_DEVICE_ID_INTEL_CML_S1_IMC		0x9b33
#define PCI_DEVICE_ID_INTEL_CML_S2_IMC		0x9b43
#define PCI_DEVICE_ID_INTEL_CML_S3_IMC		0x9b53
#define PCI_DEVICE_ID_INTEL_CML_S4_IMC		0x9b63
#define PCI_DEVICE_ID_INTEL_CML_S5_IMC		0x9b73
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC		0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC		0x8a12
#define PCI_DEVICE_ID_INTEL_TGL_U1_IMC		0x9a02
#define PCI_DEVICE_ID_INTEL_TGL_U2_IMC		0x9a04
#define PCI_DEVICE_ID_INTEL_TGL_U3_IMC		0x9a12
#define PCI_DEVICE_ID_INTEL_TGL_U4_IMC		0x9a14
#define PCI_DEVICE_ID_INTEL_TGL_H_IMC		0x9a36
#define PCI_DEVICE_ID_INTEL_RKL_1_IMC		0x4c43
#define PCI_DEVICE_ID_INTEL_RKL_2_IMC		0x4c53
#define PCI_DEVICE_ID_INTEL_ADL_1_IMC		0x4660
#define PCI_DEVICE_ID_INTEL_ADL_2_IMC		0x4641
#define PCI_DEVICE_ID_INTEL_ADL_3_IMC		0x4601
#define PCI_DEVICE_ID_INTEL_ADL_4_IMC		0x4602
#define PCI_DEVICE_ID_INTEL_ADL_5_IMC		0x4609
#define PCI_DEVICE_ID_INTEL_ADL_6_IMC		0x460a
#define PCI_DEVICE_ID_INTEL_ADL_7_IMC		0x4621
#define PCI_DEVICE_ID_INTEL_ADL_8_IMC		0x4623
#define PCI_DEVICE_ID_INTEL_ADL_9_IMC		0x4629
#define PCI_DEVICE_ID_INTEL_ADL_10_IMC		0x4637
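
/*
 * For reference, IMC_UNCORE_DEV(SNB) below expands to a pci_device_id
 * entry matching vendor PCI_VENDOR_ID_INTEL (0x8086) and device 0x0100,
 * with subvendor/subdevice left as PCI_ANY_ID by PCI_DEVICE(), and with
 * driver_data packing the uncore type (SNB_PCI_UNCORE_IMC) together with
 * box index 0 via UNCORE_PCI_DEV_DATA() from uncore.h.
 */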

#define PCI_DEVICE_ID_INTEL_ADL_11_IMC		0x463b
#define PCI_DEVICE_ID_INTEL_ADL_12_IMC		0x4648
#define PCI_DEVICE_ID_INTEL_ADL_13_IMC		0x4649
#define PCI_DEVICE_ID_INTEL_ADL_14_IMC		0x4650
#define PCI_DEVICE_ID_INTEL_ADL_15_IMC		0x4668
#define PCI_DEVICE_ID_INTEL_ADL_16_IMC		0x4670
#define PCI_DEVICE_ID_INTEL_ADL_17_IMC		0x4614
#define PCI_DEVICE_ID_INTEL_ADL_18_IMC		0x4617
#define PCI_DEVICE_ID_INTEL_ADL_19_IMC		0x4618
#define PCI_DEVICE_ID_INTEL_ADL_20_IMC		0x461B
#define PCI_DEVICE_ID_INTEL_ADL_21_IMC		0x461C
#define PCI_DEVICE_ID_INTEL_RPL_1_IMC		0xA700
#define PCI_DEVICE_ID_INTEL_RPL_2_IMC		0xA702
#define PCI_DEVICE_ID_INTEL_RPL_3_IMC		0xA706
#define PCI_DEVICE_ID_INTEL_RPL_4_IMC		0xA709
#define PCI_DEVICE_ID_INTEL_RPL_5_IMC		0xA701
#define PCI_DEVICE_ID_INTEL_RPL_6_IMC		0xA703
#define PCI_DEVICE_ID_INTEL_RPL_7_IMC		0xA704
#define PCI_DEVICE_ID_INTEL_RPL_8_IMC		0xA705
#define PCI_DEVICE_ID_INTEL_RPL_9_IMC		0xA706
#define PCI_DEVICE_ID_INTEL_RPL_10_IMC		0xA707
#define PCI_DEVICE_ID_INTEL_RPL_11_IMC		0xA708
#define PCI_DEVICE_ID_INTEL_RPL_12_IMC		0xA709
#define PCI_DEVICE_ID_INTEL_RPL_13_IMC		0xA70a
#define PCI_DEVICE_ID_INTEL_RPL_14_IMC		0xA70b
#define PCI_DEVICE_ID_INTEL_RPL_15_IMC		0xA715
#define PCI_DEVICE_ID_INTEL_RPL_16_IMC		0xA716
#define PCI_DEVICE_ID_INTEL_RPL_17_IMC		0xA717
#define PCI_DEVICE_ID_INTEL_RPL_18_IMC		0xA718
#define PCI_DEVICE_ID_INTEL_RPL_19_IMC		0xA719
#define PCI_DEVICE_ID_INTEL_RPL_20_IMC		0xA71A
#define PCI_DEVICE_ID_INTEL_RPL_21_IMC		0xA71B
#define PCI_DEVICE_ID_INTEL_RPL_22_IMC		0xA71C
#define PCI_DEVICE_ID_INTEL_RPL_23_IMC		0xA728
#define PCI_DEVICE_ID_INTEL_RPL_24_IMC		0xA729
#define PCI_DEVICE_ID_INTEL_RPL_25_IMC		0xA72A
#define PCI_DEVICE_ID_INTEL_MTL_1_IMC		0x7d00
#define PCI_DEVICE_ID_INTEL_MTL_2_IMC		0x7d01
#define PCI_DEVICE_ID_INTEL_MTL_3_IMC		0x7d02
#define PCI_DEVICE_ID_INTEL_MTL_4_IMC		0x7d05
#define PCI_DEVICE_ID_INTEL_MTL_5_IMC		0x7d10
#define PCI_DEVICE_ID_INTEL_MTL_6_IMC		0x7d14
#define PCI_DEVICE_ID_INTEL_MTL_7_IMC		0x7d15
#define PCI_DEVICE_ID_INTEL_MTL_8_IMC		0x7d16
#define PCI_DEVICE_ID_INTEL_MTL_9_IMC		0x7d21
#define PCI_DEVICE_ID_INTEL_MTL_10_IMC		0x7d22
#define PCI_DEVICE_ID_INTEL_MTL_11_IMC		0x7d23
#define PCI_DEVICE_ID_INTEL_MTL_12_IMC		0x7d24
#define PCI_DEVICE_ID_INTEL_MTL_13_IMC		0x7d28


#define IMC_UNCORE_DEV(a)						\
{									\
	PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_##a##_IMC),	\
	.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),	\
}

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)
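
/*
 * Example encoding under SNB_UNC_RAW_EVENT_MASK: a raw event with
 * event=0x80, umask=0x01, cmask=0x1 is programmed as
 *	0x80 | (0x01 << 8) | (0x1 << 24) = 0x01000180
 * into the PERFEVTSEL register; SNB_UNC_CTL_EN (bit 22) is OR'ed in by
 * the enable_event callback when the event is scheduled.
 */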

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)

/* ICL Cbo register */
#define ICL_UNC_CBO_CONFIG			0x396
#define ICL_UNC_NUM_CBO_MASK			0xf
#define ICL_UNC_CBO_0_PER_CTR0			0x702
#define ICL_UNC_CBO_MSR_OFFSET			0x8

/* ICL ARB register */
#define ICL_UNC_ARB_PER_CTR			0x3b1
#define ICL_UNC_ARB_PERFEVTSEL			0x3b3

/* ADL uncore global control */
#define ADL_UNC_PERF_GLOBAL_CTL			0x2ff0
#define ADL_UNC_FIXED_CTR_CTRL			0x2fde
#define ADL_UNC_FIXED_CTR			0x2fdf

/* ADL Cbo register */
#define ADL_UNC_CBO_0_PER_CTR0			0x2002
#define ADL_UNC_CBO_0_PERFEVTSEL0		0x2000
#define ADL_UNC_CTL_THRESHOLD			0x3f000000
#define ADL_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 ADL_UNC_CTL_THRESHOLD)

/* ADL ARB register */
#define ADL_UNC_ARB_PER_CTR0			0x2FD2
#define ADL_UNC_ARB_PERFEVTSEL0			0x2FD0
#define ADL_UNC_ARB_MSR_OFFSET			0x8

/* MTL Cbo register */
#define MTL_UNC_CBO_0_PER_CTR0			0x2448
#define MTL_UNC_CBO_0_PERFEVTSEL0		0x2442

/* MTL HAC_ARB register */
#define MTL_UNC_HAC_ARB_CTR			0x2018
#define MTL_UNC_HAC_ARB_CTRL			0x2012

/* MTL ARB register */
#define MTL_UNC_ARB_CTR				0x2418
#define MTL_UNC_ARB_CTRL			0x2412

/* MTL cNCU register */
#define MTL_UNC_CNCU_FIXED_CTR			0x2408
#define MTL_UNC_CNCU_FIXED_CTRL			0x2402
#define MTL_UNC_CNCU_BOX_CTL			0x240e

/* MTL sNCU register */
#define MTL_UNC_SNCU_FIXED_CTR			0x2008
#define MTL_UNC_SNCU_FIXED_CTRL			0x2002
#define MTL_UNC_SNCU_BOX_CTL			0x200e

/* MTL HAC_CBO register */
#define MTL_UNC_HBO_CTR				0x2048
#define MTL_UNC_HBO_CTRL			0x2042

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(threshold2, threshold, "config:24-31");
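
/*
 * These format attributes publish the config bit layout in sysfs under
 * /sys/bus/event_source/devices/<pmu>/format/, so a raw event can be
 * requested from user space, e.g. (assuming the usual uncore PMU naming,
 * such as "uncore_cbox_0"):
 *
 *	perf stat -a -e 'uncore_cbox_0/event=0xff,umask=0x00/' -- sleep 1
 */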

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static const struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
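
/*
 * The second argument of UNCORE_EVENT_CONSTRAINT() is a counter bitmask,
 * so the entries above restrict ARB events 0x80 and 0x83 to counter 0.
 */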

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > topology_num_cores_per_package())
		snb_uncore_cbox.num_boxes = topology_num_cores_per_package();
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}

	/* The 8th CBOX has different MSR space */
	if (box->pmu->pmu_idx == 7)
		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > topology_num_cores_per_package())
		skl_uncore_cbox.num_boxes = topology_num_cores_per_package();
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

static struct intel_uncore_ops icl_uncore_msr_ops = {
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type icl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct uncore_event_desc icl_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
	{ /* end: all zeroes */ },
};

static struct attribute *icl_uncore_clock_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group icl_uncore_clock_format_group = {
	.name = "format",
	.attrs = icl_uncore_clock_formats_attr,
};

static struct intel_uncore_type icl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &icl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type icl_uncore_arb = {
	.name		= "arb",
	.num_counters	= 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_ARB_PER_CTR,
	.event_ctl	= ICL_UNC_ARB_PERFEVTSEL,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *icl_msr_uncores[] = {
	&icl_uncore_cbox,
	&icl_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};
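
/*
 * On ICL and later client parts the number of C-Box units is enumerated
 * by hardware: the low four bits of the ICL_UNC_CBO_CONFIG MSR (0x396,
 * masked by ICL_UNC_NUM_CBO_MASK) are read below to size the cbox PMU.
 */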

static int icl_get_cbox_num(void)
{
	u64 num_boxes;

	rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);

	return num_boxes & ICL_UNC_NUM_CBO_MASK;
}

void icl_uncore_cpu_init(void)
{
	uncore_msr_uncores = icl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
}

static struct intel_uncore_type *tgl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};

static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

void tgl_uncore_cpu_init(void)
{
	uncore_msr_uncores = tgl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
	icl_uncore_cbox.ops = &skl_uncore_msr_ops;
	icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
	skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
}

static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static void adl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops adl_uncore_msr_ops = {
	.init_box	= adl_uncore_msr_init_box,
	.enable_box	= adl_uncore_msr_enable_box,
	.disable_box	= adl_uncore_msr_disable_box,
	.exit_box	= adl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct attribute *adl_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_threshold.attr,
	NULL,
};

static const struct attribute_group adl_uncore_format_group = {
	.name		= "format",
	.attrs		= adl_uncore_formats_attr,
};

static struct intel_uncore_type adl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= ADL_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_arb = {
	.name		= "arb",
	.num_counters	= 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_ARB_PER_CTR0,
	.event_ctl	= ADL_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ADL_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= ADL_UNC_FIXED_CTR,
	.fixed_ctl	= ADL_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &adl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type *adl_msr_uncores[] = {
	&adl_uncore_cbox,
	&adl_uncore_arb,
	&adl_uncore_clockbox,
	NULL,
};

void adl_uncore_cpu_init(void)
{
	adl_uncore_cbox.num_boxes = icl_get_cbox_num();
	uncore_msr_uncores = adl_msr_uncores;
}

static struct intel_uncore_type mtl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= MTL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= MTL_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type mtl_uncore_hac_arb = {
	.name		= "hac_arb",
	.num_counters	= 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= MTL_UNC_HAC_ARB_CTR,
	.event_ctl	= MTL_UNC_HAC_ARB_CTRL,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type mtl_uncore_arb = {
	.name		= "arb",
	.num_counters	= 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= MTL_UNC_ARB_CTR,
	.event_ctl	= MTL_UNC_ARB_CTRL,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type mtl_uncore_hac_cbox = {
	.name		= "hac_cbox",
	.num_counters	= 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= MTL_UNC_HBO_CTR,
	.event_ctl	= MTL_UNC_HBO_CTRL,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static void mtl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(uncore_msr_box_ctl(box), SNB_UNC_GLOBAL_CTL_EN);
}

static struct intel_uncore_ops mtl_uncore_msr_ops = {
	.init_box	= mtl_uncore_msr_init_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type mtl_uncore_cncu = {
	.name		= "cncu",
	.num_counters	= 1,
	.num_boxes	= 1,
	.box_ctl	= MTL_UNC_CNCU_BOX_CTL,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= MTL_UNC_CNCU_FIXED_CTR,
	.fixed_ctl	= MTL_UNC_CNCU_FIXED_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &mtl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type mtl_uncore_sncu = {
	.name		= "sncu",
	.num_counters	= 1,
	.num_boxes	= 1,
	.box_ctl	= MTL_UNC_SNCU_BOX_CTL,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= MTL_UNC_SNCU_FIXED_CTR,
	.fixed_ctl	= MTL_UNC_SNCU_FIXED_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &mtl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type *mtl_msr_uncores[] = {
	&mtl_uncore_cbox,
	&mtl_uncore_hac_arb,
	&mtl_uncore_arb,
	&mtl_uncore_hac_cbox,
	&mtl_uncore_cncu,
	&mtl_uncore_sncu,
	NULL
};

void mtl_uncore_cpu_init(void)
{
	mtl_uncore_cbox.num_boxes = icl_get_cbox_num();
	uncore_msr_uncores = mtl_msr_uncores;
}

static struct intel_uncore_type *lnl_msr_uncores[] = {
	&mtl_uncore_cbox,
	&mtl_uncore_arb,
	NULL
};

#define LNL_UNC_MSR_GLOBAL_CTL			0x240e

static void lnl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(LNL_UNC_MSR_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static struct intel_uncore_ops lnl_uncore_msr_ops = {
	.init_box	= lnl_uncore_msr_init_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

void lnl_uncore_cpu_init(void)
{
	mtl_uncore_cbox.num_boxes = 4;
	mtl_uncore_cbox.ops = &lnl_uncore_msr_ops;
	uncore_msr_uncores = lnl_msr_uncores;
}

enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
	INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),

	{ /* end: all zeroes */ },
};
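
/*
 * The 6.103515625e-5 scale above is 64B / 1MiB = 2^-14: these IMC
 * counters count 64-byte cache lines, so applying the scale reports
 * bandwidth directly in MiB.
 */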

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

/* BW break down - legacy counters */
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS		0x3
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE	0x5040
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS		0x4
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE	0x5044
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS		0x5
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE	0x5048

enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA_READS		= 0,
	SNB_PCI_UNCORE_IMC_DATA_WRITES,
	SNB_PCI_UNCORE_IMC_GT_REQUESTS,
	SNB_PCI_UNCORE_IMC_IA_REQUESTS,
	SNB_PCI_UNCORE_IMC_IO_REQUESTS,

	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_DATA_WRITES]	= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_GT_REQUESTS]	= { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IA_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IO_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
};
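
/*
 * Each entry above is { counter_base, counter_offset, box_offset,
 * num_counters, bits } per struct freerunning_counters in uncore.h:
 * one 32-bit free-running counter per type, with no per-counter or
 * per-box stride.
 */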

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.idx = idx;
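
	/*
	 * For example, cfg == SNB_UNCORE_PCI_IMC_DATA_READS (0x1) becomes
	 * config 0x10ff below: event 0xff with the free-running counter
	 * index carried in the umask field, matching the standard
	 * free-running event encoding used by the generic uncore code.
	 */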

	/* Convert to standard encoding format for freerunning counters */
	event->hw.config = ((cfg - 1) << 8) | 0x10ff;

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_dieid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}

static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * SNB IMC counters are 32-bit and are laid out back to back
	 * in MMIO space. Therefore we must use a 32-bit accessor function
	 * here; using readq() from uncore_mmio_read_counter() causes
	 * problems because it reads 64 bits at a time. That is okay for
	 * uncore_perf_event_update(), which drops the upper 32 bits, but
	 * not for a plain uncore_read_counter() as invoked from
	 * uncore_pmu_event_start().
	 */
	return (u64)readl(box->io_addr + hwc->event_base);
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= uncore_pmu_event_add,
	.del		= uncore_pmu_event_del,
	.start		= uncore_pmu_event_start,
	.stop		= uncore_pmu_event_stop,
	.read		= uncore_pmu_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name			= "imc",
	.num_counters		= 5,
	.num_boxes		= 1,
	.num_freerunning_types	= SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNB_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning		= snb_uncore_imc_freerunning,
	.event_descs		= snb_uncore_imc_events,
	.format_group		= &snb_uncore_imc_format_group,
	.ops			= &snb_uncore_imc_ops,
	.pmu			= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(SNB),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(IVB),
	IMC_UNCORE_DEV(IVB_E3),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(HSW),
	IMC_UNCORE_DEV(HSW_U),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(BDW),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(SKL_Y),
	IMC_UNCORE_DEV(SKL_U),
	IMC_UNCORE_DEV(SKL_HD),
	IMC_UNCORE_DEV(SKL_HQ),
	IMC_UNCORE_DEV(SKL_SD),
	IMC_UNCORE_DEV(SKL_SQ),
	IMC_UNCORE_DEV(SKL_E3),
	IMC_UNCORE_DEV(KBL_Y),
	IMC_UNCORE_DEV(KBL_U),
	IMC_UNCORE_DEV(KBL_UQ),
	IMC_UNCORE_DEV(KBL_SD),
	IMC_UNCORE_DEV(KBL_SQ),
	IMC_UNCORE_DEV(KBL_HQ),
	IMC_UNCORE_DEV(KBL_WQ),
	IMC_UNCORE_DEV(CFL_2U),
	IMC_UNCORE_DEV(CFL_4U),
	IMC_UNCORE_DEV(CFL_4H),
	IMC_UNCORE_DEV(CFL_6H),
	IMC_UNCORE_DEV(CFL_2S_D),
	IMC_UNCORE_DEV(CFL_4S_D),
	IMC_UNCORE_DEV(CFL_6S_D),
	IMC_UNCORE_DEV(CFL_8S_D),
	IMC_UNCORE_DEV(CFL_4S_W),
	IMC_UNCORE_DEV(CFL_6S_W),
	IMC_UNCORE_DEV(CFL_8S_W),
	IMC_UNCORE_DEV(CFL_4S_S),
	IMC_UNCORE_DEV(CFL_6S_S),
	IMC_UNCORE_DEV(CFL_8S_S),
	IMC_UNCORE_DEV(AML_YD),
	IMC_UNCORE_DEV(AML_YQ),
	IMC_UNCORE_DEV(WHL_UQ),
	IMC_UNCORE_DEV(WHL_4_UQ),
	IMC_UNCORE_DEV(WHL_UD),
	IMC_UNCORE_DEV(CML_H1),
	IMC_UNCORE_DEV(CML_H2),
	IMC_UNCORE_DEV(CML_H3),
	IMC_UNCORE_DEV(CML_U1),
	IMC_UNCORE_DEV(CML_U2),
	IMC_UNCORE_DEV(CML_U3),
	IMC_UNCORE_DEV(CML_S1),
	IMC_UNCORE_DEV(CML_S2),
	IMC_UNCORE_DEV(CML_S3),
	IMC_UNCORE_DEV(CML_S4),
	IMC_UNCORE_DEV(CML_S5),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id icl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(ICL_U),
	IMC_UNCORE_DEV(ICL_U2),
	IMC_UNCORE_DEV(RKL_1),
	IMC_UNCORE_DEV(RKL_2),
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

static struct pci_driver icl_uncore_pci_driver = {
	.name		= "icl_uncore",
	.id_table	= icl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};

#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),	/* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver),	/* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),	/* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),	/* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),	/* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),	/* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),	/* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),	/* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),	/* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),	/* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),	/* 6th Gen Core S Quad Core */
	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),	/* Xeon E3 V5 Gen Core processor */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),	/* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),	/* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),	/* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),	/* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),	/* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),	/* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),	/* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Dual Core */
	IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),
	IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),
	{ /* end marker */ }
};


#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}
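
/*
 * NHM_UNC_GLOBAL_CTL_EN_PC_ALL sets the low eight bits to enable all
 * eight general-purpose uncore counters, and NHM_UNC_GLOBAL_CTL_EN_FC
 * (bit 32) enables the fixed counter, so the single MSR write above
 * turns the whole uncore PMU on.
 */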

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters	= 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */

/* Tiger Lake MMIO uncore support */

static const struct pci_device_id tgl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(TGL_U1),
	IMC_UNCORE_DEV(TGL_U2),
	IMC_UNCORE_DEV(TGL_U3),
	IMC_UNCORE_DEV(TGL_U4),
	IMC_UNCORE_DEV(TGL_H),
	IMC_UNCORE_DEV(ADL_1),
	IMC_UNCORE_DEV(ADL_2),
	IMC_UNCORE_DEV(ADL_3),
	IMC_UNCORE_DEV(ADL_4),
	IMC_UNCORE_DEV(ADL_5),
	IMC_UNCORE_DEV(ADL_6),
	IMC_UNCORE_DEV(ADL_7),
	IMC_UNCORE_DEV(ADL_8),
	IMC_UNCORE_DEV(ADL_9),
	IMC_UNCORE_DEV(ADL_10),
	IMC_UNCORE_DEV(ADL_11),
	IMC_UNCORE_DEV(ADL_12),
	IMC_UNCORE_DEV(ADL_13),
	IMC_UNCORE_DEV(ADL_14),
	IMC_UNCORE_DEV(ADL_15),
	IMC_UNCORE_DEV(ADL_16),
	IMC_UNCORE_DEV(ADL_17),
	IMC_UNCORE_DEV(ADL_18),
	IMC_UNCORE_DEV(ADL_19),
	IMC_UNCORE_DEV(ADL_20),
	IMC_UNCORE_DEV(ADL_21),
	IMC_UNCORE_DEV(RPL_1),
	IMC_UNCORE_DEV(RPL_2),
	IMC_UNCORE_DEV(RPL_3),
	IMC_UNCORE_DEV(RPL_4),
	IMC_UNCORE_DEV(RPL_5),
	IMC_UNCORE_DEV(RPL_6),
	IMC_UNCORE_DEV(RPL_7),
	IMC_UNCORE_DEV(RPL_8),
	IMC_UNCORE_DEV(RPL_9),
	IMC_UNCORE_DEV(RPL_10),
	IMC_UNCORE_DEV(RPL_11),
	IMC_UNCORE_DEV(RPL_12),
	IMC_UNCORE_DEV(RPL_13),
	IMC_UNCORE_DEV(RPL_14),
	IMC_UNCORE_DEV(RPL_15),
	IMC_UNCORE_DEV(RPL_16),
	IMC_UNCORE_DEV(RPL_17),
	IMC_UNCORE_DEV(RPL_18),
	IMC_UNCORE_DEV(RPL_19),
	IMC_UNCORE_DEV(RPL_20),
	IMC_UNCORE_DEV(RPL_21),
	IMC_UNCORE_DEV(RPL_22),
	IMC_UNCORE_DEV(RPL_23),
	IMC_UNCORE_DEV(RPL_24),
	IMC_UNCORE_DEV(RPL_25),
	IMC_UNCORE_DEV(MTL_1),
	IMC_UNCORE_DEV(MTL_2),
	IMC_UNCORE_DEV(MTL_3),
	IMC_UNCORE_DEV(MTL_4),
	IMC_UNCORE_DEV(MTL_5),
	IMC_UNCORE_DEV(MTL_6),
	IMC_UNCORE_DEV(MTL_7),
	IMC_UNCORE_DEV(MTL_8),
	IMC_UNCORE_DEV(MTL_9),
	IMC_UNCORE_DEV(MTL_10),
	IMC_UNCORE_DEV(MTL_11),
	IMC_UNCORE_DEV(MTL_12),
	IMC_UNCORE_DEV(MTL_13),
	{ /* end: all zeroes */ }
};

enum perf_tgl_uncore_imc_freerunning_types {
	TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
	TGL_MMIO_UNCORE_IMC_DATA_READ,
	TGL_MMIO_UNCORE_IMC_DATA_WRITE,
	TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x5040, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x5058, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0x50A0, 0x0, 0x0, 1, 64 },
};

static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0xd840, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0xd858, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xd8A0, 0x0, 0x0, 1, 64 },
};

static struct uncore_event_desc tgl_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_total,        "event=0xff,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(data_total.scale,  "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_total.unit,   "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_read,         "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(data_read.scale,   "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_read.unit,    "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_write,        "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(data_write.scale,  "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_write.unit,   "MiB"),

	{ /* end: all zeroes */ }
};

static struct pci_dev *tgl_uncore_get_mc_dev(void)
{
	const struct pci_device_id *ids = tgl_uncore_pci_ids;
	struct pci_dev *mc_dev = NULL;

	while (ids && ids->vendor) {
		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL);
		if (mc_dev)
			return mc_dev;
		ids++;
	}

	/* Just try to grab 00:00.0 device */
	if (!mc_dev)
		mc_dev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));

	return mc_dev;
}

#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET		0x10000
#define TGL_UNCORE_PCI_IMC_MAP_SIZE		0xe000

static void
uncore_get_box_mmio_addr(struct intel_uncore_box *box,
			 unsigned int base_offset,
			 int bar_offset, int step)
{
	struct pci_dev *pdev = tgl_uncore_get_mc_dev();
	struct intel_uncore_pmu *pmu = box->pmu;
	struct intel_uncore_type *type = pmu->type;
	resource_size_t addr;
	u32 bar;

	if (!pdev) {
		pr_warn("perf uncore: Cannot find matched IMC device.\n");
		return;
	}

	pci_read_config_dword(pdev, bar_offset, &bar);
	if (!(bar & BIT(0))) {
		pr_warn("perf uncore: BAR 0x%x is disabled. Failed to map %s counters.\n",
			bar_offset, type->name);
		pci_dev_put(pdev);
		return;
	}
	bar &= ~BIT(0);
	addr = (resource_size_t)(bar + step * pmu->pmu_idx);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, bar_offset + 4, &bar);
	addr |= ((resource_size_t)bar << 32);
#endif

	addr += base_offset;
	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

	pci_dev_put(pdev);
}
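
/*
 * The address computed above is BAR + step * pmu_idx + base_offset. For
 * the TGL IMC free-running boxes, for example, __uncore_imc_init_box()
 * below passes base_offset 0, so box 0 maps at the BAR itself and box 1
 * at BAR + TGL_UNCORE_MMIO_IMC_MEM_OFFSET (0x10000).
 */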

static void __uncore_imc_init_box(struct intel_uncore_box *box,
				  unsigned int base_offset)
{
	uncore_get_box_mmio_addr(box, base_offset,
				 SNB_UNCORE_PCI_IMC_BAR_OFFSET,
				 TGL_UNCORE_MMIO_IMC_MEM_OFFSET);
}

static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, 0);
}

static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
	.init_box	= tgl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct attribute *tgl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL
};

static const struct attribute_group tgl_uncore_imc_format_group = {
	.name = "format",
	.attrs = tgl_uncore_imc_formats_attr,
};

static struct intel_uncore_type tgl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= TGL_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning		= tgl_uncore_imc_freerunning,
	.ops			= &tgl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *tgl_mmio_uncores[] = {
	&tgl_uncore_imc_free_running,
	NULL
};

void tgl_l_uncore_mmio_init(void)
{
	tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
	uncore_mmio_uncores = tgl_mmio_uncores;
}

void tgl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = tgl_mmio_uncores;
}

/* end of Tiger Lake MMIO uncore support */

/* Alder Lake MMIO uncore support */
#define ADL_UNCORE_IMC_BASE			0xd900
#define ADL_UNCORE_IMC_MAP_SIZE			0x200
#define ADL_UNCORE_IMC_CTR			0xe8
#define ADL_UNCORE_IMC_CTRL			0xd0
#define ADL_UNCORE_IMC_GLOBAL_CTL		0xc0
#define ADL_UNCORE_IMC_BOX_CTL			0xc4
#define ADL_UNCORE_IMC_FREERUNNING_BASE		0xd800
#define ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE	0x100

#define ADL_UNCORE_IMC_CTL_FRZ			(1 << 0)
#define ADL_UNCORE_IMC_CTL_RST_CTRL		(1 << 1)
#define ADL_UNCORE_IMC_CTL_RST_CTRS		(1 << 2)
#define ADL_UNCORE_IMC_CTL_INT			(ADL_UNCORE_IMC_CTL_RST_CTRL | \
						ADL_UNCORE_IMC_CTL_RST_CTRS)

static void adl_uncore_imc_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_BASE);

	/* The global control in MC1 can control both MCs. */
	if (box->io_addr && (box->pmu->pmu_idx == 1))
		writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + ADL_UNCORE_IMC_GLOBAL_CTL);
}

static void adl_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(ADL_UNCORE_IMC_CTL_FRZ, box->io_addr + uncore_mmio_box_ctl(box));
}

static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr + uncore_mmio_box_ctl(box));
}

#define MMIO_UNCORE_COMMON_OPS()					\
	.exit_box	= uncore_mmio_exit_box,				\
	.disable_box	= adl_uncore_mmio_disable_box,			\
	.enable_box	= adl_uncore_mmio_enable_box,			\
	.disable_event	= intel_generic_uncore_mmio_disable_event,	\
	.enable_event	= intel_generic_uncore_mmio_enable_event,	\
	.read_counter	= uncore_mmio_read_counter,

static struct intel_uncore_ops adl_uncore_mmio_ops = {
	.init_box	= adl_uncore_imc_init_box,
	MMIO_UNCORE_COMMON_OPS()
};

#define ADL_UNC_CTL_CHMASK_MASK			0x00000f00
#define ADL_UNC_IMC_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 ADL_UNC_CTL_CHMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET)

static struct attribute *adl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_chmask.attr,
	&format_attr_edge.attr,
	NULL,
};

static const struct attribute_group adl_uncore_imc_format_group = {
	.name		= "format",
	.attrs		= adl_uncore_imc_formats_attr,
};

static struct intel_uncore_type adl_uncore_imc = {
	.name		= "imc",
	.num_counters	= 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 64,
	.perf_ctr	= ADL_UNCORE_IMC_CTR,
	.event_ctl	= ADL_UNCORE_IMC_CTRL,
	.event_mask	= ADL_UNC_IMC_EVENT_MASK,
	.box_ctl	= ADL_UNCORE_IMC_BOX_CTL,
	.mmio_offset	= 0,
	.mmio_map_size	= ADL_UNCORE_IMC_MAP_SIZE,
	.ops		= &adl_uncore_mmio_ops,
	.format_group	= &adl_uncore_imc_format_group,
};

enum perf_adl_uncore_imc_freerunning_types {
	ADL_MMIO_UNCORE_IMC_DATA_TOTAL,
	ADL_MMIO_UNCORE_IMC_DATA_READ,
	ADL_MMIO_UNCORE_IMC_DATA_WRITE,
	ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

static struct freerunning_counters adl_uncore_imc_freerunning[] = {
	[ADL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x40, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x58, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xA0, 0x0, 0x0, 1, 64 },
};

static void adl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_FREERUNNING_BASE);
}

static struct intel_uncore_ops adl_uncore_imc_freerunning_ops = {
	.init_box	= adl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type adl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE,
	.freerunning		= adl_uncore_imc_freerunning,
	.ops			= &adl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *adl_mmio_uncores[] = {
	&adl_uncore_imc,
	&adl_uncore_imc_free_running,
	NULL
};

void adl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = adl_mmio_uncores;
}

/* end of Alder Lake MMIO uncore support */

/* Lunar Lake MMIO uncore support */
#define LNL_UNCORE_PCI_SAFBAR_OFFSET		0x68
#define LNL_UNCORE_MAP_SIZE			0x1000
#define LNL_UNCORE_SNCU_BASE			0xE4B000
#define LNL_UNCORE_SNCU_CTR			0x390
#define LNL_UNCORE_SNCU_CTRL			0x398
#define LNL_UNCORE_SNCU_BOX_CTL			0x380
#define LNL_UNCORE_GLOBAL_CTL			0x700
#define LNL_UNCORE_HBO_BASE			0xE54000
#define LNL_UNCORE_HBO_OFFSET			-4096
#define LNL_UNCORE_HBO_CTR			0x570
#define LNL_UNCORE_HBO_CTRL			0x550
#define LNL_UNCORE_HBO_BOX_CTL			0x548

#define LNL_UNC_CTL_THRESHOLD			0xff000000
#define LNL_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 LNL_UNC_CTL_THRESHOLD)

static struct attribute *lnl_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_threshold2.attr,
	NULL
};

static const struct attribute_group lnl_uncore_format_group = {
	.name		= "format",
	.attrs		= lnl_uncore_formats_attr,
};

static void lnl_uncore_hbo_init_box(struct intel_uncore_box *box)
{
	uncore_get_box_mmio_addr(box, LNL_UNCORE_HBO_BASE,
				 LNL_UNCORE_PCI_SAFBAR_OFFSET,
				 LNL_UNCORE_HBO_OFFSET);
}
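
/*
 * Note the negative step: with LNL_UNCORE_HBO_OFFSET == -4096, HBO box 1
 * is mapped 4 KiB below box 0, both relative to the BAR read from
 * LNL_UNCORE_PCI_SAFBAR_OFFSET plus LNL_UNCORE_HBO_BASE.
 */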

static struct intel_uncore_ops lnl_uncore_hbo_ops = {
	.init_box	= lnl_uncore_hbo_init_box,
	MMIO_UNCORE_COMMON_OPS()
};

static struct intel_uncore_type lnl_uncore_hbo = {
	.name		= "hbo",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 64,
	.perf_ctr	= LNL_UNCORE_HBO_CTR,
	.event_ctl	= LNL_UNCORE_HBO_CTRL,
	.event_mask	= LNL_UNC_RAW_EVENT_MASK,
	.box_ctl	= LNL_UNCORE_HBO_BOX_CTL,
	.mmio_map_size	= LNL_UNCORE_MAP_SIZE,
	.ops		= &lnl_uncore_hbo_ops,
	.format_group	= &lnl_uncore_format_group,
};

static void lnl_uncore_sncu_init_box(struct intel_uncore_box *box)
{
	uncore_get_box_mmio_addr(box, LNL_UNCORE_SNCU_BASE,
				 LNL_UNCORE_PCI_SAFBAR_OFFSET,
				 0);

	if (box->io_addr)
		writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + LNL_UNCORE_GLOBAL_CTL);
}

static struct intel_uncore_ops lnl_uncore_sncu_ops = {
	.init_box	= lnl_uncore_sncu_init_box,
	MMIO_UNCORE_COMMON_OPS()
};

static struct intel_uncore_type lnl_uncore_sncu = {
	.name		= "sncu",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 64,
	.perf_ctr	= LNL_UNCORE_SNCU_CTR,
	.event_ctl	= LNL_UNCORE_SNCU_CTRL,
	.event_mask	= LNL_UNC_RAW_EVENT_MASK,
	.box_ctl	= LNL_UNCORE_SNCU_BOX_CTL,
	.mmio_map_size	= LNL_UNCORE_MAP_SIZE,
	.ops		= &lnl_uncore_sncu_ops,
	.format_group	= &lnl_uncore_format_group,
};

static struct intel_uncore_type *lnl_mmio_uncores[] = {
	&adl_uncore_imc,
	&lnl_uncore_hbo,
	&lnl_uncore_sncu,
	&adl_uncore_imc_free_running,
	NULL
};

void lnl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = lnl_mmio_uncores;
}

/* end of Lunar Lake MMIO uncore support */