// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include <asm/msr.h>
#include "uncore.h"
#include "uncore_discovery.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC		0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC		0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC		0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC		0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC		0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC		0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC		0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC		0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC		0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC		0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC		0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC		0x191f
#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC		0x1918
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC		0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC		0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC		0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC		0x591f
#define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC		0x5910
#define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC		0x5918
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC		0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC		0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC		0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC	0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC	0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC	0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC	0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC	0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC	0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC	0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC	0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC	0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC	0x3e32
#define PCI_DEVICE_ID_INTEL_AML_YD_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_AML_YQ_IMC		0x590d
#define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC	0x3e34
#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC		0x3e35
#define PCI_DEVICE_ID_INTEL_CML_H1_IMC		0x9b44
#define PCI_DEVICE_ID_INTEL_CML_H2_IMC		0x9b54
#define PCI_DEVICE_ID_INTEL_CML_H3_IMC		0x9b64
#define PCI_DEVICE_ID_INTEL_CML_U1_IMC		0x9b51
#define PCI_DEVICE_ID_INTEL_CML_U2_IMC		0x9b61
#define PCI_DEVICE_ID_INTEL_CML_U3_IMC		0x9b71
#define PCI_DEVICE_ID_INTEL_CML_S1_IMC		0x9b33
#define PCI_DEVICE_ID_INTEL_CML_S2_IMC		0x9b43
#define PCI_DEVICE_ID_INTEL_CML_S3_IMC		0x9b53
#define PCI_DEVICE_ID_INTEL_CML_S4_IMC		0x9b63
#define PCI_DEVICE_ID_INTEL_CML_S5_IMC		0x9b73
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC		0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC		0x8a12
#define PCI_DEVICE_ID_INTEL_TGL_U1_IMC		0x9a02
#define PCI_DEVICE_ID_INTEL_TGL_U2_IMC		0x9a04
#define PCI_DEVICE_ID_INTEL_TGL_U3_IMC		0x9a12
#define PCI_DEVICE_ID_INTEL_TGL_U4_IMC		0x9a14
#define PCI_DEVICE_ID_INTEL_TGL_H_IMC		0x9a36
#define PCI_DEVICE_ID_INTEL_RKL_1_IMC		0x4c43
#define PCI_DEVICE_ID_INTEL_RKL_2_IMC		0x4c53
#define PCI_DEVICE_ID_INTEL_ADL_1_IMC		0x4660
#define PCI_DEVICE_ID_INTEL_ADL_2_IMC		0x4641
#define PCI_DEVICE_ID_INTEL_ADL_3_IMC		0x4601
#define PCI_DEVICE_ID_INTEL_ADL_4_IMC		0x4602
#define PCI_DEVICE_ID_INTEL_ADL_5_IMC		0x4609
#define PCI_DEVICE_ID_INTEL_ADL_6_IMC		0x460a
#define PCI_DEVICE_ID_INTEL_ADL_7_IMC		0x4621
#define PCI_DEVICE_ID_INTEL_ADL_8_IMC		0x4623
#define PCI_DEVICE_ID_INTEL_ADL_9_IMC		0x4629
#define PCI_DEVICE_ID_INTEL_ADL_10_IMC		0x4637
#define PCI_DEVICE_ID_INTEL_ADL_11_IMC		0x463b
#define PCI_DEVICE_ID_INTEL_ADL_12_IMC		0x4648
#define PCI_DEVICE_ID_INTEL_ADL_13_IMC		0x4649
#define PCI_DEVICE_ID_INTEL_ADL_14_IMC		0x4650
#define PCI_DEVICE_ID_INTEL_ADL_15_IMC		0x4668
#define PCI_DEVICE_ID_INTEL_ADL_16_IMC		0x4670
#define PCI_DEVICE_ID_INTEL_ADL_17_IMC		0x4614
#define PCI_DEVICE_ID_INTEL_ADL_18_IMC		0x4617
#define PCI_DEVICE_ID_INTEL_ADL_19_IMC		0x4618
#define PCI_DEVICE_ID_INTEL_ADL_20_IMC		0x461B
#define PCI_DEVICE_ID_INTEL_ADL_21_IMC		0x461C
#define PCI_DEVICE_ID_INTEL_RPL_1_IMC		0xA700
#define PCI_DEVICE_ID_INTEL_RPL_2_IMC		0xA702
#define PCI_DEVICE_ID_INTEL_RPL_3_IMC		0xA706
#define PCI_DEVICE_ID_INTEL_RPL_4_IMC		0xA709
#define PCI_DEVICE_ID_INTEL_RPL_5_IMC		0xA701
#define PCI_DEVICE_ID_INTEL_RPL_6_IMC		0xA703
#define PCI_DEVICE_ID_INTEL_RPL_7_IMC		0xA704
#define PCI_DEVICE_ID_INTEL_RPL_8_IMC		0xA705
#define PCI_DEVICE_ID_INTEL_RPL_9_IMC		0xA706
#define PCI_DEVICE_ID_INTEL_RPL_10_IMC		0xA707
#define PCI_DEVICE_ID_INTEL_RPL_11_IMC		0xA708
#define PCI_DEVICE_ID_INTEL_RPL_12_IMC		0xA709
#define PCI_DEVICE_ID_INTEL_RPL_13_IMC		0xA70a
#define PCI_DEVICE_ID_INTEL_RPL_14_IMC		0xA70b
#define PCI_DEVICE_ID_INTEL_RPL_15_IMC		0xA715
#define PCI_DEVICE_ID_INTEL_RPL_16_IMC		0xA716
#define PCI_DEVICE_ID_INTEL_RPL_17_IMC		0xA717
#define PCI_DEVICE_ID_INTEL_RPL_18_IMC		0xA718
#define PCI_DEVICE_ID_INTEL_RPL_19_IMC		0xA719
#define PCI_DEVICE_ID_INTEL_RPL_20_IMC		0xA71A
#define PCI_DEVICE_ID_INTEL_RPL_21_IMC		0xA71B
#define PCI_DEVICE_ID_INTEL_RPL_22_IMC		0xA71C
#define PCI_DEVICE_ID_INTEL_RPL_23_IMC		0xA728
#define PCI_DEVICE_ID_INTEL_RPL_24_IMC		0xA729
#define PCI_DEVICE_ID_INTEL_RPL_25_IMC		0xA72A
#define PCI_DEVICE_ID_INTEL_MTL_1_IMC		0x7d00
#define PCI_DEVICE_ID_INTEL_MTL_2_IMC		0x7d01
#define PCI_DEVICE_ID_INTEL_MTL_3_IMC		0x7d02
#define PCI_DEVICE_ID_INTEL_MTL_4_IMC		0x7d05
#define PCI_DEVICE_ID_INTEL_MTL_5_IMC		0x7d10
#define PCI_DEVICE_ID_INTEL_MTL_6_IMC		0x7d14
#define PCI_DEVICE_ID_INTEL_MTL_7_IMC		0x7d15
#define PCI_DEVICE_ID_INTEL_MTL_8_IMC		0x7d16
#define PCI_DEVICE_ID_INTEL_MTL_9_IMC		0x7d21
#define PCI_DEVICE_ID_INTEL_MTL_10_IMC		0x7d22
#define PCI_DEVICE_ID_INTEL_MTL_11_IMC		0x7d23
#define PCI_DEVICE_ID_INTEL_MTL_12_IMC		0x7d24
#define PCI_DEVICE_ID_INTEL_MTL_13_IMC		0x7d28

#define IMC_UNCORE_DEV(a)						\
{									\
	PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_##a##_IMC),	\
	.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),	\
}

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)
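/*
 * Illustrative example (not from the original source): with the SNB mask
 * above, a raw Cbox event with event=0x22, umask=0x81 and edge detect
 * would be encoded as 0x22 | (0x81 << 8) | (1 << 18), i.e.
 *   perf stat -e uncore_cbox_0/event=0x22,umask=0x81,edge/ ...
 * Config bits outside the *_RAW_EVENT_MASK of a type are masked off and
 * never reach the control register.
 */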
/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)

/* ICL Cbo register */
#define ICL_UNC_CBO_CONFIG			0x396
#define ICL_UNC_NUM_CBO_MASK			0xf
#define ICL_UNC_CBO_0_PER_CTR0			0x702
#define ICL_UNC_CBO_MSR_OFFSET			0x8

/* ICL ARB register */
#define ICL_UNC_ARB_PER_CTR			0x3b1
#define ICL_UNC_ARB_PERFEVTSEL			0x3b3

/* ADL uncore global control */
#define ADL_UNC_PERF_GLOBAL_CTL			0x2ff0
#define ADL_UNC_FIXED_CTR_CTRL			0x2fde
#define ADL_UNC_FIXED_CTR			0x2fdf

/* ADL Cbo register */
#define ADL_UNC_CBO_0_PER_CTR0			0x2002
#define ADL_UNC_CBO_0_PERFEVTSEL0		0x2000
#define ADL_UNC_CTL_THRESHOLD			0x3f000000
#define ADL_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 ADL_UNC_CTL_THRESHOLD)

/* ADL ARB register */
#define ADL_UNC_ARB_PER_CTR0			0x2FD2
#define ADL_UNC_ARB_PERFEVTSEL0			0x2FD0
#define ADL_UNC_ARB_MSR_OFFSET			0x8

/* MTL Cbo register */
#define MTL_UNC_CBO_0_PER_CTR0			0x2448
#define MTL_UNC_CBO_0_PERFEVTSEL0		0x2442

/* MTL HAC_ARB register */
#define MTL_UNC_HAC_ARB_CTR			0x2018
#define MTL_UNC_HAC_ARB_CTRL			0x2012

/* MTL ARB register */
#define MTL_UNC_ARB_CTR				0x2418
#define MTL_UNC_ARB_CTRL			0x2412

/* MTL cNCU register */
#define MTL_UNC_CNCU_FIXED_CTR			0x2408
#define MTL_UNC_CNCU_FIXED_CTRL			0x2402
#define MTL_UNC_CNCU_BOX_CTL			0x240e

/* MTL sNCU register */
#define MTL_UNC_SNCU_FIXED_CTR			0x2008
#define MTL_UNC_SNCU_FIXED_CTRL			0x2002
#define MTL_UNC_SNCU_BOX_CTL			0x200e

/* MTL HAC_CBO register */
#define MTL_UNC_HBO_CTR				0x2048
#define MTL_UNC_HBO_CTRL			0x2042

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(threshold2, threshold, "config:24-31");
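/*
 * Note (added for clarity): each format attribute above is exported under
 * /sys/bus/event_source/devices/uncore_*/format/ and tells userspace which
 * config bits a field occupies, e.g. "cmask" maps to config:24-28 on SNB
 * but config:24-31 on NHM. The event_mask of each uncore type below must
 * stay consistent with the format group it points at.
 */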
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrq(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrq(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrq(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrq(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrq(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrq(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static const struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > topology_num_cores_per_package())
		snb_uncore_cbox.num_boxes = topology_num_cores_per_package();
}
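/*
 * Note (added for clarity): there is one Cbox per physical core, so the
 * static num_boxes above is only an upper bound; snb_uncore_cpu_init()
 * clamps it to the actual core count of the package at boot.
 */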
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrq(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}

	/* The 8th CBOX has different MSR space */
	if (box->pmu->pmu_idx == 7)
		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrq(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > topology_num_cores_per_package())
		skl_uncore_cbox.num_boxes = topology_num_cores_per_package();
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

static struct intel_uncore_ops icl_uncore_msr_ops = {
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type icl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct uncore_event_desc icl_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
	{ /* end: all zeroes */ },
};

static struct attribute *icl_uncore_clock_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group icl_uncore_clock_format_group = {
	.name = "format",
	.attrs = icl_uncore_clock_formats_attr,
};

static struct intel_uncore_type icl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &icl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type icl_uncore_arb = {
	.name		= "arb",
	.num_counters	= 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_ARB_PER_CTR,
	.event_ctl	= ICL_UNC_ARB_PERFEVTSEL,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *icl_msr_uncores[] = {
	&icl_uncore_cbox,
	&icl_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};
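/*
 * Note (added for clarity): unlike SNB/SKL, ICL exposes the number of
 * Cboxes in the low bits of the ICL_UNC_CBO_CONFIG MSR, so the count is
 * probed at runtime below rather than clamped to the core count.
 */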
static int icl_get_cbox_num(void)
{
	u64 num_boxes;

	rdmsrq(ICL_UNC_CBO_CONFIG, num_boxes);

	return num_boxes & ICL_UNC_NUM_CBO_MASK;
}

void icl_uncore_cpu_init(void)
{
	uncore_msr_uncores = icl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
}

static struct intel_uncore_type *tgl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};

static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

void tgl_uncore_cpu_init(void)
{
	uncore_msr_uncores = tgl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
	icl_uncore_cbox.ops = &skl_uncore_msr_ops;
	icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
	skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
}

static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static void adl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops adl_uncore_msr_ops = {
	.init_box	= adl_uncore_msr_init_box,
	.enable_box	= adl_uncore_msr_enable_box,
	.disable_box	= adl_uncore_msr_disable_box,
	.exit_box	= adl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct attribute *adl_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_threshold.attr,
	NULL,
};

static const struct attribute_group adl_uncore_format_group = {
	.name		= "format",
	.attrs		= adl_uncore_formats_attr,
};
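/*
 * Note (added for clarity): ADL replaces the SNB 5-bit cmask field with a
 * 6-bit threshold field (config:24-29), which is why the types below use
 * ADL_UNC_RAW_EVENT_MASK and the "threshold" format attribute instead of
 * SNB's "cmask5".
 */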
static struct intel_uncore_type adl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= ADL_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_arb = {
	.name		= "arb",
	.num_counters	= 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_ARB_PER_CTR0,
	.event_ctl	= ADL_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ADL_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= ADL_UNC_FIXED_CTR,
	.fixed_ctl	= ADL_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &adl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type *adl_msr_uncores[] = {
	&adl_uncore_cbox,
	&adl_uncore_arb,
	&adl_uncore_clockbox,
	NULL,
};

void adl_uncore_cpu_init(void)
{
	adl_uncore_cbox.num_boxes = icl_get_cbox_num();
	uncore_msr_uncores = adl_msr_uncores;
}

static struct intel_uncore_type mtl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= MTL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= MTL_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type mtl_uncore_hac_arb = {
	.name		= "hac_arb",
	.num_counters	= 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= MTL_UNC_HAC_ARB_CTR,
	.event_ctl	= MTL_UNC_HAC_ARB_CTRL,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type mtl_uncore_arb = {
	.name		= "arb",
	.num_counters	= 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= MTL_UNC_ARB_CTR,
	.event_ctl	= MTL_UNC_ARB_CTRL,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type mtl_uncore_hac_cbox = {
	.name		= "hac_cbox",
	.num_counters	= 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= MTL_UNC_HBO_CTR,
	.event_ctl	= MTL_UNC_HBO_CTRL,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static void mtl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrq(uncore_msr_box_ctl(box), SNB_UNC_GLOBAL_CTL_EN);
}

static struct intel_uncore_ops mtl_uncore_msr_ops = {
	.init_box	= mtl_uncore_msr_init_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type mtl_uncore_cncu = {
	.name		= "cncu",
	.num_counters	= 1,
	.num_boxes	= 1,
	.box_ctl	= MTL_UNC_CNCU_BOX_CTL,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= MTL_UNC_CNCU_FIXED_CTR,
	.fixed_ctl	= MTL_UNC_CNCU_FIXED_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &mtl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type mtl_uncore_sncu = {
	.name		= "sncu",
	.num_counters	= 1,
	.num_boxes	= 1,
	.box_ctl	= MTL_UNC_SNCU_BOX_CTL,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= MTL_UNC_SNCU_FIXED_CTR,
	.fixed_ctl	= MTL_UNC_SNCU_FIXED_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &mtl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type *mtl_msr_uncores[] = {
	&mtl_uncore_cbox,
	&mtl_uncore_hac_arb,
	&mtl_uncore_arb,
	&mtl_uncore_hac_cbox,
	&mtl_uncore_cncu,
	&mtl_uncore_sncu,
	NULL
};
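/*
 * Note (added for clarity): the MTL cNCU and sNCU boxes only carry a
 * fixed clockticks counter, so they reuse the ICL clock event list and
 * format group; only the Cbox count needs probing at runtime.
 */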
void mtl_uncore_cpu_init(void)
{
	mtl_uncore_cbox.num_boxes = icl_get_cbox_num();
	uncore_msr_uncores = mtl_msr_uncores;
}

static struct intel_uncore_type *lnl_msr_uncores[] = {
	&mtl_uncore_cbox,
	&mtl_uncore_arb,
	NULL
};

#define LNL_UNC_MSR_GLOBAL_CTL			0x240e

static void lnl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrq(LNL_UNC_MSR_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static struct intel_uncore_ops lnl_uncore_msr_ops = {
	.init_box	= lnl_uncore_msr_init_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

void lnl_uncore_cpu_init(void)
{
	mtl_uncore_cbox.num_boxes = 4;
	mtl_uncore_cbox.ops = &lnl_uncore_msr_ops;
	uncore_msr_uncores = lnl_msr_uncores;
}

enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
	INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),

	{ /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

/* BW break down- legacy counters */
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS		0x3
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE	0x5040
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS		0x4
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE	0x5044
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS		0x5
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE	0x5048

enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA_READS		= 0,
	SNB_PCI_UNCORE_IMC_DATA_WRITES,
	SNB_PCI_UNCORE_IMC_GT_REQUESTS,
	SNB_PCI_UNCORE_IMC_IA_REQUESTS,
	SNB_PCI_UNCORE_IMC_IO_REQUESTS,

	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};
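/*
 * Note (added for clarity): each initializer below is, in field order,
 * { counter_base, counter_offset, box_offset, num_counters, bits } as
 * laid out by struct freerunning_counters in uncore.h. The SNB IMC
 * counters tick once per 64-byte cache line, which is where the
 * 6.103515625e-5 (64 / 2^20) MiB scale in the event descriptions above
 * comes from.
 */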
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_DATA_WRITES]	= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_GT_REQUESTS]	= { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IA_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IO_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
};

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
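/*
 * Note (added for clarity): the IMC counters free run and cannot be
 * started, stopped or reprogrammed, so the box/event enable and disable
 * callbacks above are intentionally empty; the hrtimer set up in
 * init_box polls the 32-bit counters often enough to catch overflow.
 */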
/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (!pmu->registered)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.idx = idx;

	/* Convert to standard encoding format for freerunning counters */
	event->hw.config = ((cfg - 1) << 8) | 0x10ff;

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_dieid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}
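/*
 * Illustrative usage (not from the original source): the events declared
 * in snb_uncore_imc_events can be counted directly, e.g.
 *   perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ sleep 1
 * which reports memory controller read/write bandwidth in MiB.
 */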
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * SNB IMC counters are 32-bit and are laid out back to back
	 * in MMIO space. Therefore we must use a 32-bit accessor function.
	 * Using readq() from uncore_mmio_read_counter() causes problems
	 * because it reads 64 bits at a time. This is okay for the
	 * uncore_perf_event_update() function because it drops the upper
	 * 32 bits, but not okay for plain uncore_read_counter() as invoked
	 * in uncore_pmu_event_start().
	 */
	return (u64)readl(box->io_addr + hwc->event_base);
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= uncore_pmu_event_add,
	.del		= uncore_pmu_event_del,
	.start		= uncore_pmu_event_start,
	.stop		= uncore_pmu_event_stop,
	.read		= uncore_pmu_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name			= "imc",
	.num_counters		= 5,
	.num_boxes		= 1,
	.num_freerunning_types	= SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNB_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning		= snb_uncore_imc_freerunning,
	.event_descs		= snb_uncore_imc_events,
	.format_group		= &snb_uncore_imc_format_group,
	.ops			= &snb_uncore_imc_ops,
	.pmu			= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(SNB),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(IVB),
	IMC_UNCORE_DEV(IVB_E3),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(HSW),
	IMC_UNCORE_DEV(HSW_U),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(BDW),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(SKL_Y),
	IMC_UNCORE_DEV(SKL_U),
	IMC_UNCORE_DEV(SKL_HD),
	IMC_UNCORE_DEV(SKL_HQ),
	IMC_UNCORE_DEV(SKL_SD),
	IMC_UNCORE_DEV(SKL_SQ),
	IMC_UNCORE_DEV(SKL_E3),
	IMC_UNCORE_DEV(KBL_Y),
	IMC_UNCORE_DEV(KBL_U),
	IMC_UNCORE_DEV(KBL_UQ),
	IMC_UNCORE_DEV(KBL_SD),
	IMC_UNCORE_DEV(KBL_SQ),
	IMC_UNCORE_DEV(KBL_HQ),
	IMC_UNCORE_DEV(KBL_WQ),
	IMC_UNCORE_DEV(CFL_2U),
	IMC_UNCORE_DEV(CFL_4U),
	IMC_UNCORE_DEV(CFL_4H),
	IMC_UNCORE_DEV(CFL_6H),
	IMC_UNCORE_DEV(CFL_2S_D),
	IMC_UNCORE_DEV(CFL_4S_D),
	IMC_UNCORE_DEV(CFL_6S_D),
	IMC_UNCORE_DEV(CFL_8S_D),
	IMC_UNCORE_DEV(CFL_4S_W),
	IMC_UNCORE_DEV(CFL_6S_W),
	IMC_UNCORE_DEV(CFL_8S_W),
	IMC_UNCORE_DEV(CFL_4S_S),
	IMC_UNCORE_DEV(CFL_6S_S),
	IMC_UNCORE_DEV(CFL_8S_S),
	IMC_UNCORE_DEV(AML_YD),
	IMC_UNCORE_DEV(AML_YQ),
	IMC_UNCORE_DEV(WHL_UQ),
	IMC_UNCORE_DEV(WHL_4_UQ),
	IMC_UNCORE_DEV(WHL_UD),
	IMC_UNCORE_DEV(CML_H1),
	IMC_UNCORE_DEV(CML_H2),
	IMC_UNCORE_DEV(CML_H3),
	IMC_UNCORE_DEV(CML_U1),
	IMC_UNCORE_DEV(CML_U2),
	IMC_UNCORE_DEV(CML_U3),
	IMC_UNCORE_DEV(CML_S1),
	IMC_UNCORE_DEV(CML_S2),
	IMC_UNCORE_DEV(CML_S3),
	IMC_UNCORE_DEV(CML_S4),
	IMC_UNCORE_DEV(CML_S5),
	{ /* end: all zeroes */ },
};
static const struct pci_device_id icl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(ICL_U),
	IMC_UNCORE_DEV(ICL_U2),
	IMC_UNCORE_DEV(RKL_1),
	IMC_UNCORE_DEV(RKL_2),
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

static struct pci_driver icl_uncore_pci_driver = {
	.name		= "icl_uncore",
	.id_table	= icl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};

#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),      /* Sandy Bridge */
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),      /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver),   /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),      /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),      /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),   /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),   /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),   /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),   /* 6th Gen Core S Quad Core */
	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),   /* Xeon E3 V5 Gen Core processor */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),    /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),    /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),   /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),   /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),   /* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),   /* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),   /* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),   /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),   /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),   /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),   /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),   /* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),   /* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),   /* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),   /* 8th Gen Core U Mobile Dual Core */
	IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),    /* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),   /* 10th Gen Core Mobile */
	IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),
	IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),
	{  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrq(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrq(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrq(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrq(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}
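/*
 * Note (added for clarity): the NHM uncore is a single package-wide box;
 * with an empty .name below, the PMU is exposed to userspace as plain
 * "uncore". Its fixed counter is enabled via bit 0 of its control MSR
 * rather than the SNB-style enable bit 22, hence the special case above.
 */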
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters	= 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */

/* Tiger Lake MMIO uncore support */

static const struct pci_device_id tgl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(TGL_U1),
	IMC_UNCORE_DEV(TGL_U2),
	IMC_UNCORE_DEV(TGL_U3),
	IMC_UNCORE_DEV(TGL_U4),
	IMC_UNCORE_DEV(TGL_H),
	IMC_UNCORE_DEV(ADL_1),
	IMC_UNCORE_DEV(ADL_2),
	IMC_UNCORE_DEV(ADL_3),
	IMC_UNCORE_DEV(ADL_4),
	IMC_UNCORE_DEV(ADL_5),
	IMC_UNCORE_DEV(ADL_6),
	IMC_UNCORE_DEV(ADL_7),
	IMC_UNCORE_DEV(ADL_8),
	IMC_UNCORE_DEV(ADL_9),
	IMC_UNCORE_DEV(ADL_10),
	IMC_UNCORE_DEV(ADL_11),
	IMC_UNCORE_DEV(ADL_12),
	IMC_UNCORE_DEV(ADL_13),
	IMC_UNCORE_DEV(ADL_14),
	IMC_UNCORE_DEV(ADL_15),
	IMC_UNCORE_DEV(ADL_16),
	IMC_UNCORE_DEV(ADL_17),
	IMC_UNCORE_DEV(ADL_18),
	IMC_UNCORE_DEV(ADL_19),
	IMC_UNCORE_DEV(ADL_20),
	IMC_UNCORE_DEV(ADL_21),
	IMC_UNCORE_DEV(RPL_1),
	IMC_UNCORE_DEV(RPL_2),
	IMC_UNCORE_DEV(RPL_3),
	IMC_UNCORE_DEV(RPL_4),
	IMC_UNCORE_DEV(RPL_5),
	IMC_UNCORE_DEV(RPL_6),
	IMC_UNCORE_DEV(RPL_7),
	IMC_UNCORE_DEV(RPL_8),
	IMC_UNCORE_DEV(RPL_9),
	IMC_UNCORE_DEV(RPL_10),
	IMC_UNCORE_DEV(RPL_11),
	IMC_UNCORE_DEV(RPL_12),
	IMC_UNCORE_DEV(RPL_13),
	IMC_UNCORE_DEV(RPL_14),
	IMC_UNCORE_DEV(RPL_15),
	IMC_UNCORE_DEV(RPL_16),
	IMC_UNCORE_DEV(RPL_17),
	IMC_UNCORE_DEV(RPL_18),
	IMC_UNCORE_DEV(RPL_19),
	IMC_UNCORE_DEV(RPL_20),
	IMC_UNCORE_DEV(RPL_21),
	IMC_UNCORE_DEV(RPL_22),
	IMC_UNCORE_DEV(RPL_23),
	IMC_UNCORE_DEV(RPL_24),
	IMC_UNCORE_DEV(RPL_25),
	IMC_UNCORE_DEV(MTL_1),
	IMC_UNCORE_DEV(MTL_2),
	IMC_UNCORE_DEV(MTL_3),
	IMC_UNCORE_DEV(MTL_4),
	IMC_UNCORE_DEV(MTL_5),
	IMC_UNCORE_DEV(MTL_6),
	IMC_UNCORE_DEV(MTL_7),
	IMC_UNCORE_DEV(MTL_8),
	IMC_UNCORE_DEV(MTL_9),
	IMC_UNCORE_DEV(MTL_10),
	IMC_UNCORE_DEV(MTL_11),
	IMC_UNCORE_DEV(MTL_12),
	IMC_UNCORE_DEV(MTL_13),
	{ /* end: all zeroes */ }
};

enum perf_tgl_uncore_imc_freerunning_types {
	TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
	TGL_MMIO_UNCORE_IMC_DATA_READ,
	TGL_MMIO_UNCORE_IMC_DATA_WRITE,
	TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x5040, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x5058, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0x50A0, 0x0, 0x0, 1, 64 },
};

static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0xd840, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0xd858, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xd8A0, 0x0, 0x0, 1, 64 },
};

static struct uncore_event_desc tgl_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_total,       "event=0xff,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(data_total.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_total.unit,  "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_read,        "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(data_read.scale,  "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_read.unit,   "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_write,       "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(data_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_write.unit,  "MiB"),

	{ /* end: all zeroes */ }
};

static struct pci_dev *tgl_uncore_get_mc_dev(void)
{
	const struct pci_device_id *ids = tgl_uncore_pci_ids;
	struct pci_dev *mc_dev = NULL;

	while (ids && ids->vendor) {
		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL);
		if (mc_dev)
			return mc_dev;
		ids++;
	}

	/* Just try to grab 00:00.0 device */
	if (!mc_dev)
		mc_dev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));

	return mc_dev;
}

#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET		0x10000
#define TGL_UNCORE_PCI_IMC_MAP_SIZE		0xe000
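/*
 * Note (added for clarity): the helper below maps one MMIO region per box.
 * It reads a (possibly 64-bit) base address from the given PCI config BAR
 * offset, then steps it by "step" bytes per pmu_idx so that each IMC/box
 * instance gets its own counter window.
 */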
static void
uncore_get_box_mmio_addr(struct intel_uncore_box *box,
			 unsigned int base_offset,
			 int bar_offset, int step)
{
	struct pci_dev *pdev = tgl_uncore_get_mc_dev();
	struct intel_uncore_pmu *pmu = box->pmu;
	struct intel_uncore_type *type = pmu->type;
	resource_size_t addr;
	u32 bar;

	if (!pdev) {
		pr_warn("perf uncore: Cannot find matched IMC device.\n");
		return;
	}

	pci_read_config_dword(pdev, bar_offset, &bar);
	if (!(bar & BIT(0))) {
		pr_warn("perf uncore: BAR 0x%x is disabled. Failed to map %s counters.\n",
			bar_offset, type->name);
		pci_dev_put(pdev);
		return;
	}
	bar &= ~BIT(0);
	addr = (resource_size_t)(bar + step * pmu->pmu_idx);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, bar_offset + 4, &bar);
	addr |= ((resource_size_t)bar << 32);
#endif

	addr += base_offset;
	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

	pci_dev_put(pdev);
}

static void __uncore_imc_init_box(struct intel_uncore_box *box,
				  unsigned int base_offset)
{
	uncore_get_box_mmio_addr(box, base_offset,
				 SNB_UNCORE_PCI_IMC_BAR_OFFSET,
				 TGL_UNCORE_MMIO_IMC_MEM_OFFSET);
}

static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, 0);
}

static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
	.init_box	= tgl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct attribute *tgl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL
};

static const struct attribute_group tgl_uncore_imc_format_group = {
	.name = "format",
	.attrs = tgl_uncore_imc_formats_attr,
};

static struct intel_uncore_type tgl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= TGL_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning		= tgl_uncore_imc_freerunning,
	.ops			= &tgl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *tgl_mmio_uncores[] = {
	&tgl_uncore_imc_free_running,
	NULL
};

void tgl_l_uncore_mmio_init(void)
{
	tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
	uncore_mmio_uncores = tgl_mmio_uncores;
}

void tgl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = tgl_mmio_uncores;
}

/* end of Tiger Lake MMIO uncore support */

/* Alder Lake MMIO uncore support */
#define ADL_UNCORE_IMC_BASE			0xd900
#define ADL_UNCORE_IMC_MAP_SIZE			0x200
#define ADL_UNCORE_IMC_CTR			0xe8
#define ADL_UNCORE_IMC_CTRL			0xd0
#define ADL_UNCORE_IMC_GLOBAL_CTL		0xc0
#define ADL_UNCORE_IMC_BOX_CTL			0xc4
#define ADL_UNCORE_IMC_FREERUNNING_BASE		0xd800
#define ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE	0x100

#define ADL_UNCORE_IMC_CTL_FRZ			(1 << 0)
#define ADL_UNCORE_IMC_CTL_RST_CTRL		(1 << 1)
#define ADL_UNCORE_IMC_CTL_RST_CTRS		(1 << 2)
#define ADL_UNCORE_IMC_CTL_INT			(ADL_UNCORE_IMC_CTL_RST_CTRL | \
						ADL_UNCORE_IMC_CTL_RST_CTRS)

static void adl_uncore_imc_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_BASE);

	/* The global control in MC1 can control both MCs. */
	if (box->io_addr && (box->pmu->pmu_idx == 1))
		writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + ADL_UNCORE_IMC_GLOBAL_CTL);
}
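/*
 * Note (added for clarity): ADL IMC boxes are frozen and thawed through
 * the FRZ bit in the per-box control register; disable_box() below sets
 * it and enable_box() clears the whole register to let the counters run.
 */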
static void adl_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(ADL_UNCORE_IMC_CTL_FRZ, box->io_addr + uncore_mmio_box_ctl(box));
}

static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr + uncore_mmio_box_ctl(box));
}

#define MMIO_UNCORE_COMMON_OPS()				\
	.exit_box	= uncore_mmio_exit_box,			\
	.disable_box	= adl_uncore_mmio_disable_box,		\
	.enable_box	= adl_uncore_mmio_enable_box,		\
	.disable_event	= intel_generic_uncore_mmio_disable_event,	\
	.enable_event	= intel_generic_uncore_mmio_enable_event,	\
	.read_counter	= uncore_mmio_read_counter,

static struct intel_uncore_ops adl_uncore_mmio_ops = {
	.init_box	= adl_uncore_imc_init_box,
	MMIO_UNCORE_COMMON_OPS()
};

#define ADL_UNC_CTL_CHMASK_MASK			0x00000f00
#define ADL_UNC_IMC_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 ADL_UNC_CTL_CHMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET)

static struct attribute *adl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_chmask.attr,
	&format_attr_edge.attr,
	NULL,
};

static const struct attribute_group adl_uncore_imc_format_group = {
	.name		= "format",
	.attrs		= adl_uncore_imc_formats_attr,
};

static struct intel_uncore_type adl_uncore_imc = {
	.name		= "imc",
	.num_counters	= 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 64,
	.perf_ctr	= ADL_UNCORE_IMC_CTR,
	.event_ctl	= ADL_UNCORE_IMC_CTRL,
	.event_mask	= ADL_UNC_IMC_EVENT_MASK,
	.box_ctl	= ADL_UNCORE_IMC_BOX_CTL,
	.mmio_offset	= 0,
	.mmio_map_size	= ADL_UNCORE_IMC_MAP_SIZE,
	.ops		= &adl_uncore_mmio_ops,
	.format_group	= &adl_uncore_imc_format_group,
};

enum perf_adl_uncore_imc_freerunning_types {
	ADL_MMIO_UNCORE_IMC_DATA_TOTAL,
	ADL_MMIO_UNCORE_IMC_DATA_READ,
	ADL_MMIO_UNCORE_IMC_DATA_WRITE,
	ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

static struct freerunning_counters adl_uncore_imc_freerunning[] = {
	[ADL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x40, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x58, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xA0, 0x0, 0x0, 1, 64 },
};

static void adl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_FREERUNNING_BASE);
}

static struct intel_uncore_ops adl_uncore_imc_freerunning_ops = {
	.init_box	= adl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type adl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE,
	.freerunning		= adl_uncore_imc_freerunning,
	.ops			= &adl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};
static struct intel_uncore_type *adl_mmio_uncores[] = {
	&adl_uncore_imc,
	&adl_uncore_imc_free_running,
	NULL
};

void adl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = adl_mmio_uncores;
}

/* end of Alder Lake MMIO uncore support */

/* Lunar Lake MMIO uncore support */
#define LNL_UNCORE_PCI_SAFBAR_OFFSET		0x68
#define LNL_UNCORE_MAP_SIZE			0x1000
#define LNL_UNCORE_SNCU_BASE			0xE4B000
#define LNL_UNCORE_SNCU_CTR			0x390
#define LNL_UNCORE_SNCU_CTRL			0x398
#define LNL_UNCORE_SNCU_BOX_CTL			0x380
#define LNL_UNCORE_GLOBAL_CTL			0x700
#define LNL_UNCORE_HBO_BASE			0xE54000
#define LNL_UNCORE_HBO_OFFSET			-4096
#define LNL_UNCORE_HBO_CTR			0x570
#define LNL_UNCORE_HBO_CTRL			0x550
#define LNL_UNCORE_HBO_BOX_CTL			0x548

#define LNL_UNC_CTL_THRESHOLD			0xff000000
#define LNL_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 LNL_UNC_CTL_THRESHOLD)

static struct attribute *lnl_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_threshold2.attr,
	NULL
};

static const struct attribute_group lnl_uncore_format_group = {
	.name		= "format",
	.attrs		= lnl_uncore_formats_attr,
};

static void lnl_uncore_hbo_init_box(struct intel_uncore_box *box)
{
	uncore_get_box_mmio_addr(box, LNL_UNCORE_HBO_BASE,
				 LNL_UNCORE_PCI_SAFBAR_OFFSET,
				 LNL_UNCORE_HBO_OFFSET);
}

static struct intel_uncore_ops lnl_uncore_hbo_ops = {
	.init_box	= lnl_uncore_hbo_init_box,
	MMIO_UNCORE_COMMON_OPS()
};

static struct intel_uncore_type lnl_uncore_hbo = {
	.name		= "hbo",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 64,
	.perf_ctr	= LNL_UNCORE_HBO_CTR,
	.event_ctl	= LNL_UNCORE_HBO_CTRL,
	.event_mask	= LNL_UNC_RAW_EVENT_MASK,
	.box_ctl	= LNL_UNCORE_HBO_BOX_CTL,
	.mmio_map_size	= LNL_UNCORE_MAP_SIZE,
	.ops		= &lnl_uncore_hbo_ops,
	.format_group	= &lnl_uncore_format_group,
};

static void lnl_uncore_sncu_init_box(struct intel_uncore_box *box)
{
	uncore_get_box_mmio_addr(box, LNL_UNCORE_SNCU_BASE,
				 LNL_UNCORE_PCI_SAFBAR_OFFSET,
				 0);

	if (box->io_addr)
		writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + LNL_UNCORE_GLOBAL_CTL);
}

static struct intel_uncore_ops lnl_uncore_sncu_ops = {
	.init_box	= lnl_uncore_sncu_init_box,
	MMIO_UNCORE_COMMON_OPS()
};

static struct intel_uncore_type lnl_uncore_sncu = {
	.name		= "sncu",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 64,
	.perf_ctr	= LNL_UNCORE_SNCU_CTR,
	.event_ctl	= LNL_UNCORE_SNCU_CTRL,
	.event_mask	= LNL_UNC_RAW_EVENT_MASK,
	.box_ctl	= LNL_UNCORE_SNCU_BOX_CTL,
	.mmio_map_size	= LNL_UNCORE_MAP_SIZE,
	.ops		= &lnl_uncore_sncu_ops,
	.format_group	= &lnl_uncore_format_group,
};

static struct intel_uncore_type *lnl_mmio_uncores[] = {
	&adl_uncore_imc,
	&lnl_uncore_hbo,
	&lnl_uncore_sncu,
	&adl_uncore_imc_free_running,
	NULL
};

void lnl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = lnl_mmio_uncores;
}

/* end of Lunar Lake MMIO uncore support */