/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 */

/*
 * This file exports cstate-related free-running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and allow tools to use them without special MSR
 * access code.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to counters' scope and category, three PMUs are registered
 * with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *  - 'cstate_module': The counter is available for each module
 *    (a cluster of cores sharing an L2 cache).
 *    The counters include MODULE_C*_RES_MS.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
39 * 40 * Model specific counters: 41 * MSR_CORE_C1_RES: CORE C1 Residency Counter 42 * perf code: 0x00 43 * Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL 44 * MTL,SRF,GRR,ARL,LNL,PTL 45 * Scope: Core (each processor core has a MSR) 46 * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter 47 * perf code: 0x01 48 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM, 49 * CNL,KBL,CML,TNT 50 * Scope: Core 51 * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter 52 * perf code: 0x02 53 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, 54 * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, 55 * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF, 56 * GRR,ARL,LNL,PTL 57 * Scope: Core 58 * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter 59 * perf code: 0x03 60 * Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML, 61 * ICL,TGL,RKL,ADL,RPL,MTL,ARL,LNL, 62 * PTL 63 * Scope: Core 64 * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter. 65 * perf code: 0x00 66 * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL, 67 * KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL, 68 * RPL,SPR,MTL,ARL,LNL,SRF,PTL 69 * Scope: Package (physical package) 70 * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter. 71 * perf code: 0x01 72 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL, 73 * GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL, 74 * ADL,RPL,MTL,ARL 75 * Scope: Package (physical package) 76 * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter. 77 * perf code: 0x02 78 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, 79 * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, 80 * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF, 81 * ARL,LNL,PTL 82 * Scope: Package (physical package) 83 * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. 84 * perf code: 0x03 85 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL, 86 * KBL,CML,ICL,TGL,RKL 87 * Scope: Package (physical package) 88 * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter. 
89 * perf code: 0x04 90 * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL, 91 * ADL,RPL,MTL,ARL 92 * Scope: Package (physical package) 93 * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter. 94 * perf code: 0x05 95 * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL 96 * Scope: Package (physical package) 97 * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter. 98 * perf code: 0x06 99 * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL, 100 * TNT,RKL,ADL,RPL,MTL,ARL,LNL,PTL 101 * Scope: Package (physical package) 102 * MSR_MODULE_C6_RES_MS: Module C6 Residency Counter. 103 * perf code: 0x00 104 * Available model: SRF,GRR 105 * Scope: A cluster of cores shared L2 cache 106 * 107 */ 108 109 #include <linux/module.h> 110 #include <linux/slab.h> 111 #include <linux/perf_event.h> 112 #include <linux/nospec.h> 113 #include <asm/cpu_device_id.h> 114 #include <asm/intel-family.h> 115 #include <asm/msr.h> 116 #include "../perf_event.h" 117 #include "../probe.h" 118 119 MODULE_DESCRIPTION("Support for Intel cstate performance events"); 120 MODULE_LICENSE("GPL"); 121 122 #define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \ 123 static ssize_t __cstate_##_var##_show(struct device *dev, \ 124 struct device_attribute *attr, \ 125 char *page) \ 126 { \ 127 BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ 128 return sprintf(page, _format "\n"); \ 129 } \ 130 static struct device_attribute format_attr_##_var = \ 131 __ATTR(_name, 0444, __cstate_##_var##_show, NULL) 132 133 /* Model -> events mapping */ 134 struct cstate_model { 135 unsigned long core_events; 136 unsigned long pkg_events; 137 unsigned long module_events; 138 unsigned long quirks; 139 }; 140 141 /* Quirk flags */ 142 #define SLM_PKG_C6_USE_C7_MSR (1UL << 0) 143 #define KNL_CORE_C6_MSR (1UL << 1) 144 145 /* cstate_core PMU */ 146 static struct pmu cstate_core_pmu; 147 static bool has_cstate_core; 148 149 enum perf_cstate_core_events { 150 PERF_CSTATE_CORE_C1_RES = 0, 151 PERF_CSTATE_CORE_C3_RES, 152 
PERF_CSTATE_CORE_C6_RES, 153 PERF_CSTATE_CORE_C7_RES, 154 155 PERF_CSTATE_CORE_EVENT_MAX, 156 }; 157 158 PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00"); 159 PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01"); 160 PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02"); 161 PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03"); 162 163 static unsigned long core_msr_mask; 164 165 PMU_EVENT_GROUP(events, cstate_core_c1); 166 PMU_EVENT_GROUP(events, cstate_core_c3); 167 PMU_EVENT_GROUP(events, cstate_core_c6); 168 PMU_EVENT_GROUP(events, cstate_core_c7); 169 170 static bool test_msr(int idx, void *data) 171 { 172 return test_bit(idx, (unsigned long *) data); 173 } 174 175 static struct perf_msr core_msr[] = { 176 [PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES, &group_cstate_core_c1, test_msr }, 177 [PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY, &group_cstate_core_c3, test_msr }, 178 [PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY, &group_cstate_core_c6, test_msr }, 179 [PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY, &group_cstate_core_c7, test_msr }, 180 }; 181 182 static struct attribute *attrs_empty[] = { 183 NULL, 184 }; 185 186 /* 187 * There are no default events, but we need to create 188 * "events" group (with empty attrs) before updating 189 * it with detected events. 
190 */ 191 static struct attribute_group cstate_events_attr_group = { 192 .name = "events", 193 .attrs = attrs_empty, 194 }; 195 196 DEFINE_CSTATE_FORMAT_ATTR(cstate_event, event, "config:0-63"); 197 static struct attribute *cstate_format_attrs[] = { 198 &format_attr_cstate_event.attr, 199 NULL, 200 }; 201 202 static struct attribute_group cstate_format_attr_group = { 203 .name = "format", 204 .attrs = cstate_format_attrs, 205 }; 206 207 static const struct attribute_group *cstate_attr_groups[] = { 208 &cstate_events_attr_group, 209 &cstate_format_attr_group, 210 NULL, 211 }; 212 213 /* cstate_pkg PMU */ 214 static struct pmu cstate_pkg_pmu; 215 static bool has_cstate_pkg; 216 217 enum perf_cstate_pkg_events { 218 PERF_CSTATE_PKG_C2_RES = 0, 219 PERF_CSTATE_PKG_C3_RES, 220 PERF_CSTATE_PKG_C6_RES, 221 PERF_CSTATE_PKG_C7_RES, 222 PERF_CSTATE_PKG_C8_RES, 223 PERF_CSTATE_PKG_C9_RES, 224 PERF_CSTATE_PKG_C10_RES, 225 226 PERF_CSTATE_PKG_EVENT_MAX, 227 }; 228 229 PMU_EVENT_ATTR_STRING(c2-residency, attr_cstate_pkg_c2, "event=0x00"); 230 PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_pkg_c3, "event=0x01"); 231 PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_pkg_c6, "event=0x02"); 232 PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_pkg_c7, "event=0x03"); 233 PMU_EVENT_ATTR_STRING(c8-residency, attr_cstate_pkg_c8, "event=0x04"); 234 PMU_EVENT_ATTR_STRING(c9-residency, attr_cstate_pkg_c9, "event=0x05"); 235 PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06"); 236 237 static unsigned long pkg_msr_mask; 238 239 PMU_EVENT_GROUP(events, cstate_pkg_c2); 240 PMU_EVENT_GROUP(events, cstate_pkg_c3); 241 PMU_EVENT_GROUP(events, cstate_pkg_c6); 242 PMU_EVENT_GROUP(events, cstate_pkg_c7); 243 PMU_EVENT_GROUP(events, cstate_pkg_c8); 244 PMU_EVENT_GROUP(events, cstate_pkg_c9); 245 PMU_EVENT_GROUP(events, cstate_pkg_c10); 246 247 static struct perf_msr pkg_msr[] = { 248 [PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY, &group_cstate_pkg_c2, test_msr }, 249 
[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY, &group_cstate_pkg_c3, test_msr }, 250 [PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY, &group_cstate_pkg_c6, test_msr }, 251 [PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY, &group_cstate_pkg_c7, test_msr }, 252 [PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY, &group_cstate_pkg_c8, test_msr }, 253 [PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY, &group_cstate_pkg_c9, test_msr }, 254 [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr }, 255 }; 256 257 /* cstate_module PMU */ 258 static struct pmu cstate_module_pmu; 259 static bool has_cstate_module; 260 261 enum perf_cstate_module_events { 262 PERF_CSTATE_MODULE_C6_RES = 0, 263 264 PERF_CSTATE_MODULE_EVENT_MAX, 265 }; 266 267 PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_module_c6, "event=0x00"); 268 269 static unsigned long module_msr_mask; 270 271 PMU_EVENT_GROUP(events, cstate_module_c6); 272 273 static struct perf_msr module_msr[] = { 274 [PERF_CSTATE_MODULE_C6_RES] = { MSR_MODULE_C6_RES_MS, &group_cstate_module_c6, test_msr }, 275 }; 276 277 static int cstate_pmu_event_init(struct perf_event *event) 278 { 279 u64 cfg = event->attr.config; 280 281 if (event->attr.type != event->pmu->type) 282 return -ENOENT; 283 284 /* unsupported modes and filters */ 285 if (event->attr.sample_period) /* no sampling */ 286 return -EINVAL; 287 288 if (event->cpu < 0) 289 return -EINVAL; 290 291 if (event->pmu == &cstate_core_pmu) { 292 if (cfg >= PERF_CSTATE_CORE_EVENT_MAX) 293 return -EINVAL; 294 cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX); 295 if (!(core_msr_mask & (1 << cfg))) 296 return -EINVAL; 297 event->hw.event_base = core_msr[cfg].msr; 298 } else if (event->pmu == &cstate_pkg_pmu) { 299 if (cfg >= PERF_CSTATE_PKG_EVENT_MAX) 300 return -EINVAL; 301 cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX); 302 if (!(pkg_msr_mask & (1 << cfg))) 303 return -EINVAL; 304 
event->hw.event_base = pkg_msr[cfg].msr; 305 } else if (event->pmu == &cstate_module_pmu) { 306 if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX) 307 return -EINVAL; 308 cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_MODULE_EVENT_MAX); 309 if (!(module_msr_mask & (1 << cfg))) 310 return -EINVAL; 311 event->hw.event_base = module_msr[cfg].msr; 312 } else { 313 return -ENOENT; 314 } 315 316 event->hw.config = cfg; 317 event->hw.idx = -1; 318 return 0; 319 } 320 321 static inline u64 cstate_pmu_read_counter(struct perf_event *event) 322 { 323 u64 val; 324 325 rdmsrq(event->hw.event_base, val); 326 return val; 327 } 328 329 static void cstate_pmu_event_update(struct perf_event *event) 330 { 331 struct hw_perf_event *hwc = &event->hw; 332 u64 prev_raw_count, new_raw_count; 333 334 prev_raw_count = local64_read(&hwc->prev_count); 335 do { 336 new_raw_count = cstate_pmu_read_counter(event); 337 } while (!local64_try_cmpxchg(&hwc->prev_count, 338 &prev_raw_count, new_raw_count)); 339 340 local64_add(new_raw_count - prev_raw_count, &event->count); 341 } 342 343 static void cstate_pmu_event_start(struct perf_event *event, int mode) 344 { 345 local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event)); 346 } 347 348 static void cstate_pmu_event_stop(struct perf_event *event, int mode) 349 { 350 cstate_pmu_event_update(event); 351 } 352 353 static void cstate_pmu_event_del(struct perf_event *event, int mode) 354 { 355 cstate_pmu_event_stop(event, PERF_EF_UPDATE); 356 } 357 358 static int cstate_pmu_event_add(struct perf_event *event, int mode) 359 { 360 if (mode & PERF_EF_START) 361 cstate_pmu_event_start(event, mode); 362 363 return 0; 364 } 365 366 static const struct attribute_group *core_attr_update[] = { 367 &group_cstate_core_c1, 368 &group_cstate_core_c3, 369 &group_cstate_core_c6, 370 &group_cstate_core_c7, 371 NULL, 372 }; 373 374 static const struct attribute_group *pkg_attr_update[] = { 375 &group_cstate_pkg_c2, 376 &group_cstate_pkg_c3, 377 
&group_cstate_pkg_c6, 378 &group_cstate_pkg_c7, 379 &group_cstate_pkg_c8, 380 &group_cstate_pkg_c9, 381 &group_cstate_pkg_c10, 382 NULL, 383 }; 384 385 static const struct attribute_group *module_attr_update[] = { 386 &group_cstate_module_c6, 387 NULL 388 }; 389 390 static struct pmu cstate_core_pmu = { 391 .attr_groups = cstate_attr_groups, 392 .attr_update = core_attr_update, 393 .name = "cstate_core", 394 .task_ctx_nr = perf_invalid_context, 395 .event_init = cstate_pmu_event_init, 396 .add = cstate_pmu_event_add, 397 .del = cstate_pmu_event_del, 398 .start = cstate_pmu_event_start, 399 .stop = cstate_pmu_event_stop, 400 .read = cstate_pmu_event_update, 401 .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, 402 .scope = PERF_PMU_SCOPE_CORE, 403 .module = THIS_MODULE, 404 }; 405 406 static struct pmu cstate_pkg_pmu = { 407 .attr_groups = cstate_attr_groups, 408 .attr_update = pkg_attr_update, 409 .name = "cstate_pkg", 410 .task_ctx_nr = perf_invalid_context, 411 .event_init = cstate_pmu_event_init, 412 .add = cstate_pmu_event_add, 413 .del = cstate_pmu_event_del, 414 .start = cstate_pmu_event_start, 415 .stop = cstate_pmu_event_stop, 416 .read = cstate_pmu_event_update, 417 .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, 418 .scope = PERF_PMU_SCOPE_PKG, 419 .module = THIS_MODULE, 420 }; 421 422 static struct pmu cstate_module_pmu = { 423 .attr_groups = cstate_attr_groups, 424 .attr_update = module_attr_update, 425 .name = "cstate_module", 426 .task_ctx_nr = perf_invalid_context, 427 .event_init = cstate_pmu_event_init, 428 .add = cstate_pmu_event_add, 429 .del = cstate_pmu_event_del, 430 .start = cstate_pmu_event_start, 431 .stop = cstate_pmu_event_stop, 432 .read = cstate_pmu_event_update, 433 .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, 434 .scope = PERF_PMU_SCOPE_CLUSTER, 435 .module = THIS_MODULE, 436 }; 437 438 static const struct cstate_model nhm_cstates __initconst = { 439 .core_events = 
BIT(PERF_CSTATE_CORE_C3_RES) | 440 BIT(PERF_CSTATE_CORE_C6_RES), 441 442 .pkg_events = BIT(PERF_CSTATE_PKG_C3_RES) | 443 BIT(PERF_CSTATE_PKG_C6_RES) | 444 BIT(PERF_CSTATE_PKG_C7_RES), 445 }; 446 447 static const struct cstate_model snb_cstates __initconst = { 448 .core_events = BIT(PERF_CSTATE_CORE_C3_RES) | 449 BIT(PERF_CSTATE_CORE_C6_RES) | 450 BIT(PERF_CSTATE_CORE_C7_RES), 451 452 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | 453 BIT(PERF_CSTATE_PKG_C3_RES) | 454 BIT(PERF_CSTATE_PKG_C6_RES) | 455 BIT(PERF_CSTATE_PKG_C7_RES), 456 }; 457 458 static const struct cstate_model hswult_cstates __initconst = { 459 .core_events = BIT(PERF_CSTATE_CORE_C3_RES) | 460 BIT(PERF_CSTATE_CORE_C6_RES) | 461 BIT(PERF_CSTATE_CORE_C7_RES), 462 463 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | 464 BIT(PERF_CSTATE_PKG_C3_RES) | 465 BIT(PERF_CSTATE_PKG_C6_RES) | 466 BIT(PERF_CSTATE_PKG_C7_RES) | 467 BIT(PERF_CSTATE_PKG_C8_RES) | 468 BIT(PERF_CSTATE_PKG_C9_RES) | 469 BIT(PERF_CSTATE_PKG_C10_RES), 470 }; 471 472 static const struct cstate_model cnl_cstates __initconst = { 473 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | 474 BIT(PERF_CSTATE_CORE_C3_RES) | 475 BIT(PERF_CSTATE_CORE_C6_RES) | 476 BIT(PERF_CSTATE_CORE_C7_RES), 477 478 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | 479 BIT(PERF_CSTATE_PKG_C3_RES) | 480 BIT(PERF_CSTATE_PKG_C6_RES) | 481 BIT(PERF_CSTATE_PKG_C7_RES) | 482 BIT(PERF_CSTATE_PKG_C8_RES) | 483 BIT(PERF_CSTATE_PKG_C9_RES) | 484 BIT(PERF_CSTATE_PKG_C10_RES), 485 }; 486 487 static const struct cstate_model icl_cstates __initconst = { 488 .core_events = BIT(PERF_CSTATE_CORE_C6_RES) | 489 BIT(PERF_CSTATE_CORE_C7_RES), 490 491 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | 492 BIT(PERF_CSTATE_PKG_C3_RES) | 493 BIT(PERF_CSTATE_PKG_C6_RES) | 494 BIT(PERF_CSTATE_PKG_C7_RES) | 495 BIT(PERF_CSTATE_PKG_C8_RES) | 496 BIT(PERF_CSTATE_PKG_C9_RES) | 497 BIT(PERF_CSTATE_PKG_C10_RES), 498 }; 499 500 static const struct cstate_model icx_cstates __initconst = { 501 .core_events = 
BIT(PERF_CSTATE_CORE_C1_RES) | 502 BIT(PERF_CSTATE_CORE_C6_RES), 503 504 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | 505 BIT(PERF_CSTATE_PKG_C6_RES), 506 }; 507 508 static const struct cstate_model adl_cstates __initconst = { 509 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | 510 BIT(PERF_CSTATE_CORE_C6_RES) | 511 BIT(PERF_CSTATE_CORE_C7_RES), 512 513 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | 514 BIT(PERF_CSTATE_PKG_C3_RES) | 515 BIT(PERF_CSTATE_PKG_C6_RES) | 516 BIT(PERF_CSTATE_PKG_C8_RES) | 517 BIT(PERF_CSTATE_PKG_C10_RES), 518 }; 519 520 static const struct cstate_model lnl_cstates __initconst = { 521 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | 522 BIT(PERF_CSTATE_CORE_C6_RES) | 523 BIT(PERF_CSTATE_CORE_C7_RES), 524 525 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | 526 BIT(PERF_CSTATE_PKG_C6_RES) | 527 BIT(PERF_CSTATE_PKG_C10_RES), 528 }; 529 530 static const struct cstate_model slm_cstates __initconst = { 531 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | 532 BIT(PERF_CSTATE_CORE_C6_RES), 533 534 .pkg_events = BIT(PERF_CSTATE_PKG_C6_RES), 535 .quirks = SLM_PKG_C6_USE_C7_MSR, 536 }; 537 538 539 static const struct cstate_model knl_cstates __initconst = { 540 .core_events = BIT(PERF_CSTATE_CORE_C6_RES), 541 542 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | 543 BIT(PERF_CSTATE_PKG_C3_RES) | 544 BIT(PERF_CSTATE_PKG_C6_RES), 545 .quirks = KNL_CORE_C6_MSR, 546 }; 547 548 549 static const struct cstate_model glm_cstates __initconst = { 550 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | 551 BIT(PERF_CSTATE_CORE_C3_RES) | 552 BIT(PERF_CSTATE_CORE_C6_RES), 553 554 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | 555 BIT(PERF_CSTATE_PKG_C3_RES) | 556 BIT(PERF_CSTATE_PKG_C6_RES) | 557 BIT(PERF_CSTATE_PKG_C10_RES), 558 }; 559 560 static const struct cstate_model grr_cstates __initconst = { 561 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | 562 BIT(PERF_CSTATE_CORE_C6_RES), 563 564 .module_events = BIT(PERF_CSTATE_MODULE_C6_RES), 565 }; 566 567 static const struct cstate_model 
srf_cstates __initconst = { 568 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | 569 BIT(PERF_CSTATE_CORE_C6_RES), 570 571 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | 572 BIT(PERF_CSTATE_PKG_C6_RES), 573 574 .module_events = BIT(PERF_CSTATE_MODULE_C6_RES), 575 }; 576 577 578 static const struct x86_cpu_id intel_cstates_match[] __initconst = { 579 X86_MATCH_VFM(INTEL_NEHALEM, &nhm_cstates), 580 X86_MATCH_VFM(INTEL_NEHALEM_EP, &nhm_cstates), 581 X86_MATCH_VFM(INTEL_NEHALEM_EX, &nhm_cstates), 582 583 X86_MATCH_VFM(INTEL_WESTMERE, &nhm_cstates), 584 X86_MATCH_VFM(INTEL_WESTMERE_EP, &nhm_cstates), 585 X86_MATCH_VFM(INTEL_WESTMERE_EX, &nhm_cstates), 586 587 X86_MATCH_VFM(INTEL_SANDYBRIDGE, &snb_cstates), 588 X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &snb_cstates), 589 590 X86_MATCH_VFM(INTEL_IVYBRIDGE, &snb_cstates), 591 X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &snb_cstates), 592 593 X86_MATCH_VFM(INTEL_HASWELL, &snb_cstates), 594 X86_MATCH_VFM(INTEL_HASWELL_X, &snb_cstates), 595 X86_MATCH_VFM(INTEL_HASWELL_G, &snb_cstates), 596 597 X86_MATCH_VFM(INTEL_HASWELL_L, &hswult_cstates), 598 599 X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &slm_cstates), 600 X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &slm_cstates), 601 X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &slm_cstates), 602 603 X86_MATCH_VFM(INTEL_BROADWELL, &snb_cstates), 604 X86_MATCH_VFM(INTEL_BROADWELL_D, &snb_cstates), 605 X86_MATCH_VFM(INTEL_BROADWELL_G, &snb_cstates), 606 X86_MATCH_VFM(INTEL_BROADWELL_X, &snb_cstates), 607 608 X86_MATCH_VFM(INTEL_SKYLAKE_L, &snb_cstates), 609 X86_MATCH_VFM(INTEL_SKYLAKE, &snb_cstates), 610 X86_MATCH_VFM(INTEL_SKYLAKE_X, &snb_cstates), 611 612 X86_MATCH_VFM(INTEL_KABYLAKE_L, &hswult_cstates), 613 X86_MATCH_VFM(INTEL_KABYLAKE, &hswult_cstates), 614 X86_MATCH_VFM(INTEL_COMETLAKE_L, &hswult_cstates), 615 X86_MATCH_VFM(INTEL_COMETLAKE, &hswult_cstates), 616 617 X86_MATCH_VFM(INTEL_CANNONLAKE_L, &cnl_cstates), 618 619 X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &knl_cstates), 620 X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &knl_cstates), 621 622 
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &glm_cstates), 623 X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &glm_cstates), 624 X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &glm_cstates), 625 X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &glm_cstates), 626 X86_MATCH_VFM(INTEL_ATOM_TREMONT, &glm_cstates), 627 X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &glm_cstates), 628 X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_cstates), 629 X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &srf_cstates), 630 X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &grr_cstates), 631 X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &srf_cstates), 632 633 X86_MATCH_VFM(INTEL_ICELAKE_L, &icl_cstates), 634 X86_MATCH_VFM(INTEL_ICELAKE, &icl_cstates), 635 X86_MATCH_VFM(INTEL_ICELAKE_X, &icx_cstates), 636 X86_MATCH_VFM(INTEL_ICELAKE_D, &icx_cstates), 637 X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &icx_cstates), 638 X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &icx_cstates), 639 X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &icx_cstates), 640 X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &icx_cstates), 641 642 X86_MATCH_VFM(INTEL_TIGERLAKE_L, &icl_cstates), 643 X86_MATCH_VFM(INTEL_TIGERLAKE, &icl_cstates), 644 X86_MATCH_VFM(INTEL_ROCKETLAKE, &icl_cstates), 645 X86_MATCH_VFM(INTEL_ALDERLAKE, &adl_cstates), 646 X86_MATCH_VFM(INTEL_ALDERLAKE_L, &adl_cstates), 647 X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_cstates), 648 X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &adl_cstates), 649 X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_cstates), 650 X86_MATCH_VFM(INTEL_METEORLAKE, &adl_cstates), 651 X86_MATCH_VFM(INTEL_METEORLAKE_L, &adl_cstates), 652 X86_MATCH_VFM(INTEL_ARROWLAKE, &adl_cstates), 653 X86_MATCH_VFM(INTEL_ARROWLAKE_H, &adl_cstates), 654 X86_MATCH_VFM(INTEL_ARROWLAKE_U, &adl_cstates), 655 X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_cstates), 656 X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &lnl_cstates), 657 { }, 658 }; 659 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); 660 661 static int __init cstate_probe(const struct cstate_model *cm) 662 { 663 /* SLM has different MSR for PKG C6 */ 664 if (cm->quirks & SLM_PKG_C6_USE_C7_MSR) 665 
pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY; 666 667 /* KNL has different MSR for CORE C6 */ 668 if (cm->quirks & KNL_CORE_C6_MSR) 669 pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY; 670 671 672 core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX, 673 true, (void *) &cm->core_events); 674 675 pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX, 676 true, (void *) &cm->pkg_events); 677 678 module_msr_mask = perf_msr_probe(module_msr, PERF_CSTATE_MODULE_EVENT_MAX, 679 true, (void *) &cm->module_events); 680 681 has_cstate_core = !!core_msr_mask; 682 has_cstate_pkg = !!pkg_msr_mask; 683 has_cstate_module = !!module_msr_mask; 684 685 return (has_cstate_core || has_cstate_pkg || has_cstate_module) ? 0 : -ENODEV; 686 } 687 688 static inline void cstate_cleanup(void) 689 { 690 if (has_cstate_core) 691 perf_pmu_unregister(&cstate_core_pmu); 692 693 if (has_cstate_pkg) 694 perf_pmu_unregister(&cstate_pkg_pmu); 695 696 if (has_cstate_module) 697 perf_pmu_unregister(&cstate_module_pmu); 698 } 699 700 static int __init cstate_init(void) 701 { 702 int err; 703 704 if (has_cstate_core) { 705 err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1); 706 if (err) { 707 has_cstate_core = false; 708 pr_info("Failed to register cstate core pmu\n"); 709 cstate_cleanup(); 710 return err; 711 } 712 } 713 714 if (has_cstate_pkg) { 715 if (topology_max_dies_per_package() > 1) { 716 /* CLX-AP is multi-die and the cstate is die-scope */ 717 cstate_pkg_pmu.scope = PERF_PMU_SCOPE_DIE; 718 err = perf_pmu_register(&cstate_pkg_pmu, 719 "cstate_die", -1); 720 } else { 721 err = perf_pmu_register(&cstate_pkg_pmu, 722 cstate_pkg_pmu.name, -1); 723 } 724 if (err) { 725 has_cstate_pkg = false; 726 pr_info("Failed to register cstate pkg pmu\n"); 727 cstate_cleanup(); 728 return err; 729 } 730 } 731 732 if (has_cstate_module) { 733 err = perf_pmu_register(&cstate_module_pmu, cstate_module_pmu.name, -1); 734 if (err) { 735 
has_cstate_module = false; 736 pr_info("Failed to register cstate cluster pmu\n"); 737 cstate_cleanup(); 738 return err; 739 } 740 } 741 return 0; 742 } 743 744 static int __init cstate_pmu_init(void) 745 { 746 const struct x86_cpu_id *id; 747 int err; 748 749 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 750 return -ENODEV; 751 752 id = x86_match_cpu(intel_cstates_match); 753 if (!id) 754 return -ENODEV; 755 756 err = cstate_probe((const struct cstate_model *) id->driver_data); 757 if (err) 758 return err; 759 760 return cstate_init(); 761 } 762 module_init(cstate_pmu_init); 763 764 static void __exit cstate_pmu_exit(void) 765 { 766 cstate_cleanup(); 767 } 768 module_exit(cstate_pmu_exit); 769