/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 */

/*
 * This file exports cstate related free running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and tools can use them without special MSR access
 * code.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to counters' scope and category, two PMUs are registered
 * with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
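 *
 * As a usage sketch (not taken from this file): since only system-wide
 * counting is supported, the exported events are typically read with
 * something like
 *
 *   perf stat -e cstate_core/c6-residency/,cstate_pkg/c6-residency/ -a -- sleep 1
 *
 * where the event names come from the sysfs attributes registered below.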
 *
 * Model specific counters:
 *	MSR_CORE_C1_RES: CORE C1 Residency Counter
 *			 perf code: 0x00
 *			 Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
 *			 Scope: Core (each processor core has a MSR)
 *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
 *						CNL,KBL,CML,TNT
 *			       Scope: Core
 *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
 *						TGL,TNT,RKL,ADL,RPL
 *			       Scope: Core
 *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *			       perf code: 0x03
 *			       Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
 *						ICL,TGL,RKL,ADL,RPL
 *			       Scope: Core
 *	MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
 *			      perf code: 0x00
 *			      Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
 *					       KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
 *					       RPL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
 *			      perf code: 0x01
 *			      Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
 *					       GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
 *					       ADL,RPL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
 *			      perf code: 0x02
 *			      Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *					       SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
 *					       TGL,TNT,RKL,ADL,RPL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
 *			      perf code: 0x03
 *			      Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
 *					       KBL,CML,ICL,TGL,RKL,ADL,RPL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
 *			      perf code: 0x04
 *			      Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
 *					       ADL,RPL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
 *			      perf code: 0x05
 *			      Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
 *					       ADL,RPL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *			       perf code: 0x06
 *			       Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
 *						TNT,RKL,ADL,RPL
 *			       Scope: Package (physical package)
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
#include "../probe.h"

MODULE_LICENSE("GPL");

#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct device *dev,		\
				      struct device_attribute *attr,	\
				      char *page)			\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);

/* Model -> events mapping */
struct cstate_model {
	unsigned long	core_events;
	unsigned long	pkg_events;
	unsigned long	quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR		(1UL << 0)
#define KNL_CORE_C6_MSR			(1UL << 1)

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
};


/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03");
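
/*
 * A sketch of how the attributes above reach user space (assuming the
 * standard perf sysfs layout, not something defined in this file): each
 * detected event shows up as e.g.
 *
 *   /sys/bus/event_source/devices/cstate_core/events/c6-residency
 *
 * containing the string "event=0x02", which perf translates into
 * attr.config == 0x02 for cstate_pmu_event_init() below.
 */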

static unsigned long core_msr_mask;

PMU_EVENT_GROUP(events, cstate_core_c1);
PMU_EVENT_GROUP(events, cstate_core_c3);
PMU_EVENT_GROUP(events, cstate_core_c6);
PMU_EVENT_GROUP(events, cstate_core_c7);

static bool test_msr(int idx, void *data)
{
	return test_bit(idx, (unsigned long *) data);
}

static struct perf_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&group_cstate_core_c1,	test_msr },
	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&group_cstate_core_c3,	test_msr },
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&group_cstate_core_c6,	test_msr },
	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&group_cstate_core_c7,	test_msr },
};

static struct attribute *attrs_empty[] = {
	NULL,
};

/*
 * There are no default events, but we need to create
 * "events" group (with empty attrs) before updating
 * it with detected events.
 */
static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, attr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, attr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, attr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06");

static unsigned long pkg_msr_mask;

PMU_EVENT_GROUP(events, cstate_pkg_c2);
PMU_EVENT_GROUP(events, cstate_pkg_c3);
PMU_EVENT_GROUP(events, cstate_pkg_c6);
PMU_EVENT_GROUP(events, cstate_pkg_c7);
PMU_EVENT_GROUP(events, cstate_pkg_c8);
PMU_EVENT_GROUP(events, cstate_pkg_c9);
PMU_EVENT_GROUP(events, cstate_pkg_c10);

static struct perf_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES]  = { MSR_PKG_C2_RESIDENCY,	&group_cstate_pkg_c2,	test_msr },
	[PERF_CSTATE_PKG_C3_RES]  = { MSR_PKG_C3_RESIDENCY,	&group_cstate_pkg_c3,	test_msr },
	[PERF_CSTATE_PKG_C6_RES]  = { MSR_PKG_C6_RESIDENCY,	&group_cstate_pkg_c6,	test_msr },
	[PERF_CSTATE_PKG_C7_RES]  = { MSR_PKG_C7_RESIDENCY,	&group_cstate_pkg_c7,	test_msr },
	[PERF_CSTATE_PKG_C8_RES]  = { MSR_PKG_C8_RESIDENCY,	&group_cstate_pkg_c8,	test_msr },
	[PERF_CSTATE_PKG_C9_RES]  = { MSR_PKG_C9_RESIDENCY,	&group_cstate_pkg_c9,	test_msr },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&group_cstate_pkg_c10,	test_msr },
};

static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};
static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}

static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
		if (!(core_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_core_cpu_mask,
				      topology_sibling_cpumask(event->cpu));
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
		if (!(pkg_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
				      topology_die_cpumask(event->cpu));
	} else {
		return -ENOENT;
	}

	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	event->cpu = cpu;
	event->hw.config = cfg;
	event->hw.idx = -1;
	return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
	return val;
}

static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}

/*
 * Check if exiting cpu is the designated reader. If so migrate the
 * events when there is a valid target available
 */
static int cstate_cpu_exit(unsigned int cpu)
{
	unsigned int target;

	if (has_cstate_core &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
		}
	}

	if (has_cstate_pkg &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

		target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
		}
	}
	return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
	unsigned int target;

	/*
	 * If this is the first online thread of that core, set it in
	 * the core cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_core_cpu_mask,
				 topology_sibling_cpumask(cpu));

	if (has_cstate_core && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

	/*
	 * If this is the first online thread of that package, set it
	 * in the package cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_pkg_cpu_mask,
				 topology_die_cpumask(cpu));
	if (has_cstate_pkg && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

	return 0;
}

static const struct attribute_group *core_attr_update[] = {
	&group_cstate_core_c1,
	&group_cstate_core_c3,
	&group_cstate_core_c6,
	&group_cstate_core_c7,
	NULL,
};

static const struct attribute_group *pkg_attr_update[] = {
	&group_cstate_pkg_c2,
	&group_cstate_pkg_c3,
	&group_cstate_pkg_c6,
	&group_cstate_pkg_c7,
	&group_cstate_pkg_c8,
	&group_cstate_pkg_c9,
	&group_cstate_pkg_c10,
	NULL,
};

static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.attr_update	= core_attr_update,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.attr_update	= pkg_attr_update,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static const struct cstate_model nhm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model cnl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model icl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model icx_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
};

static const struct cstate_model adl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};


static const struct cstate_model knl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= KNL_CORE_C6_MSR,
};


static const struct cstate_model glm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};


static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX,		&nhm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX,		&nhm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&hswult_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&slm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D,	&slm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	&slm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&hswult_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	&cnl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&knl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&knl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	&glm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&icx_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&icx_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		&adl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		&adl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,		&adl_cstates),
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	/* KNL has different MSR for CORE C6 */
	if (cm->quirks & KNL_CORE_C6_MSR)
		pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;


	core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX,
				       true, (void *) &cm->core_events);

	pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
				      true, (void *) &cm->pkg_events);

	has_cstate_core = !!core_msr_mask;
	has_cstate_pkg  = !!pkg_msr_mask;

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
	int err;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	if (has_cstate_pkg) {
		if (topology_max_die_per_package() > 1) {
			err = perf_pmu_register(&cstate_pkg_pmu,
						"cstate_die", -1);
		} else {
			err = perf_pmu_register(&cstate_pkg_pmu,
						cstate_pkg_pmu.name, -1);
		}
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			return err;
		}
	}
	return 0;
}

static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *id;
	int err;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_cstates_match);
	if (!id)
		return -ENODEV;

	err = cstate_probe((const struct cstate_model *) id->driver_data);
	if (err)
		return err;

	return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
	cstate_cleanup();
}
module_exit(cstate_pmu_exit);