/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 */

/*
 * This file exports cstate-related free running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and tools can use them without special MSR access
 * code.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to the counters' scope and category, two PMUs are
 * registered with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 *	MSR_CORE_C1_RES: CORE C1 Residency Counter
 *			 perf code: 0x00
 *			 Available model: SLM,AMT,GLM,CNL,TNT,ADL
 *			 Scope: Core (each processor core has an MSR)
 *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
 *						CNL,KBL,CML,TNT
 *			       Scope: Core
 *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
 *						TNT,RKL,ADL
 *			       Scope: Core
 *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *			       perf code: 0x03
 *			       Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
 *						ICL,TGL,RKL,ADL
 *			       Scope: Core
 *	MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
 *			       perf code: 0x00
 *			       Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
 *						KBL,CML,ICL,TGL,TNT,RKL,ADL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
 *						GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
 *						ADL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
 *						TNT,RKL,ADL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
 *			       perf code: 0x03
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
 *						KBL,CML,ICL,TGL,RKL,ADL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
 *			       perf code: 0x04
 *			       Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
 *						ADL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
 *			       perf code: 0x05
 *			       Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
 *						ADL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *			       perf code: 0x06
 *			       Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
 *						TNT,RKL,ADL
 *			       Scope: Package (physical package)
 *
 */

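/*
 * Usage sketch (illustration only, not part of this driver): one plausible
 * way for user space to read a counter exported here, assuming the
 * cstate_pkg PMU was successfully registered. The hypothetical variables
 * pmu_type and cpu would be parsed from sysfs beforehand: pmu_type from
 * /sys/bus/event_source/devices/cstate_pkg/type and cpu from the "cpumask"
 * attribute of the same PMU. The config value is the "perf code" listed
 * above (0x02 == c6-residency).
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr = {
 *		.type	= pmu_type,	// dynamic PMU type from sysfs
 *		.size	= sizeof(attr),
 *		.config	= 0x02,		// PKG C6 residency, see table above
 *	};
 *	// pid == -1 and cpu >= 0: count system-wide on that CPU only;
 *	// per-task counting is rejected by cstate_pmu_event_init().
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 *	uint64_t count;
 *	read(fd, &count, sizeof(count));
 *
 * The perf tool offers the same through its sysfs-derived event syntax,
 * e.g. "perf stat -e cstate_pkg/c6-residency/ -a sleep 1".
 */
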
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
#include "../probe.h"

MODULE_LICENSE("GPL");

#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct device *dev,		\
				struct device_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);

/* Model -> events mapping */
struct cstate_model {
	unsigned long		core_events;
	unsigned long		pkg_events;
	unsigned long		quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR		(1UL << 0)
#define KNL_CORE_C6_MSR			(1UL << 1)

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
};


/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03");

static unsigned long core_msr_mask;

PMU_EVENT_GROUP(events, cstate_core_c1);
PMU_EVENT_GROUP(events, cstate_core_c3);
PMU_EVENT_GROUP(events, cstate_core_c6);
PMU_EVENT_GROUP(events, cstate_core_c7);

static bool test_msr(int idx, void *data)
{
	return test_bit(idx, (unsigned long *) data);
}

static struct perf_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES]  = { MSR_CORE_C1_RES,		&group_cstate_core_c1,	test_msr },
	[PERF_CSTATE_CORE_C3_RES]  = { MSR_CORE_C3_RESIDENCY,	&group_cstate_core_c3,	test_msr },
	[PERF_CSTATE_CORE_C6_RES]  = { MSR_CORE_C6_RESIDENCY,	&group_cstate_core_c6,	test_msr },
	[PERF_CSTATE_CORE_C7_RES]  = { MSR_CORE_C7_RESIDENCY,	&group_cstate_core_c7,	test_msr },
};

static struct attribute *attrs_empty[] = {
	NULL,
};

/*
 * There are no default events, but we need to create the
 * "events" group (with empty attrs) before updating it
 * with the detected events.
 */
static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, attr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, attr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, attr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06");

static unsigned long pkg_msr_mask;

PMU_EVENT_GROUP(events, cstate_pkg_c2);
PMU_EVENT_GROUP(events, cstate_pkg_c3);
PMU_EVENT_GROUP(events, cstate_pkg_c6);
PMU_EVENT_GROUP(events, cstate_pkg_c7);
PMU_EVENT_GROUP(events, cstate_pkg_c8);
PMU_EVENT_GROUP(events, cstate_pkg_c9);
PMU_EVENT_GROUP(events, cstate_pkg_c10);

static struct perf_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES]  = { MSR_PKG_C2_RESIDENCY,	&group_cstate_pkg_c2,	test_msr },
	[PERF_CSTATE_PKG_C3_RES]  = { MSR_PKG_C3_RESIDENCY,	&group_cstate_pkg_c3,	test_msr },
	[PERF_CSTATE_PKG_C6_RES]  = { MSR_PKG_C6_RESIDENCY,	&group_cstate_pkg_c6,	test_msr },
	[PERF_CSTATE_PKG_C7_RES]  = { MSR_PKG_C7_RESIDENCY,	&group_cstate_pkg_c7,	test_msr },
	[PERF_CSTATE_PKG_C8_RES]  = { MSR_PKG_C8_RESIDENCY,	&group_cstate_pkg_c8,	test_msr },
	[PERF_CSTATE_PKG_C9_RES]  = { MSR_PKG_C9_RESIDENCY,	&group_cstate_pkg_c9,	test_msr },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&group_cstate_pkg_c10,	test_msr },
};

static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};
static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}

static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
		if (!(core_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_core_cpu_mask,
				      topology_sibling_cpumask(event->cpu));
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
		if (!(pkg_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
				      topology_die_cpumask(event->cpu));
	} else {
		return -ENOENT;
	}

	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	event->cpu = cpu;
	event->hw.config = cfg;
	event->hw.idx = -1;
	return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
	return val;
}

static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}

/*
 * Check if the exiting cpu is the designated reader. If so, migrate the
 * events when there is a valid target available
 */
static int cstate_cpu_exit(unsigned int cpu)
{
	unsigned int target;

	if (has_cstate_core &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
		}
	}

	if (has_cstate_pkg &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

		target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
		}
	}
	return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
	unsigned int target;

	/*
	 * If this is the first online thread of that core, set it in
	 * the core cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_core_cpu_mask,
				 topology_sibling_cpumask(cpu));

	if (has_cstate_core && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

	/*
	 * If this is the first online thread of that package, set it
	 * in the package cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_pkg_cpu_mask,
				 topology_die_cpumask(cpu));
	if (has_cstate_pkg && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

	return 0;
}

static const struct attribute_group *core_attr_update[] = {
	&group_cstate_core_c1,
	&group_cstate_core_c3,
	&group_cstate_core_c6,
	&group_cstate_core_c7,
	NULL,
};

static const struct attribute_group *pkg_attr_update[] = {
	&group_cstate_pkg_c2,
	&group_cstate_pkg_c3,
	&group_cstate_pkg_c6,
	&group_cstate_pkg_c7,
	&group_cstate_pkg_c8,
	&group_cstate_pkg_c9,
	&group_cstate_pkg_c10,
	NULL,
};

static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.attr_update	= core_attr_update,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.attr_update	= pkg_attr_update,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static const struct cstate_model nhm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model cnl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model icl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model adl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};


static const struct cstate_model knl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= KNL_CORE_C6_MSR,
};


static const struct cstate_model glm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};


static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX,		&nhm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX,		&nhm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&hswult_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&slm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D,	&slm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	&slm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&hswult_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	&cnl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&knl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&knl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	&glm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		&adl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		&adl_cstates),
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	/* KNL has different MSR for CORE C6 */
	if (cm->quirks & KNL_CORE_C6_MSR)
		pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;


	core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX,
				       true, (void *) &cm->core_events);

	pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
				      true, (void *) &cm->pkg_events);

	has_cstate_core = !!core_msr_mask;
	has_cstate_pkg  = !!pkg_msr_mask;

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
	int err;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	if (has_cstate_pkg) {
		if (topology_max_die_per_package() > 1) {
			err = perf_pmu_register(&cstate_pkg_pmu,
						"cstate_die", -1);
		} else {
			err = perf_pmu_register(&cstate_pkg_pmu,
						cstate_pkg_pmu.name, -1);
		}
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			return err;
		}
	}
	return 0;
}

static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *id;
	int err;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_cstates_match);
	if (!id)
		return -ENODEV;

	err = cstate_probe((const struct cstate_model *) id->driver_data);
	if (err)
		return err;

	return cstate_init();
}
module_init(cstate_pmu_init);

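/*
 * Naming note: cstate_init() above registers the package-scope PMU as
 * "cstate_pkg" on single-die packages, but as "cstate_die" when
 * topology_max_die_per_package() reports more than one die. On such a
 * system the user-visible event spec would be, for example,
 * "cstate_die/c6-residency/" rather than "cstate_pkg/c6-residency/".
 */
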
static void __exit cstate_pmu_exit(void)
{
	cstate_cleanup();
}
module_exit(cstate_pmu_exit);