/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 */

/*
 * This file exports cstate related free running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and they can be used from tools without special MSR
 * access code.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to counters' scope and category, two PMUs are registered
 * with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
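 *
 * For example, once the PMUs below are registered, the counters can be
 * read system-wide with standard perf tooling:
 *
 *   perf stat -e cstate_core/c6-residency/ -a -- sleep 1
 *
 * (On multi-die packages the package PMU is registered under the name
 * 'cstate_die' instead of 'cstate_pkg'; see cstate_init().)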
 *
 * Model specific counters:
 *	MSR_CORE_C1_RES: CORE C1 Residency Counter
 *			 perf code: 0x00
 *			 Available model: SLM,AMT,GLM,CNL
 *			 Scope: Core (each processor core has a MSR)
 *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
 *						CNL,KBL,CML
 *			       Scope: Core
 *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL,GLM,CNL,KBL,CML,ICL
 *			       Scope: Core
 *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *			       perf code: 0x03
 *			       Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
 *						ICL
 *			       Scope: Core
 *	MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
 *			      perf code: 0x00
 *			      Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
 *					       KBL,CML,ICL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
 *			      perf code: 0x01
 *			      Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
 *					       GLM,CNL,KBL,CML,ICL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
 *			      perf code: 0x02
 *			      Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *					       SKL,KNL,GLM,CNL,KBL,CML,ICL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
 *			      perf code: 0x03
 *			      Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
 *					       KBL,CML,ICL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
 *			      perf code: 0x04
 *			      Available model: HSW ULT,KBL,CNL,CML,ICL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
 *			      perf code: 0x05
 *			      Available model: HSW ULT,KBL,CNL,CML,ICL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *			       perf code: 0x06
 *			       Available model: HSW ULT,KBL,GLM,CNL,CML,ICL
 *			       Scope: Package (physical package)
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
#include "../probe.h"

MODULE_LICENSE("GPL");

#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct kobject *kobj,	\
				struct kobj_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct kobj_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);

/* Model -> events mapping */
struct cstate_model {
	unsigned long		core_events;
	unsigned long		pkg_events;
	unsigned long		quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
#define KNL_CORE_C6_MSR		(1UL << 1)

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
};


/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03");

static unsigned long core_msr_mask;

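/*
 * PMU_EVENT_GROUP() wraps each event attribute above in its own "events"
 * attribute group.  The groups are hooked up through the PMUs' .attr_update
 * arrays below; cstate_probe() uses perf_msr_probe() together with test_msr()
 * and the per-model event bitmaps to work out which counters are actually
 * present, so only those events are shown in sysfs and accepted by
 * cstate_pmu_event_init().
 */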
PMU_EVENT_GROUP(events, cstate_core_c1);
PMU_EVENT_GROUP(events, cstate_core_c3);
PMU_EVENT_GROUP(events, cstate_core_c6);
PMU_EVENT_GROUP(events, cstate_core_c7);

static bool test_msr(int idx, void *data)
{
	return test_bit(idx, (unsigned long *) data);
}

static struct perf_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&group_cstate_core_c1,	test_msr },
	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&group_cstate_core_c3,	test_msr },
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&group_cstate_core_c6,	test_msr },
	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&group_cstate_core_c7,	test_msr },
};

static struct attribute *attrs_empty[] = {
	NULL,
};

/*
 * There are no default events, but we need to create the
 * "events" group (with empty attrs) before updating
 * it with detected events.
 */
static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, attr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, attr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, attr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06");

static unsigned long pkg_msr_mask;

PMU_EVENT_GROUP(events, cstate_pkg_c2);
PMU_EVENT_GROUP(events, cstate_pkg_c3);
PMU_EVENT_GROUP(events, cstate_pkg_c6);
PMU_EVENT_GROUP(events, cstate_pkg_c7);
PMU_EVENT_GROUP(events, cstate_pkg_c8);
PMU_EVENT_GROUP(events, cstate_pkg_c9);
PMU_EVENT_GROUP(events, cstate_pkg_c10);

static struct perf_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,	&group_cstate_pkg_c2,	test_msr },
	[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,	&group_cstate_pkg_c3,	test_msr },
	[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,	&group_cstate_pkg_c6,	test_msr },
	[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,	&group_cstate_pkg_c7,	test_msr },
	[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,	&group_cstate_pkg_c8,	test_msr },
	[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,	&group_cstate_pkg_c9,	test_msr },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&group_cstate_pkg_c10,	test_msr },
};

static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};
static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}

static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
		if (!(core_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_core_cpu_mask,
				      topology_sibling_cpumask(event->cpu));
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
		if (!(pkg_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
				      topology_die_cpumask(event->cpu));
	} else {
		return -ENOENT;
	}

	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	event->cpu = cpu;
	event->hw.config = cfg;
	event->hw.idx = -1;
	return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
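	/*
	 * The residency MSRs are free running and read-only: return the
	 * raw value and let cstate_pmu_event_update() turn successive
	 * reads into deltas.
	 */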
	return val;
}

static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}

/*
 * Check if exiting cpu is the designated reader. If so migrate the
 * events when there is a valid target available
 */
static int cstate_cpu_exit(unsigned int cpu)
{
	unsigned int target;

	if (has_cstate_core &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
		}
	}

	if (has_cstate_pkg &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

		target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
		}
	}
	return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
	unsigned int target;

	/*
	 * If this is the first online thread of that core, set it in
	 * the core cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_core_cpu_mask,
				 topology_sibling_cpumask(cpu));

	if (has_cstate_core && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

	/*
	 * If this is the first online thread of that package, set it
	 * in the package cpu mask as the designated reader.
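	 * Package scope is tracked with the die cpumask, so on multi-die
	 * systems each die gets its own designated reader.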
	 */
	target = cpumask_any_and(&cstate_pkg_cpu_mask,
				 topology_die_cpumask(cpu));
	if (has_cstate_pkg && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

	return 0;
}

static const struct attribute_group *core_attr_update[] = {
	&group_cstate_core_c1,
	&group_cstate_core_c3,
	&group_cstate_core_c6,
	&group_cstate_core_c7,
	NULL,
};

static const struct attribute_group *pkg_attr_update[] = {
	&group_cstate_pkg_c2,
	&group_cstate_pkg_c3,
	&group_cstate_pkg_c6,
	&group_cstate_pkg_c7,
	&group_cstate_pkg_c8,
	&group_cstate_pkg_c9,
	&group_cstate_pkg_c10,
	NULL,
};

static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.attr_update	= core_attr_update,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.attr_update	= pkg_attr_update,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static const struct cstate_model nhm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model cnl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model icl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};


static const struct cstate_model knl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= KNL_CORE_C6_MSR,
};


static const struct cstate_model glm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};


#define X86_CSTATES_MODEL(model, states)				\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }

static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM,    nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE,    nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_G, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_L, hswult_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT,   slm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_D, slm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,      slm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_D, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_G, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_L, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_L, hswult_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE,   hswult_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_COMETLAKE_L, hswult_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_COMETLAKE, hswult_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_L, cnl_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT,   glm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_D, glm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, icl_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE,   icl_cstates),
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	/* KNL has different MSR for CORE C6 */
	if (cm->quirks & KNL_CORE_C6_MSR)
		pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;


	core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX,
				       true, (void *) &cm->core_events);

	pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
				      true, (void *) &cm->pkg_events);

	has_cstate_core = !!core_msr_mask;
	has_cstate_pkg  = !!pkg_msr_mask;

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
	int err;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	if (has_cstate_pkg) {
		if (topology_max_die_per_package() > 1) {
			err = perf_pmu_register(&cstate_pkg_pmu,
						"cstate_die", -1);
		} else {
			err = perf_pmu_register(&cstate_pkg_pmu,
						cstate_pkg_pmu.name, -1);
		}
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			return err;
		}
	}
	return 0;
}

static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *id;
	int err;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_cstates_match);
	if (!id)
		return -ENODEV;

	err = cstate_probe((const struct cstate_model *) id->driver_data);
	if (err)
		return err;

	return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
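	/* Undo cstate_init(): remove the hotplug states and unregister the PMUs. */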
	cstate_cleanup();
}
module_exit(cstate_pmu_exit);