/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 */

/*
 * This file exports cstate related free running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and they can be used from tools without special MSR
 * access code.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to counters' scope and category, two PMUs are registered
 * with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 *	MSR_CORE_C1_RES: CORE C1 Residency Counter
 *			 perf code: 0x00
 *			 Available model: SLM,AMT,GLM,CNL
 *			 Scope: Core (each processor core has a MSR)
 *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
 *						CNL
 *			       Scope: Core
 *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL,GLM,CNL
 *			       Scope: Core
 *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *			       perf code: 0x03
 *			       Available model: SNB,IVB,HSW,BDW,SKL,CNL
 *			       Scope: Core
 *	MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
 *			       perf code: 0x00
 *			       Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
 *						GLM,CNL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL,GLM,CNL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
 *			       perf code: 0x03
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
 *			       perf code: 0x04
 *			       Available model: HSW ULT,KBL,CNL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
 *			       perf code: 0x05
 *			       Available model: HSW ULT,KBL,CNL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *			       perf code: 0x06
 *			       Available model: HSW ULT,KBL,GLM,CNL
 *			       Scope: Package (physical package)
 *
 */
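
/*
 * Example usage (illustrative only; which events actually appear under
 * /sys/bus/event_source/devices/cstate_core/events/ and
 * .../cstate_pkg/events/ depends on the CPU model detected below):
 *
 *   # System-wide package C2/C6 and core C6 residency for 10 seconds:
 *   perf stat -a -e cstate_pkg/c2-residency/,cstate_pkg/c6-residency/ \
 *		  -e cstate_core/c6-residency/ -- sleep 10
 *
 * The reported counts are the raw residency MSR deltas accumulated over
 * the measurement interval.
 */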

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
#include "../probe.h"

MODULE_LICENSE("GPL");

#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct kobject *kobj,	\
				struct kobj_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct kobj_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
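
/*
 * For example, DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63")
 * below creates format_attr_core_event, whose show() routine prints
 * "config:0-63"; exported through the "format" attribute group it becomes
 * /sys/bus/event_source/devices/cstate_core/format/event.
 */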

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);

/* Model -> events mapping */
struct cstate_model {
	unsigned long		core_events;
	unsigned long		pkg_events;
	unsigned long		quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
#define KNL_CORE_C6_MSR		(1UL << 1)

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
};


/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03");

static unsigned long core_msr_mask;

PMU_EVENT_GROUP(events, cstate_core_c1);
PMU_EVENT_GROUP(events, cstate_core_c3);
PMU_EVENT_GROUP(events, cstate_core_c6);
PMU_EVENT_GROUP(events, cstate_core_c7);

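/*
 * perf_msr_probe() callback: an event is only considered if the bit for
 * its index is set in the model's core_events/pkg_events bitmap.
 */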
static bool test_msr(int idx, void *data)
{
	return test_bit(idx, (unsigned long *) data);
}

static struct perf_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&group_cstate_core_c1,	test_msr },
	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&group_cstate_core_c3,	test_msr },
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&group_cstate_core_c6,	test_msr },
	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&group_cstate_core_c7,	test_msr },
};

static struct attribute *attrs_empty[] = {
	NULL,
};

/*
 * There are no default events, but we need to create
 * "events" group (with empty attrs) before updating
 * it with detected events.
 */
static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};
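
/*
 * The model specific residency events themselves are attached later through
 * the PMU's .attr_update groups (core_attr_update/pkg_attr_update below);
 * perf_msr_probe() hides the groups whose MSR is not usable on this CPU.
 */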

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency,  attr_cstate_pkg_c2,  "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency,  attr_cstate_pkg_c3,  "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency,  attr_cstate_pkg_c6,  "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency,  attr_cstate_pkg_c7,  "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency,  attr_cstate_pkg_c8,  "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency,  attr_cstate_pkg_c9,  "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06");

static unsigned long pkg_msr_mask;

PMU_EVENT_GROUP(events, cstate_pkg_c2);
PMU_EVENT_GROUP(events, cstate_pkg_c3);
PMU_EVENT_GROUP(events, cstate_pkg_c6);
PMU_EVENT_GROUP(events, cstate_pkg_c7);
PMU_EVENT_GROUP(events, cstate_pkg_c8);
PMU_EVENT_GROUP(events, cstate_pkg_c9);
PMU_EVENT_GROUP(events, cstate_pkg_c10);

static struct perf_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES]  = { MSR_PKG_C2_RESIDENCY,	&group_cstate_pkg_c2,	test_msr },
	[PERF_CSTATE_PKG_C3_RES]  = { MSR_PKG_C3_RESIDENCY,	&group_cstate_pkg_c3,	test_msr },
	[PERF_CSTATE_PKG_C6_RES]  = { MSR_PKG_C6_RESIDENCY,	&group_cstate_pkg_c6,	test_msr },
	[PERF_CSTATE_PKG_C7_RES]  = { MSR_PKG_C7_RESIDENCY,	&group_cstate_pkg_c7,	test_msr },
	[PERF_CSTATE_PKG_C8_RES]  = { MSR_PKG_C8_RESIDENCY,	&group_cstate_pkg_c8,	test_msr },
	[PERF_CSTATE_PKG_C9_RES]  = { MSR_PKG_C9_RESIDENCY,	&group_cstate_pkg_c9,	test_msr },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&group_cstate_pkg_c10,	test_msr },
};

static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};
static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}

static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
		if (!(core_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_core_cpu_mask,
				      topology_sibling_cpumask(event->cpu));
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
		if (!(pkg_msr_mask & (1 << cfg)))
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
				      topology_die_cpumask(event->cpu));
	} else {
		return -ENOENT;
	}

	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	event->cpu = cpu;
	event->hw.config = cfg;
	event->hw.idx = -1;
	return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
	return val;
}

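/*
 * The residency MSRs are free running counters; fold the delta since the
 * last read into the event count. The cmpxchg() retry copes with a
 * concurrent update of prev_count.
 */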
static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}

/*
 * Check whether the exiting CPU is the designated reader. If so, migrate
 * the events to a valid target if one is available.
 */
static int cstate_cpu_exit(unsigned int cpu)
{
	unsigned int target;

	if (has_cstate_core &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
		}
	}

	if (has_cstate_pkg &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

		target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
		}
	}
	return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
	unsigned int target;

	/*
	 * If this is the first online thread of that core, set it in
	 * the core cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_core_cpu_mask,
				 topology_sibling_cpumask(cpu));

	if (has_cstate_core && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

	/*
	 * If this is the first online thread of that package, set it
	 * in the package cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_pkg_cpu_mask,
				 topology_die_cpumask(cpu));
	if (has_cstate_pkg && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

	return 0;
}

static const struct attribute_group *core_attr_update[] = {
	&group_cstate_core_c1,
	&group_cstate_core_c3,
	&group_cstate_core_c6,
	&group_cstate_core_c7,
	NULL,
};

static const struct attribute_group *pkg_attr_update[] = {
	&group_cstate_pkg_c2,
	&group_cstate_pkg_c3,
	&group_cstate_pkg_c6,
	&group_cstate_pkg_c7,
	&group_cstate_pkg_c8,
	&group_cstate_pkg_c9,
	&group_cstate_pkg_c10,
	NULL,
};

static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.attr_update	= core_attr_update,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.attr_update	= pkg_attr_update,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static const struct cstate_model nhm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model cnl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};


static const struct cstate_model knl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= KNL_CORE_C6_MSR,
};


static const struct cstate_model glm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};


#define X86_CSTATES_MODEL(model, states)				\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
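
/*
 * E.g. X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM, nhm_cstates) expands to
 * { X86_VENDOR_INTEL, 6, INTEL_FAM6_NEHALEM, X86_FEATURE_ANY,
 *   (unsigned long) &nhm_cstates }, an x86_cpu_id entry whose driver_data
 * points to the model description consumed by cstate_probe().
 */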

static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM,    nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE,    nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X,	   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,     slm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X,      snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  hswult_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_DESKTOP, snb_cstates),
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	/* KNL has different MSR for CORE C6 */
	if (cm->quirks & KNL_CORE_C6_MSR)
		pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;


	core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX,
				       true, (void *) &cm->core_events);

	pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
				      true, (void *) &cm->pkg_events);

	has_cstate_core = !!core_msr_mask;
	has_cstate_pkg  = !!pkg_msr_mask;

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
	int err;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	if (has_cstate_pkg) {
		if (topology_max_die_per_package() > 1) {
			err = perf_pmu_register(&cstate_pkg_pmu,
						"cstate_die", -1);
		} else {
			err = perf_pmu_register(&cstate_pkg_pmu,
						cstate_pkg_pmu.name, -1);
		}
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			return err;
		}
	}
	return 0;
}

static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *id;
	int err;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_cstates_match);
	if (!id)
		return -ENODEV;

	err = cstate_probe((const struct cstate_model *) id->driver_data);
	if (err)
		return err;

	return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
	cstate_cleanup();
}
module_exit(cstate_pmu_exit);