xref: /linux/drivers/powercap/intel_rapl_msr.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel Running Average Power Limit (RAPL) Driver via MSR interface
4  * Copyright (c) 2019, Intel Corporation.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/list.h>
11 #include <linux/types.h>
12 #include <linux/device.h>
13 #include <linux/slab.h>
14 #include <linux/log2.h>
15 #include <linux/bitmap.h>
16 #include <linux/delay.h>
17 #include <linux/sysfs.h>
18 #include <linux/cpu.h>
19 #include <linux/powercap.h>
20 #include <linux/suspend.h>
21 #include <linux/intel_rapl.h>
22 #include <linux/processor.h>
23 #include <linux/platform_device.h>
24 
25 #include <asm/cpu_device_id.h>
26 #include <asm/intel-family.h>
27 #include <asm/msr.h>
28 
29 /* Local defines */
30 #define MSR_PLATFORM_POWER_LIMIT	0x0000065C
31 #define MSR_VR_CURRENT_CONFIG		0x00000601
32 
/* private data for RAPL MSR Interface */
static struct rapl_if_priv *rapl_msr_priv;

/* True when the CPU is in pmu_support_ids, enabling the MSR-based RAPL PMU. */
static bool rapl_msr_pmu __ro_after_init;
37 
/* Register map and supported power limits for Intel CPUs. */
static struct rapl_if_priv rapl_msr_priv_intel = {
	.type = RAPL_IF_MSR,
	.reg_unit.msr = MSR_RAPL_POWER_UNIT,
	.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_LIMIT].msr	= MSR_PKG_POWER_LIMIT,
	.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr	= MSR_PKG_ENERGY_STATUS,
	.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PERF].msr	= MSR_PKG_PERF_STATUS,
	.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_INFO].msr	= MSR_PKG_POWER_INFO,
	.regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_LIMIT].msr	= MSR_PP0_POWER_LIMIT,
	.regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr	= MSR_PP0_ENERGY_STATUS,
	.regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_POLICY].msr	= MSR_PP0_POLICY,
	.regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_LIMIT].msr	= MSR_PP1_POWER_LIMIT,
	.regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_STATUS].msr	= MSR_PP1_ENERGY_STATUS,
	.regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_POLICY].msr	= MSR_PP1_POLICY,
	.regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_LIMIT].msr	= MSR_DRAM_POWER_LIMIT,
	.regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_STATUS].msr	= MSR_DRAM_ENERGY_STATUS,
	.regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_PERF].msr	= MSR_DRAM_PERF_STATUS,
	.regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_INFO].msr	= MSR_DRAM_POWER_INFO,
	.regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_LIMIT].msr	= MSR_PLATFORM_POWER_LIMIT,
	.regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_STATUS].msr	= MSR_PLATFORM_ENERGY_STATUS,
	/* PL2 is always advertised; PL4 is added at probe time on verified CPUs. */
	.limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2),
	.limits[RAPL_DOMAIN_PLATFORM] = BIT(POWER_LIMIT2),
};
60 
/* Register map for AMD/Hygon CPUs: energy status reporting only, no limits. */
static struct rapl_if_priv rapl_msr_priv_amd = {
	.type = RAPL_IF_MSR,
	.reg_unit.msr = MSR_AMD_RAPL_POWER_UNIT,
	.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr	= MSR_AMD_PKG_ENERGY_STATUS,
	.regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr	= MSR_AMD_CORE_ENERGY_STATUS,
};
67 
68 /* Handles CPU hotplug on multi-socket systems.
69  * If a CPU goes online as the first CPU of the physical package
70  * we add the RAPL package to the system. Similarly, when the last
71  * CPU of the package is removed, we remove the RAPL package and its
72  * associated domains. Cooling devices are handled accordingly at
73  * per-domain level.
74  */
75 static int rapl_cpu_online(unsigned int cpu)
76 {
77 	struct rapl_package *rp;
78 
79 	rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true);
80 	if (!rp) {
81 		rp = rapl_add_package_cpuslocked(cpu, rapl_msr_priv, true);
82 		if (IS_ERR(rp))
83 			return PTR_ERR(rp);
84 		if (rapl_msr_pmu)
85 			rapl_package_add_pmu_locked(rp);
86 	}
87 	cpumask_set_cpu(cpu, &rp->cpumask);
88 	return 0;
89 }
90 
91 static int rapl_cpu_down_prep(unsigned int cpu)
92 {
93 	struct rapl_package *rp;
94 	int lead_cpu;
95 
96 	rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true);
97 	if (!rp)
98 		return 0;
99 
100 	cpumask_clear_cpu(cpu, &rp->cpumask);
101 	lead_cpu = cpumask_first(&rp->cpumask);
102 	if (lead_cpu >= nr_cpu_ids) {
103 		if (rapl_msr_pmu)
104 			rapl_package_remove_pmu_locked(rp);
105 		rapl_remove_package_cpuslocked(rp);
106 	} else if (rp->lead_cpu == cpu) {
107 		rp->lead_cpu = lead_cpu;
108 	}
109 
110 	return 0;
111 }
112 
113 static int rapl_msr_read_raw(int cpu, struct reg_action *ra, bool pmu_ctx)
114 {
115 	/*
116 	 * When called from PMU context, perform MSR read directly using
117 	 * rdmsrq() without IPI overhead. Package-scoped MSRs are readable
118 	 * from any CPU in the package.
119 	 */
120 	if (pmu_ctx) {
121 		rdmsrq(ra->reg.msr, ra->value);
122 		goto out;
123 	}
124 
125 	if (rdmsrq_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) {
126 		pr_debug("failed to read msr 0x%x on cpu %d\n", ra->reg.msr, cpu);
127 		return -EIO;
128 	}
129 
130 out:
131 	ra->value &= ra->mask;
132 	return 0;
133 }
134 
135 static void rapl_msr_update_func(void *info)
136 {
137 	struct reg_action *ra = info;
138 	u64 val;
139 
140 	ra->err = rdmsrq_safe(ra->reg.msr, &val);
141 	if (ra->err)
142 		return;
143 
144 	val &= ~ra->mask;
145 	val |= ra->value;
146 
147 	ra->err = wrmsrq_safe(ra->reg.msr, val);
148 }
149 
150 static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
151 {
152 	int ret;
153 
154 	ret = smp_call_function_single(cpu, rapl_msr_update_func, ra, 1);
155 	if (WARN_ON_ONCE(ret))
156 		return ret;
157 
158 	return ra->err;
159 }
160 
/* CPUs verified to expose PL4 via MSR_VR_CURRENT_CONFIG. */
static const struct x86_cpu_id pl4_support_ids[] = {
	X86_MATCH_VFM(INTEL_ICELAKE_L, NULL),
	X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
	X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, NULL),
	X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
	X86_MATCH_VFM(INTEL_METEORLAKE, NULL),
	X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL),
	X86_MATCH_VFM(INTEL_ARROWLAKE_U, NULL),
	X86_MATCH_VFM(INTEL_ARROWLAKE_H, NULL),
	X86_MATCH_VFM(INTEL_PANTHERLAKE_L, NULL),
	X86_MATCH_VFM(INTEL_WILDCATLAKE_L, NULL),
	X86_MATCH_VFM(INTEL_NOVALAKE, NULL),
	X86_MATCH_VFM(INTEL_NOVALAKE_L, NULL),
	{}
};
180 
/* CPUs that support the MSR-based RAPL PMU (sets rapl_msr_pmu at probe). */
static const struct x86_cpu_id pmu_support_ids[] = {
	X86_MATCH_VFM(INTEL_PANTHERLAKE_L, NULL),
	X86_MATCH_VFM(INTEL_WILDCATLAKE_L, NULL),
	{}
};
187 
188 static int rapl_msr_probe(struct platform_device *pdev)
189 {
190 	const struct x86_cpu_id *id = x86_match_cpu(pl4_support_ids);
191 	int ret;
192 
193 	switch (boot_cpu_data.x86_vendor) {
194 	case X86_VENDOR_INTEL:
195 		rapl_msr_priv = &rapl_msr_priv_intel;
196 		break;
197 	case X86_VENDOR_HYGON:
198 	case X86_VENDOR_AMD:
199 		rapl_msr_priv = &rapl_msr_priv_amd;
200 		break;
201 	default:
202 		pr_err("intel-rapl does not support CPU vendor %d\n", boot_cpu_data.x86_vendor);
203 		return -ENODEV;
204 	}
205 	rapl_msr_priv->read_raw = rapl_msr_read_raw;
206 	rapl_msr_priv->write_raw = rapl_msr_write_raw;
207 
208 	if (id) {
209 		rapl_msr_priv->limits[RAPL_DOMAIN_PACKAGE] |= BIT(POWER_LIMIT4);
210 		rapl_msr_priv->regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4].msr =
211 			MSR_VR_CURRENT_CONFIG;
212 		pr_info("PL4 support detected.\n");
213 	}
214 
215 	if (x86_match_cpu(pmu_support_ids)) {
216 		rapl_msr_pmu = true;
217 		pr_info("MSR-based RAPL PMU support enabled\n");
218 	}
219 
220 	rapl_msr_priv->control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
221 	if (IS_ERR(rapl_msr_priv->control_type)) {
222 		pr_debug("failed to register powercap control_type.\n");
223 		return PTR_ERR(rapl_msr_priv->control_type);
224 	}
225 
226 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online",
227 				rapl_cpu_online, rapl_cpu_down_prep);
228 	if (ret < 0)
229 		goto out;
230 	rapl_msr_priv->pcap_rapl_online = ret;
231 
232 	return 0;
233 
234 out:
235 	if (ret)
236 		powercap_unregister_control_type(rapl_msr_priv->control_type);
237 	return ret;
238 }
239 
/*
 * Tear down in reverse order of probe: remove the hotplug state first
 * (which offlines and frees all RAPL packages via rapl_cpu_down_prep()),
 * then unregister the powercap control type.
 */
static void rapl_msr_remove(struct platform_device *pdev)
{
	cpuhp_remove_state(rapl_msr_priv->pcap_rapl_online);
	powercap_unregister_control_type(rapl_msr_priv->control_type);
}
245 
/* Platform device IDs this driver binds to (device created by the core). */
static const struct platform_device_id rapl_msr_ids[] = {
	{ .name = "intel_rapl_msr", },
	{}
};
MODULE_DEVICE_TABLE(platform, rapl_msr_ids);
251 
/* Platform driver glue; registered/unregistered by module_platform_driver(). */
static struct platform_driver intel_rapl_msr_driver = {
	.probe = rapl_msr_probe,
	.remove = rapl_msr_remove,
	.id_table = rapl_msr_ids,
	.driver = {
		.name = "intel_rapl_msr",
	},
};

module_platform_driver(intel_rapl_msr_driver);

MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit) control via MSR interface");
MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>");
MODULE_LICENSE("GPL v2");
266