// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Running Average Power Limit (RAPL) Driver via MSR interface
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/powercap.h>
#include <linux/suspend.h>
#include <linux/intel_rapl.h>
#include <linux/processor.h>
#include <linux/platform_device.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>

/* Local defines */
#define MSR_PLATFORM_POWER_LIMIT	0x0000065C
#define MSR_VR_CURRENT_CONFIG		0x00000601

/* private data for RAPL MSR Interface */
static struct rapl_if_priv *rapl_msr_priv;

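/*
 * MSR register map and power-limit bits for Intel CPUs. PL4 is added
 * at probe time on CPUs listed in pl4_support_ids below.
 */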
static struct rapl_if_priv rapl_msr_priv_intel = {
	.type = RAPL_IF_MSR,
	.reg_unit.msr = MSR_RAPL_POWER_UNIT,
	.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PKG_POWER_LIMIT,
	.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr = MSR_PKG_ENERGY_STATUS,
	.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PERF].msr = MSR_PKG_PERF_STATUS,
	.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_INFO].msr = MSR_PKG_POWER_INFO,
	.regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PP0_POWER_LIMIT,
	.regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr = MSR_PP0_ENERGY_STATUS,
	.regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_POLICY].msr = MSR_PP0_POLICY,
	.regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PP1_POWER_LIMIT,
	.regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_STATUS].msr = MSR_PP1_ENERGY_STATUS,
	.regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_POLICY].msr = MSR_PP1_POLICY,
	.regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_LIMIT].msr = MSR_DRAM_POWER_LIMIT,
	.regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_STATUS].msr = MSR_DRAM_ENERGY_STATUS,
	.regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_PERF].msr = MSR_DRAM_PERF_STATUS,
	.regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_INFO].msr = MSR_DRAM_POWER_INFO,
	.regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PLATFORM_POWER_LIMIT,
	.regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_STATUS].msr = MSR_PLATFORM_ENERGY_STATUS,
	.limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2),
	.limits[RAPL_DOMAIN_PLATFORM] = BIT(POWER_LIMIT2),
};

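/*
 * AMD and Hygon CPUs: only the energy status MSRs for the package and
 * core (PP0) domains are mapped through this interface.
 */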
static struct rapl_if_priv rapl_msr_priv_amd = {
	.type = RAPL_IF_MSR,
	.reg_unit.msr = MSR_AMD_RAPL_POWER_UNIT,
	.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr = MSR_AMD_PKG_ENERGY_STATUS,
	.regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr = MSR_AMD_CORE_ENERGY_STATUS,
};

/* Handles CPU hotplug on multi-socket systems.
 * If a CPU goes online as the first CPU of the physical package
 * we add the RAPL package to the system. Similarly, when the last
 * CPU of the package is removed, we remove the RAPL package and its
 * associated domains. Cooling devices are handled accordingly at
 * per-domain level.
 */
static int rapl_cpu_online(unsigned int cpu)
{
	struct rapl_package *rp;

	rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true);
	if (!rp) {
		rp = rapl_add_package_cpuslocked(cpu, rapl_msr_priv, true);
		if (IS_ERR(rp))
			return PTR_ERR(rp);
	}
	cpumask_set_cpu(cpu, &rp->cpumask);
	return 0;
}

static int rapl_cpu_down_prep(unsigned int cpu)
{
	struct rapl_package *rp;
	int lead_cpu;

	rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true);
	if (!rp)
		return 0;

	cpumask_clear_cpu(cpu, &rp->cpumask);
	lead_cpu = cpumask_first(&rp->cpumask);
	if (lead_cpu >= nr_cpu_ids)
		rapl_remove_package_cpuslocked(rp);
	else if (rp->lead_cpu == cpu)
		rp->lead_cpu = lead_cpu;
	return 0;
}

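/* Read one RAPL MSR on the given CPU and mask out the irrelevant bits. */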
static int rapl_msr_read_raw(int cpu, struct reg_action *ra)
{
	if (rdmsrq_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) {
		pr_debug("failed to read msr 0x%x on cpu %d\n", ra->reg.msr, cpu);
		return -EIO;
	}
	ra->value &= ra->mask;
	return 0;
}

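/*
 * Read-modify-write of one RAPL MSR. Runs on the target CPU (see
 * rapl_msr_write_raw()), so the local *_safe MSR accessors are used.
 */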
static void rapl_msr_update_func(void *info)
{
	struct reg_action *ra = info;
	u64 val;

	ra->err = rdmsrq_safe(ra->reg.msr, &val);
	if (ra->err)
		return;

	val &= ~ra->mask;
	val |= ra->value;

	ra->err = wrmsrq_safe(ra->reg.msr, val);
}

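/* Update the masked bits of one RAPL MSR on the given CPU. */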
static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
{
	int ret;

	ret = smp_call_function_single(cpu, rapl_msr_update_func, ra, 1);
	if (WARN_ON_ONCE(ret))
		return ret;

	return ra->err;
}

/* List of CPU models verified to support Power Limit 4 (PL4). */
static const struct x86_cpu_id pl4_support_ids[] = {
	X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
	X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, NULL),
	X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
	X86_MATCH_VFM(INTEL_METEORLAKE, NULL),
	X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL),
	X86_MATCH_VFM(INTEL_ARROWLAKE_U, NULL),
	X86_MATCH_VFM(INTEL_ARROWLAKE_H, NULL),
	X86_MATCH_VFM(INTEL_PANTHERLAKE_L, NULL),
	{}
};

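/*
 * Select the register map for the boot CPU vendor, enable PL4 on CPUs
 * known to support it, register the "intel-rapl" powercap control type
 * and set up the CPU hotplug callbacks that add/remove RAPL packages.
 */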
static int rapl_msr_probe(struct platform_device *pdev)
{
	const struct x86_cpu_id *id = x86_match_cpu(pl4_support_ids);
	int ret;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rapl_msr_priv = &rapl_msr_priv_intel;
		break;
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		rapl_msr_priv = &rapl_msr_priv_amd;
		break;
	default:
		pr_err("intel-rapl does not support CPU vendor %d\n", boot_cpu_data.x86_vendor);
		return -ENODEV;
	}
	rapl_msr_priv->read_raw = rapl_msr_read_raw;
	rapl_msr_priv->write_raw = rapl_msr_write_raw;

	if (id) {
		rapl_msr_priv->limits[RAPL_DOMAIN_PACKAGE] |= BIT(POWER_LIMIT4);
		rapl_msr_priv->regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4].msr =
			MSR_VR_CURRENT_CONFIG;
		pr_info("PL4 support detected.\n");
	}

	rapl_msr_priv->control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
	if (IS_ERR(rapl_msr_priv->control_type)) {
		pr_debug("failed to register powercap control_type.\n");
		return PTR_ERR(rapl_msr_priv->control_type);
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online",
				rapl_cpu_online, rapl_cpu_down_prep);
	if (ret < 0)
		goto out;
	rapl_msr_priv->pcap_rapl_online = ret;

	return 0;

out:
	if (ret)
		powercap_unregister_control_type(rapl_msr_priv->control_type);
	return ret;
}

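/* Tear down the hotplug state and the powercap control type on removal. */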
static void rapl_msr_remove(struct platform_device *pdev)
{
	cpuhp_remove_state(rapl_msr_priv->pcap_rapl_online);
	powercap_unregister_control_type(rapl_msr_priv->control_type);
}

static const struct platform_device_id rapl_msr_ids[] = {
	{ .name = "intel_rapl_msr", },
	{}
};
MODULE_DEVICE_TABLE(platform, rapl_msr_ids);

static struct platform_driver intel_rapl_msr_driver = {
	.probe = rapl_msr_probe,
	.remove = rapl_msr_remove,
	.id_table = rapl_msr_ids,
	.driver = {
		.name = "intel_rapl_msr",
	},
};

module_platform_driver(intel_rapl_msr_driver);

MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit) control via MSR interface");
MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>");
MODULE_LICENSE("GPL v2");