xref: /linux/drivers/platform/x86/intel/plr_tpmi.c (revision 4c2cd91bff6371b58e672e8791c3bfa70c1b821f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Performance Limit Reasons via TPMI
4  *
5  * Copyright (c) 2024, Intel Corporation.
6  */
7 
8 #include <linux/array_size.h>
9 #include <linux/auxiliary_bus.h>
10 #include <linux/bitfield.h>
11 #include <linux/bitmap.h>
12 #include <linux/debugfs.h>
13 #include <linux/device.h>
14 #include <linux/err.h>
15 #include <linux/gfp_types.h>
16 #include <linux/intel_tpmi.h>
17 #include <linux/intel_vsec.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/kstrtox.h>
21 #include <linux/lockdep.h>
22 #include <linux/module.h>
23 #include <linux/mod_devicetable.h>
24 #include <linux/mutex.h>
25 #include <linux/notifier.h>
26 #include <linux/seq_file.h>
27 #include <linux/sprintf.h>
28 #include <linux/types.h>
29 
30 #include "tpmi_power_domains.h"
31 
/* Register offsets within one per-die PLR TPMI region */
#define PLR_HEADER		0x00
#define PLR_MAILBOX_INTERFACE	0x08
#define PLR_MAILBOX_DATA	0x10
#define PLR_DIE_LEVEL		0x18

/* Mailbox interface register fields */
#define PLR_MODULE_ID_MASK	GENMASK_ULL(19, 12)	/* punit core number select */
#define PLR_RUN_BUSY		BIT_ULL(63)		/* set by SW, cleared by HW when done */

/* Command encoding in the mailbox interface register */
#define PLR_COMMAND_WRITE	1

/* All-ones header value: the die has no valid PLR block */
#define PLR_INVALID		GENMASK_ULL(63, 0)

/* RUN_BUSY polling interval and total timeout, in microseconds */
#define PLR_TIMEOUT_US		5
#define PLR_TIMEOUT_MAX_US	1000

/* Status bits below this index use plr_coarse_reasons[]; above, plr_fine_reasons[] */
#define PLR_COARSE_REASON_BITS	32
48 
struct tpmi_plr;

/* Per power-domain (die) state; one instance per TPMI resource */
struct tpmi_plr_die {
	void __iomem *base;	/* Mapped PLR register block for this die */
	struct mutex lock; /* Protect access to PLR mailbox */
	int package_id;		/* Physical package the die belongs to */
	int die_id;		/* Power-domain index (== resource index) */
	struct tpmi_plr *plr;	/* Back pointer to parent driver state */
};

/* Driver instance state, one per PLR auxiliary device */
struct tpmi_plr {
	struct dentry *dbgfs_dir;	/* "plr" debugfs dir; NULL once TPMI core exits */
	struct tpmi_plr_die *die_info;	/* Array of num_dies entries */
	int num_dies;
	struct auxiliary_device *auxdev;
	struct notifier_block nb;	/* TPMI core notifier, see intel_plr_notify() */
	struct mutex lock;	/* Protect access to dbgfs_dir */
};
67 
/*
 * Names for the coarse limit-reason bits (status bits 0 up to
 * PLR_COARSE_REASON_BITS), indexed directly by bit number.
 */
static const char * const plr_coarse_reasons[] = {
	"FREQUENCY",
	"CURRENT",
	"POWER",
	"THERMAL",
	"PLATFORM",
	"MCP",
	"RAS",
	"MISC",
	"QOS",
	"DFC",
};
80 
/*
 * Names for the fine-grained limit-reason bits, indexed by
 * (bit - PLR_COARSE_REASON_BITS). Holes in the hardware layout are
 * kept as explicit "UNKNOWN(n)" placeholders so the indexing stays dense.
 */
static const char * const plr_fine_reasons[] = {
	"FREQUENCY_CDYN0",
	"FREQUENCY_CDYN1",
	"FREQUENCY_CDYN2",
	"FREQUENCY_CDYN3",
	"FREQUENCY_CDYN4",
	"FREQUENCY_CDYN5",
	"FREQUENCY_FCT",
	"FREQUENCY_PCS_TRL",
	"CURRENT_MTPMAX",
	"POWER_FAST_RAPL",
	"POWER_PKG_PL1_MSR_TPMI",
	"POWER_PKG_PL1_MMIO",
	"POWER_PKG_PL1_PCS",
	"POWER_PKG_PL2_MSR_TPMI",
	"POWER_PKG_PL2_MMIO",
	"POWER_PKG_PL2_PCS",
	"POWER_PLATFORM_PL1_MSR_TPMI",
	"POWER_PLATFORM_PL1_MMIO",
	"POWER_PLATFORM_PL1_PCS",
	"POWER_PLATFORM_PL2_MSR_TPMI",
	"POWER_PLATFORM_PL2_MMIO",
	"POWER_PLATFORM_PL2_PCS",
	"UNKNOWN(22)",
	"THERMAL_PER_CORE",
	"DFC_UFS",
	"PLATFORM_PROCHOT",
	"PLATFORM_HOT_VR",
	"UNKNOWN(27)",
	"UNKNOWN(28)",
	"MISC_PCS_PSTATE",
};
113 
plr_read(struct tpmi_plr_die * plr_die,int offset)114 static u64 plr_read(struct tpmi_plr_die *plr_die, int offset)
115 {
116 	return readq(plr_die->base + offset);
117 }
118 
/* Write @val to the 64-bit PLR register at @offset within the die's region. */
static void plr_write(u64 val, struct tpmi_plr_die *plr_die, int offset)
{
	void __iomem *addr = plr_die->base + offset;

	writeq(val, addr);
}
123 
/*
 * Read the per-CPU limit-reason status through the PLR mailbox.
 *
 * Selects the punit core number for @cpu in the mailbox interface register,
 * sets RUN_BUSY, waits for hardware to clear it, then fetches the result
 * from the mailbox data register into @status.
 *
 * Caller must hold plr_die->lock (the mailbox is a shared resource).
 *
 * Returns 0 on success, or a negative error code if RUN_BUSY does not
 * clear within PLR_TIMEOUT_MAX_US.
 */
static int plr_read_cpu_status(struct tpmi_plr_die *plr_die, int cpu,
			       u64 *status)
{
	u64 regval;
	int ret;

	lockdep_assert_held(&plr_die->lock);

	regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu));
	regval |= PLR_RUN_BUSY;

	plr_write(regval, plr_die, PLR_MAILBOX_INTERFACE);

	ret = readq_poll_timeout(plr_die->base + PLR_MAILBOX_INTERFACE, regval,
				 !(regval & PLR_RUN_BUSY), PLR_TIMEOUT_US,
				 PLR_TIMEOUT_MAX_US);
	if (ret)
		return ret;

	*status = plr_read(plr_die, PLR_MAILBOX_DATA);

	return 0;
}
147 
/*
 * Clear the per-CPU limit-reason status through the PLR mailbox.
 *
 * Writes 0 into the mailbox data register first, then issues a WRITE
 * command for @cpu's punit core with RUN_BUSY set, and waits for the
 * hardware to acknowledge by clearing RUN_BUSY.
 *
 * Caller must hold plr_die->lock.
 *
 * Returns 0 on success, or a negative error code on mailbox timeout.
 */
static int plr_clear_cpu_status(struct tpmi_plr_die *plr_die, int cpu)
{
	u64 regval;

	lockdep_assert_held(&plr_die->lock);

	regval = FIELD_PREP(PLR_MODULE_ID_MASK, tpmi_get_punit_core_number(cpu));
	regval |= PLR_RUN_BUSY | PLR_COMMAND_WRITE;

	/* Data must be staged before the command kicks off the mailbox. */
	plr_write(0, plr_die, PLR_MAILBOX_DATA);

	plr_write(regval, plr_die, PLR_MAILBOX_INTERFACE);

	return readq_poll_timeout(plr_die->base + PLR_MAILBOX_INTERFACE, regval,
				  !(regval & PLR_RUN_BUSY), PLR_TIMEOUT_US,
				  PLR_TIMEOUT_MAX_US);
}
165 
/*
 * Print the names of all set limit-reason bits in @val (scanning the low
 * @bits bits) to seq file @s. Bits below PLR_COARSE_REASON_BITS map to
 * plr_coarse_reasons[], the rest to plr_fine_reasons[]; anything outside
 * either table prints as "UNKNOWN(bit)". Prints " none" when @val is zero
 * and always terminates the line with '\n'.
 */
static void plr_print_bits(struct seq_file *s, u64 val, int bits)
{
	const unsigned long bitmap[] = { BITMAP_FROM_U64(val) };
	int bit;

	for_each_set_bit(bit, bitmap, bits) {
		const char *name = NULL;

		if (bit < PLR_COARSE_REASON_BITS) {
			if (bit < ARRAY_SIZE(plr_coarse_reasons))
				name = plr_coarse_reasons[bit];
		} else {
			int fine = bit - PLR_COARSE_REASON_BITS;

			if (fine < ARRAY_SIZE(plr_fine_reasons))
				name = plr_fine_reasons[fine];
		}

		if (name)
			seq_printf(s, " %s", name);
		else
			seq_printf(s, " UNKNOWN(%d)", bit);
	}

	if (!val)
		seq_puts(s, " none");

	seq_putc(s, '\n');
}
194 
/*
 * debugfs "status" read: dump die-level limit reasons, then the per-CPU
 * reasons for every CPU whose power domain and package match this die.
 *
 * The die-level register is read without the lock (single MMIO read); the
 * per-CPU loop takes the mailbox lock for the whole scan. A mailbox error
 * aborts the dump and is returned to the reader.
 */
static int plr_status_show(struct seq_file *s, void *unused)
{
	struct tpmi_plr_die *plr_die = s->private;
	int ret;
	u64 val;

	val = plr_read(plr_die, PLR_DIE_LEVEL);
	seq_puts(s, "cpus");
	plr_print_bits(s, val, 32);

	guard(mutex)(&plr_die->lock);

	for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
		/* Only CPUs belonging to this die's power domain ... */
		if (plr_die->die_id != tpmi_get_power_domain_id(cpu))
			continue;

		/* ... and this die's physical package. */
		if (plr_die->package_id != topology_physical_package_id(cpu))
			continue;

		seq_printf(s, "cpu%d", cpu);
		ret = plr_read_cpu_status(plr_die, cpu, &val);
		if (ret) {
			dev_err(&plr_die->plr->auxdev->dev, "Failed to read PLR for cpu %d, ret=%d\n",
				cpu, ret);
			return ret;
		}

		plr_print_bits(s, val, 64);
	}

	return 0;
}
227 
/*
 * debugfs "status" write: only the value 0/false is accepted, and it
 * clears the die-level reasons plus every matching CPU's status.
 * Any other value yields -EINVAL.
 *
 * Per-CPU clear failures are deliberately ignored (best effort); the
 * write still reports full @count on that path.
 */
static ssize_t plr_status_write(struct file *filp, const char __user *ubuf,
				size_t count, loff_t *ppos)
{
	struct seq_file *s = filp->private_data;
	struct tpmi_plr_die *plr_die = s->private;
	bool val;
	int ret;

	ret = kstrtobool_from_user(ubuf, count, &val);
	if (ret)
		return ret;

	/* Writing anything but "0"/false is not a supported operation. */
	if (val != 0)
		return -EINVAL;

	plr_write(0, plr_die, PLR_DIE_LEVEL);

	guard(mutex)(&plr_die->lock);

	for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
		if (plr_die->die_id != tpmi_get_power_domain_id(cpu))
			continue;

		if (plr_die->package_id != topology_physical_package_id(cpu))
			continue;

		plr_clear_cpu_status(plr_die, cpu);
	}

	return count;
}
DEFINE_SHOW_STORE_ATTRIBUTE(plr_status);
260 
intel_plr_notify(struct notifier_block * self,unsigned long action,void * data)261 static int intel_plr_notify(struct notifier_block *self, unsigned long action, void *data)
262 {
263 	struct tpmi_plr *plr = container_of(self, struct tpmi_plr, nb);
264 
265 	if (action == TPMI_CORE_EXIT) {
266 		guard(mutex)(&plr->lock);
267 		plr->dbgfs_dir = NULL;
268 	}
269 
270 	return NOTIFY_DONE;
271 }
272 
intel_plr_register_notifier(struct notifier_block * nb)273 static int intel_plr_register_notifier(struct notifier_block *nb)
274 {
275 	nb->notifier_call = intel_plr_notify;
276 	nb->priority = 0;
277 	return tpmi_register_notifier(nb);
278 }
279 
/* Detach @nb from the TPMI core notifier chain. */
static void intel_plr_unregister_notifier(struct notifier_block *nb)
{
	tpmi_unregister_notifier(nb);
}
284 
intel_plr_probe(struct auxiliary_device * auxdev,const struct auxiliary_device_id * id)285 static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
286 {
287 	struct oobmsm_plat_info *plat_info;
288 	struct dentry *dentry;
289 	int i, num_resources;
290 	struct resource *res;
291 	struct tpmi_plr *plr;
292 	void __iomem *base;
293 	char name[17];
294 	int err;
295 
296 	plat_info = tpmi_get_platform_data(auxdev);
297 	if (!plat_info)
298 		return dev_err_probe(&auxdev->dev, -EINVAL, "No platform info\n");
299 
300 	dentry = tpmi_get_debugfs_dir(auxdev);
301 	if (!dentry)
302 		return dev_err_probe(&auxdev->dev, -ENODEV, "No TPMI debugfs directory.\n");
303 
304 	num_resources = tpmi_get_resource_count(auxdev);
305 	if (!num_resources)
306 		return -EINVAL;
307 
308 	plr = devm_kzalloc(&auxdev->dev, sizeof(*plr), GFP_KERNEL);
309 	if (!plr)
310 		return -ENOMEM;
311 
312 	err = devm_mutex_init(&auxdev->dev, &plr->lock);
313 	if (err)
314 		return err;
315 
316 	intel_plr_register_notifier(&plr->nb);
317 
318 	plr->die_info = devm_kcalloc(&auxdev->dev, num_resources, sizeof(*plr->die_info),
319 				     GFP_KERNEL);
320 	if (!plr->die_info) {
321 		err = -ENOMEM;
322 		goto err_notify;
323 	}
324 
325 	plr->num_dies = num_resources;
326 	plr->dbgfs_dir = debugfs_create_dir("plr", dentry);
327 	plr->auxdev = auxdev;
328 
329 	for (i = 0; i < num_resources; i++) {
330 		res = tpmi_get_resource_at_index(auxdev, i);
331 		if (!res) {
332 			err = dev_err_probe(&auxdev->dev, -EINVAL, "No resource\n");
333 			goto err;
334 		}
335 
336 		base = devm_ioremap_resource(&auxdev->dev, res);
337 		if (IS_ERR(base)) {
338 			err = PTR_ERR(base);
339 			goto err;
340 		}
341 
342 		plr->die_info[i].base = base;
343 		plr->die_info[i].package_id = plat_info->package_id;
344 		plr->die_info[i].die_id = i;
345 		plr->die_info[i].plr = plr;
346 		mutex_init(&plr->die_info[i].lock);
347 
348 		if (plr_read(&plr->die_info[i], PLR_HEADER) == PLR_INVALID)
349 			continue;
350 
351 		snprintf(name, sizeof(name), "domain%d", i);
352 
353 		dentry = debugfs_create_dir(name, plr->dbgfs_dir);
354 		debugfs_create_file("status", 0644, dentry, &plr->die_info[i],
355 				    &plr_status_fops);
356 	}
357 
358 	auxiliary_set_drvdata(auxdev, plr);
359 
360 	return 0;
361 
362 err:
363 	debugfs_remove_recursive(plr->dbgfs_dir);
364 err_notify:
365 	intel_plr_unregister_notifier(&plr->nb);
366 
367 	return err;
368 }
369 
/*
 * Remove: unregister the TPMI notifier first so intel_plr_notify() cannot
 * race with us, then remove the debugfs tree under the lock. dbgfs_dir may
 * already be NULL if the TPMI core exited first (debugfs_remove_recursive()
 * accepts NULL). Everything else is devm-managed.
 */
static void intel_plr_remove(struct auxiliary_device *auxdev)
{
	struct tpmi_plr *plr = auxiliary_get_drvdata(auxdev);

	intel_plr_unregister_notifier(&plr->nb);

	guard(mutex)(&plr->lock);
	debugfs_remove_recursive(plr->dbgfs_dir);
}
379 
/* Match the "tpmi-plr" feature device created by the intel_vsec/TPMI core */
static const struct auxiliary_device_id intel_plr_id_table[] = {
	{ .name = "intel_vsec.tpmi-plr" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, intel_plr_id_table);
385 
/* Auxiliary-bus driver glue; name defaults to KBUILD_MODNAME */
static struct auxiliary_driver intel_plr_aux_driver = {
	.id_table       = intel_plr_id_table,
	.remove         = intel_plr_remove,
	.probe          = intel_plr_probe,
};
module_auxiliary_driver(intel_plr_aux_driver);

MODULE_IMPORT_NS("INTEL_TPMI");
MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN");
MODULE_DESCRIPTION("Intel TPMI PLR Driver");
MODULE_LICENSE("GPL");
397