xref: /linux/drivers/platform/x86/amd/hfi/hfi.c (revision 9d588a1140b9ae211581a7a154d0b806d8cd8238)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Hardware Feedback Interface Driver
 *
 * Copyright (C) 2025 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Authors: Perry Yuan <Perry.Yuan@amd.com>
 *          Mario Limonciello <mario.limonciello@amd.com>
 */

#define pr_fmt(fmt)  "amd-hfi: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mailbox_client.h>
#include <linux/mutex.h>
#include <linux/percpu-defs.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/workqueue.h>

#include <asm/cpu_device_id.h>

#include <acpi/pcc.h>
#include <acpi/cppc_acpi.h>

#define AMD_HFI_DRIVER		"amd_hfi"
#define AMD_HFI_MAILBOX_COUNT		1
#define AMD_HETERO_RANKING_TABLE_VER	2

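/*
 * CPUID leaf 0x80000027: EAX reports the number of supported workload
 * classes (see amd_hfi_alloc_class_data()).
 */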
#define AMD_HETERO_CPUID_27	0x80000027

static struct platform_device *device;

/**
 * struct amd_shmem_info - Shared memory table for AMD HFI
 *
 * @header:	The PCCT table header including signature, length, flags and command.
 * @version_number:		Version number of the table
 * @n_logical_processors:	Number of logical processors
 * @n_capabilities:		Number of ranking dimensions (performance, efficiency, etc.)
 * @table_update_context:	Command being sent over the subspace
 * @n_bitmaps:			Number of 32-bit bitmaps needed to enumerate all the APIC IDs.
 *				This is based on the maximum APIC ID enumerated in the system.
 * @reserved:			24-bit spare
 * @table_data:			Bit map(s) of enabled logical processors,
 *				followed by the ranking data for each logical processor
 */
struct amd_shmem_info {
	struct acpi_pcct_ext_pcc_shared_memory header;
	u32	version_number		:8,
		n_logical_processors	:8,
		n_capabilities		:8,
		table_update_context	:8;
	u32	n_bitmaps		:8,
		reserved		:24;
	u32	table_data[];
};
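
/*
 * @table_data starts with @n_bitmaps bitmap words of enabled APIC IDs; the
 * ranking data follows, with each enabled logical processor contributing
 * @n_capabilities (efficiency, performance) u32 pairs.  See
 * amd_hfi_fill_metadata() for how the table is parsed.
 */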

struct amd_hfi_data {
	const char	*name;
	struct device	*dev;

	/* PCCT table related */
	struct pcc_mbox_chan	*pcc_chan;
	void __iomem		*pcc_comm_addr;
	struct acpi_subtable_header	*pcct_entry;
	struct amd_shmem_info	*shmem;

	struct dentry *dbgfs_dir;
};

/**
 * struct amd_hfi_classes - HFI class capabilities per CPU
 * @perf:	Performance capability
 * @eff:	Power efficiency capability
 *
 * Capabilities of a logical processor in the ranking table. These capabilities
 * are unitless and specific to each HFI class.
 */
struct amd_hfi_classes {
	u32	perf;
	u32	eff;
};

/**
 * struct amd_hfi_cpuinfo - HFI workload class info per CPU
 * @cpu:		CPU index
 * @apic_id:		APIC ID of the current CPU
 * @class_index:	workload class ID index
 * @nr_class:		maximum number of workload classes supported
 * @ipcc_scores:	IPCC scores for each class
 * @amd_hfi_classes:	current CPU workload class ranking data
 *
 * Parameters of a logical processor linked with the hardware feedback classes.
 */
struct amd_hfi_cpuinfo {
	int		cpu;
	u32		apic_id;
	s16		class_index;
	u8		nr_class;
	int		*ipcc_scores;
	struct amd_hfi_classes	*amd_hfi_classes;
};

static DEFINE_PER_CPU(struct amd_hfi_cpuinfo, amd_hfi_cpuinfo) = {.class_index = -1};

static DEFINE_MUTEX(hfi_cpuinfo_lock);

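/*
 * ITMT support is enabled from a work item: amd_hfi_probe() schedules it
 * after the ranking table has been parsed and the per-CPU core priorities
 * have been set via sched_set_itmt_core_prio().
 */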
static void amd_hfi_sched_itmt_work(struct work_struct *work)
{
	sched_set_itmt_support();
}
static DECLARE_WORK(sched_amd_hfi_itmt_work, amd_hfi_sched_itmt_work);

static int find_cpu_index_by_apicid(unsigned int target_apicid)
{
	int cpu_index;

	for_each_possible_cpu(cpu_index) {
		struct cpuinfo_x86 *info = &cpu_data(cpu_index);

		if (info->topo.apicid == target_apicid) {
			pr_debug("match APIC id %u for CPU index: %d\n",
				 info->topo.apicid, cpu_index);
			return cpu_index;
		}
	}

	return -ENODEV;
}

static int amd_hfi_fill_metadata(struct amd_hfi_data *amd_hfi_data)
{
	struct acpi_pcct_ext_pcc_slave *pcct_ext =
		(struct acpi_pcct_ext_pcc_slave *)amd_hfi_data->pcct_entry;
	void __iomem *pcc_comm_addr;
	u32 apic_start = 0;

	pcc_comm_addr = acpi_os_ioremap(amd_hfi_data->pcc_chan->shmem_base_addr,
					amd_hfi_data->pcc_chan->shmem_size);
	if (!pcc_comm_addr) {
		dev_err(amd_hfi_data->dev, "failed to ioremap PCC common region mem\n");
		return -ENOMEM;
	}

	memcpy_fromio(amd_hfi_data->shmem, pcc_comm_addr, pcct_ext->length);
	iounmap(pcc_comm_addr);

	if (amd_hfi_data->shmem->header.signature != PCC_SIGNATURE) {
		dev_err(amd_hfi_data->dev, "invalid signature in shared memory\n");
		return -EINVAL;
	}
	if (amd_hfi_data->shmem->version_number != AMD_HETERO_RANKING_TABLE_VER) {
		dev_err(amd_hfi_data->dev, "invalid version %d\n",
			amd_hfi_data->shmem->version_number);
		return -EINVAL;
	}

	for (unsigned int i = 0; i < amd_hfi_data->shmem->n_bitmaps; i++) {
		u32 bitmap = amd_hfi_data->shmem->table_data[i];

		for (unsigned int j = 0; j < BITS_PER_TYPE(u32); j++) {
			u32 apic_id = i * BITS_PER_TYPE(u32) + j;
			struct amd_hfi_cpuinfo *info;
			int cpu_index, apic_index;

			if (!(bitmap & BIT(j)))
				continue;

			cpu_index = find_cpu_index_by_apicid(apic_id);
			if (cpu_index < 0) {
				dev_warn(amd_hfi_data->dev, "APIC ID %u not found\n", apic_id);
				continue;
			}

			info = per_cpu_ptr(&amd_hfi_cpuinfo, cpu_index);
			info->apic_id = apic_id;

			/* Fill the ranking data for each logical processor */
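			/*
			 * apic_start is the ordinal of this enabled logical processor
			 * in APIC ID order; each processor occupies nr_class
			 * (efficiency, performance) u32 pairs in the ranking data.
			 */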
			apic_index = apic_start * info->nr_class * 2;
			for (unsigned int k = 0; k < info->nr_class; k++) {
				u32 *table = amd_hfi_data->shmem->table_data +
					     amd_hfi_data->shmem->n_bitmaps +
					     i * info->nr_class;

				info->amd_hfi_classes[k].eff = table[apic_index + 2 * k];
				info->amd_hfi_classes[k].perf = table[apic_index + 2 * k + 1];
			}
			apic_start++;
		}
	}

	return 0;
}

static int amd_hfi_alloc_class_data(struct platform_device *pdev)
{
	struct amd_hfi_cpuinfo *hfi_cpuinfo;
	struct device *dev = &pdev->dev;
	u32 nr_class_id;
	int idx;

	nr_class_id = cpuid_eax(AMD_HETERO_CPUID_27);
	if (nr_class_id > 255) {
		dev_err(dev, "number of supported classes too large: %u\n",
			nr_class_id);
		return -EINVAL;
	}

	for_each_possible_cpu(idx) {
		struct amd_hfi_classes *classes;
		int *ipcc_scores;

		classes = devm_kcalloc(dev,
				       nr_class_id,
				       sizeof(struct amd_hfi_classes),
				       GFP_KERNEL);
		if (!classes)
			return -ENOMEM;
		ipcc_scores = devm_kcalloc(dev, nr_class_id, sizeof(int), GFP_KERNEL);
		if (!ipcc_scores)
			return -ENOMEM;
		hfi_cpuinfo = per_cpu_ptr(&amd_hfi_cpuinfo, idx);
		hfi_cpuinfo->amd_hfi_classes = classes;
		hfi_cpuinfo->ipcc_scores = ipcc_scores;
		hfi_cpuinfo->nr_class = nr_class_id;
	}

	return 0;
}

static void amd_hfi_remove(struct platform_device *pdev)
{
	struct amd_hfi_data *dev = platform_get_drvdata(pdev);

	debugfs_remove_recursive(dev->dbgfs_dir);
}

static int amd_set_hfi_ipcc_score(struct amd_hfi_cpuinfo *hfi_cpuinfo, int cpu)
{
	for (int i = 0; i < hfi_cpuinfo->nr_class; i++)
		WRITE_ONCE(hfi_cpuinfo->ipcc_scores[i],
			   hfi_cpuinfo->amd_hfi_classes[i].perf);

	sched_set_itmt_core_prio(hfi_cpuinfo->ipcc_scores[0], cpu);

	return 0;
}

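/*
 * Toggle workload classification for @cpu via MSR_AMD_WORKLOAD_CLASS_CONFIG,
 * then write MSR_AMD_WORKLOAD_HRST to reset the workload classification
 * history.
 */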
static int amd_hfi_set_state(unsigned int cpu, bool state)
{
	int ret;

	ret = wrmsrq_on_cpu(cpu, MSR_AMD_WORKLOAD_CLASS_CONFIG, state ? 1 : 0);
	if (ret)
		return ret;

	return wrmsrq_on_cpu(cpu, MSR_AMD_WORKLOAD_HRST, 0x1);
}

/**
 * amd_hfi_online() - Enable workload classification on @cpu
 * @cpu: CPU on which workload classification will be enabled
 *
 * Return: 0 on success, negative error code on failure.
 */
static int amd_hfi_online(unsigned int cpu)
{
	struct amd_hfi_cpuinfo *hfi_info = per_cpu_ptr(&amd_hfi_cpuinfo, cpu);
	struct amd_hfi_classes *hfi_classes;
	int ret;

	if (WARN_ON_ONCE(!hfi_info))
		return -EINVAL;

	/*
	 * Check that @cpu has associated, initialized ranking data before
	 * enabling classification.
	 */
	hfi_classes = hfi_info->amd_hfi_classes;
	if (!hfi_classes)
		return -EINVAL;

	guard(mutex)(&hfi_cpuinfo_lock);

	ret = amd_hfi_set_state(cpu, true);
	if (ret)
		pr_err("WCT enable failed for CPU %u\n", cpu);

	return ret;
}

/**
 * amd_hfi_offline() - Disable workload classification on @cpu
 * @cpu: CPU on which workload classification will be disabled
 *
 * Remove @cpu from those covered by its HFI instance.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int amd_hfi_offline(unsigned int cpu)
{
	struct amd_hfi_cpuinfo *hfi_info = &per_cpu(amd_hfi_cpuinfo, cpu);
	int ret;

	if (WARN_ON_ONCE(!hfi_info))
		return -EINVAL;

	guard(mutex)(&hfi_cpuinfo_lock);

	ret = amd_hfi_set_state(cpu, false);
	if (ret)
		pr_err("WCT disable failed for CPU %u\n", cpu);

	return ret;
}

static int update_hfi_ipcc_scores(void)
{
	int cpu;
	int ret;

	for_each_possible_cpu(cpu) {
		struct amd_hfi_cpuinfo *hfi_cpuinfo = per_cpu_ptr(&amd_hfi_cpuinfo, cpu);

		ret = amd_set_hfi_ipcc_score(hfi_cpuinfo, cpu);
		if (ret)
			return ret;
	}

	return 0;
}

static int amd_hfi_metadata_parser(struct platform_device *pdev,
				   struct amd_hfi_data *amd_hfi_data)
{
	struct acpi_pcct_ext_pcc_slave *pcct_ext;
	struct acpi_subtable_header *pcct_entry;
	struct mbox_chan *pcc_mbox_channels;
	struct acpi_table_header *pcct_tbl;
	struct pcc_mbox_chan *pcc_chan;
	acpi_status status;
	int ret;

	pcc_mbox_channels = devm_kcalloc(&pdev->dev, AMD_HFI_MAILBOX_COUNT,
					 sizeof(*pcc_mbox_channels), GFP_KERNEL);
	if (!pcc_mbox_channels)
		return -ENOMEM;

	pcc_chan = devm_kcalloc(&pdev->dev, AMD_HFI_MAILBOX_COUNT,
				sizeof(*pcc_chan), GFP_KERNEL);
	if (!pcc_chan)
		return -ENOMEM;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
	if (ACPI_FAILURE(status) || !pcct_tbl)
		return -ENODEV;

	/* get pointer to the first PCC subspace entry */
	pcct_entry = (struct acpi_subtable_header *) (
			(unsigned long)pcct_tbl + sizeof(struct acpi_table_pcct));

	pcc_chan->mchan = &pcc_mbox_channels[0];

	amd_hfi_data->pcc_chan = pcc_chan;
	amd_hfi_data->pcct_entry = pcct_entry;
	pcct_ext = (struct acpi_pcct_ext_pcc_slave *)pcct_entry;

	if (pcct_ext->length <= 0) {
		ret = -EINVAL;
		goto out;
	}

	amd_hfi_data->shmem = devm_kzalloc(amd_hfi_data->dev, pcct_ext->length, GFP_KERNEL);
	if (!amd_hfi_data->shmem) {
		ret = -ENOMEM;
		goto out;
	}

	pcc_chan->shmem_base_addr = pcct_ext->base_address;
	pcc_chan->shmem_size = pcct_ext->length;

	/* parse the shared memory info from the PCCT table */
	ret = amd_hfi_fill_metadata(amd_hfi_data);

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(pcct_tbl);

	return ret;
}

static int class_capabilities_show(struct seq_file *s, void *unused)
{
	u32 cpu, idx;

	seq_puts(s, "CPU #\tWLC\tPerf\tEff\n");
	for_each_possible_cpu(cpu) {
		struct amd_hfi_cpuinfo *hfi_cpuinfo = per_cpu_ptr(&amd_hfi_cpuinfo, cpu);

		seq_printf(s, "%u", cpu);
		for (idx = 0; idx < hfi_cpuinfo->nr_class; idx++) {
			seq_printf(s, "\t%u\t%u\t%u\n", idx,
				   hfi_cpuinfo->amd_hfi_classes[idx].perf,
				   hfi_cpuinfo->amd_hfi_classes[idx].eff);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(class_capabilities);
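
/*
 * Exposed as "class_capabilities" in the "amd_hfi" debugfs directory created
 * by amd_hfi_probe() under arch_debugfs_dir; dumps the parsed per-CPU
 * ranking data.
 */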

static int amd_hfi_pm_resume(struct device *dev)
{
	int ret, cpu;

	for_each_online_cpu(cpu) {
		ret = amd_hfi_set_state(cpu, true);
		if (ret < 0) {
			dev_err(dev, "failed to enable workload class config: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int amd_hfi_pm_suspend(struct device *dev)
{
	int ret, cpu;

	for_each_online_cpu(cpu) {
		ret = amd_hfi_set_state(cpu, false);
		if (ret < 0) {
			dev_err(dev, "failed to disable workload class config: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(amd_hfi_pm_ops, amd_hfi_pm_suspend, amd_hfi_pm_resume);

static const struct acpi_device_id amd_hfi_platform_match[] = {
	{"AMDI0104", 0},
	{ }
};
MODULE_DEVICE_TABLE(acpi, amd_hfi_platform_match);

static int amd_hfi_probe(struct platform_device *pdev)
{
	struct amd_hfi_data *amd_hfi_data;
	int ret;

	if (!acpi_match_device(amd_hfi_platform_match, &pdev->dev))
		return -ENODEV;

	amd_hfi_data = devm_kzalloc(&pdev->dev, sizeof(*amd_hfi_data), GFP_KERNEL);
	if (!amd_hfi_data)
		return -ENOMEM;

	amd_hfi_data->dev = &pdev->dev;
	platform_set_drvdata(pdev, amd_hfi_data);

	ret = amd_hfi_alloc_class_data(pdev);
	if (ret)
		return ret;

	ret = amd_hfi_metadata_parser(pdev, amd_hfi_data);
	if (ret)
		return ret;

	ret = update_hfi_ipcc_scores();
	if (ret)
		return ret;

	/*
	 * Tasks will already be running at the time this happens. This is
	 * OK because rankings will be adjusted by the callbacks.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/amd_hfi:online",
				amd_hfi_online, amd_hfi_offline);
	if (ret < 0)
		return ret;

	schedule_work(&sched_amd_hfi_itmt_work);

	amd_hfi_data->dbgfs_dir = debugfs_create_dir("amd_hfi", arch_debugfs_dir);
	debugfs_create_file("class_capabilities", 0644, amd_hfi_data->dbgfs_dir, pdev,
			    &class_capabilities_fops);

	return 0;
}

static struct platform_driver amd_hfi_driver = {
	.driver = {
		.name = AMD_HFI_DRIVER,
		.pm = &amd_hfi_pm_ops,
		.acpi_match_table = ACPI_PTR(amd_hfi_platform_match),
	},
	.probe = amd_hfi_probe,
	.remove = amd_hfi_remove,
};

static int __init amd_hfi_init(void)
{
	int ret;

	if (acpi_disabled ||
	    !cpu_feature_enabled(X86_FEATURE_AMD_HTR_CORES) ||
	    !cpu_feature_enabled(X86_FEATURE_AMD_WORKLOAD_CLASS))
		return -ENODEV;

	device = platform_device_register_simple(AMD_HFI_DRIVER, -1, NULL, 0);
	if (IS_ERR(device)) {
		pr_err("unable to register HFI platform device\n");
		return PTR_ERR(device);
	}

	ret = platform_driver_register(&amd_hfi_driver);
	if (ret)
		pr_err("failed to register HFI driver\n");

	return ret;
}

static __exit void amd_hfi_exit(void)
{
	platform_driver_unregister(&amd_hfi_driver);
	platform_device_unregister(device);
}
module_init(amd_hfi_init);
module_exit(amd_hfi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AMD Hardware Feedback Interface Driver");