// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel-tpmi : Driver to enumerate TPMI features and create devices
 *
 * Copyright (c) 2023, Intel Corporation.
 * All Rights Reserved.
 *
 * The TPMI (Topology Aware Register and PM Capsule Interface) provides a
 * flexible, extendable and PCIe enumerable MMIO interface for PM features.
 *
 * For example, Intel RAPL (Running Average Power Limit) provides an MMIO
 * interface using TPMI. This has an advantage over the traditional MSR
 * (Model Specific Register) interface, where a thread needs to be scheduled
 * on the target CPU to read or write. Also, the RAPL features vary between
 * CPU models, and hence require a lot of model-specific code. TPMI instead
 * provides an architectural interface of hierarchical tables and fields,
 * which does not need any model-specific implementation.
 *
 * The TPMI interface uses a PCI VSEC structure to expose the location of
 * the MMIO region.
 *
 * This VSEC structure is present in the PCI configuration space of the
 * Intel Out-of-Band (OOB) device, which is handled by the Intel VSEC
 * driver. The Intel VSEC driver parses VSEC structures present in the PCI
 * configuration space of the given device and creates an auxiliary device
 * object for each of them. In particular, it creates an auxiliary device
 * object representing TPMI that can be bound by an auxiliary driver.
 *
 * This TPMI driver binds to the TPMI auxiliary device object created
 * by the Intel VSEC driver.
 *
 * The TPMI specification defines a PFS (PM Feature Structure) table.
 * This table is present in the TPMI MMIO region. The starting address
 * of the PFS is derived from the tBIR (BAR Indicator Register) and
 * "Address" fields of the VSEC header.
 *
 * Each TPMI PM feature has one entry in the PFS with a unique TPMI
 * ID and its access details. The TPMI driver creates device nodes
 * for the supported PM features.
 *
 * The names of the devices created by the TPMI driver start with the
 * "intel_vsec.tpmi-" prefix, which is followed by a specific name of the
 * given PM feature (for example, "intel_vsec.tpmi-rapl.0").
 *
 * The device nodes are created by using the "intel_vsec_add_aux()"
 * interface provided by the Intel VSEC driver.
 */
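
/*
 * For illustration only (not part of this driver): a minimal sketch of how
 * a hypothetical client feature driver could bind to one of the auxiliary
 * devices created here, for example "intel_vsec.tpmi-rapl". Matching is done
 * on the auxiliary device name; the bound driver then uses the helpers
 * exported below (tpmi_get_resource_count(), tpmi_get_resource_at_index(),
 * tpmi_get_platform_data()) to locate its MMIO instances.
 *
 *	static int example_rapl_probe(struct auxiliary_device *auxdev,
 *				      const struct auxiliary_device_id *id)
 *	{
 *		int count = tpmi_get_resource_count(auxdev);
 *
 *		dev_info(&auxdev->dev, "%d TPMI RAPL instances\n", count);
 *		return count ? 0 : -ENODEV;
 *	}
 *
 *	static const struct auxiliary_device_id example_rapl_ids[] = {
 *		{ .name = "intel_vsec.tpmi-rapl" },
 *		{ }
 *	};
 *
 *	static struct auxiliary_driver example_rapl_driver = {
 *		.id_table = example_rapl_ids,
 *		.probe = example_rapl_probe,
 *	};
 *	module_auxiliary_driver(example_rapl_driver);
 */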

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/intel_tpmi.h>
#include <linux/intel_vsec.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/security.h>
#include <linux/sizes.h>
#include <linux/string_helpers.h>

/**
 * struct intel_tpmi_pfs_entry - TPMI PM Feature Structure (PFS) entry
 * @tpmi_id:	TPMI feature identifier (what the feature is and its data format).
 * @num_entries: Number of feature interface instances present in the PFS.
 *		 This represents the maximum number of Power domains in the SoC.
 * @entry_size:	Interface instance entry size in 32-bit words.
 * @cap_offset:	Offset from the PM_Features base address to the base of the PM VSEC
 *		register bank in KB.
 * @attribute:	Feature attribute: 0=BIOS. 1=OS. 2-3=Reserved.
 * @reserved:	Bits for use in the future.
 *
 * Represents one TPMI feature entry data in the PFS retrieved as is
 * from the hardware.
 */
struct intel_tpmi_pfs_entry {
	u64 tpmi_id:8;
	u64 num_entries:8;
	u64 entry_size:16;
	u64 cap_offset:16;
	u64 attribute:2;
	u64 reserved:14;
} __packed;

/**
 * struct intel_tpmi_pm_feature - TPMI PM Feature information for a TPMI ID
 * @pfs_header:	PFS header retrieved from the hardware.
 * @vsec_offset: Starting MMIO address for this feature in bytes. Essentially
 *		 this offset = "Address" from VSEC header + PFS Capability
 *		 offset for this feature entry.
 * @vsec_dev:	Pointer to intel_vsec_device structure for this TPMI device
 *
 * Represents TPMI instance information for one TPMI ID.
 */
struct intel_tpmi_pm_feature {
	struct intel_tpmi_pfs_entry pfs_header;
	u64 vsec_offset;
	struct intel_vsec_device *vsec_dev;
};

/**
 * struct intel_tpmi_info - TPMI information for all IDs in an instance
 * @tpmi_features:	Pointer to a list of TPMI feature instances
 * @vsec_dev:		Pointer to intel_vsec_device structure for this TPMI device
 * @feature_count:	Number of TPMI instances pointed to by tpmi_features
 * @pfs_start:		Start of PFS offset for the TPMI instances in this device
 * @plat_info:		Stores platform info which can be used by the client drivers
 * @tpmi_control_mem:	Memory mapped IO for getting control information
 * @dbgfs_dir:		debugfs entry pointer
 *
 * Stores the information for all TPMI devices enumerated from a single PCI device.
 */
struct intel_tpmi_info {
	struct intel_tpmi_pm_feature *tpmi_features;
	struct intel_vsec_device *vsec_dev;
	int feature_count;
	u64 pfs_start;
	struct intel_tpmi_plat_info plat_info;
	void __iomem *tpmi_control_mem;
	struct dentry *dbgfs_dir;
};

/**
 * struct tpmi_info_header - CPU package ID to PCI device mapping information
 * @fn:		PCI function number
 * @dev:	PCI device number
 * @bus:	PCI bus number
 * @pkg:	CPU Package id
 * @segment:	PCI segment id
 * @partition:	Package Partition id
 * @cdie_mask:	Bitmap of compute dies in the current partition
 * @reserved:	Reserved for future use
 * @lock:	When set to 1 the register is locked and becomes read-only
 *		until next reset. Not for use by the OS driver.
 *
 * The structure to read hardware provided mapping information.
 */
struct tpmi_info_header {
	u64 fn:3;
	u64 dev:5;
	u64 bus:8;
	u64 pkg:8;
	u64 segment:8;
	u64 partition:2;
	u64 cdie_mask:16;
	u64 reserved:13;
	u64 lock:1;
} __packed;

/**
 * struct tpmi_feature_state - Structure to read hardware state of a feature
 * @enabled:	Enable state of a feature, 1: enabled, 0: disabled
 * @reserved_1:	Reserved for future use
 * @write_blocked: When set, all write operations to this feature are ignored
 * @read_blocked: When set, reads from this feature return all 0xFFs
 * @pcs_select:	Interface used by out of band software, not used in OS
 * @reserved_2:	Reserved for future use
 * @id:		TPMI ID of the feature
 * @reserved_3:	Reserved for future use
 * @locked:	When set to 1, OS can't change this register.
 *
 * The structure is used to read the hardware state of a TPMI feature. This
 * information is used for debug and for restricting operations on this feature.
 */
struct tpmi_feature_state {
	u32 enabled:1;
	u32 reserved_1:3;
	u32 write_blocked:1;
	u32 read_blocked:1;
	u32 pcs_select:1;
	u32 reserved_2:1;
	u32 id:8;
	u32 reserved_3:15;
	u32 locked:1;
} __packed;

/*
 * The size reported by hardware is in u32 units. The size comes from trusted
 * hardware, but it is better to verify it for pre-silicon platforms. Set the
 * size to 0 when it is invalid.
 */
#define TPMI_GET_SINGLE_ENTRY_SIZE(pfs)							\
({											\
	pfs->pfs_header.entry_size > SZ_1K ? 0 : pfs->pfs_header.entry_size << 2;	\
})
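
/*
 * Example (illustrative): a PFS entry_size of 0x40 32-bit words translates
 * to 0x40 << 2 = 256 bytes per feature instance; any entry_size above
 * SZ_1K words is treated as invalid and yields a size of 0.
 */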

/* Used during auxbus device creation */
static DEFINE_IDA(intel_vsec_tpmi_ida);

struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);

	return vsec_dev->priv_data;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_platform_data, INTEL_TPMI);

int tpmi_get_resource_count(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);

	if (vsec_dev)
		return vsec_dev->num_resources;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_count, INTEL_TPMI);

struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);

	if (vsec_dev && index < vsec_dev->num_resources)
		return &vsec_dev->resource[index];

	return NULL;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI);
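
/*
 * Usage sketch (hypothetical client code, not part of this driver): a bound
 * feature driver typically walks its per power domain MMIO instances with
 * the two helpers above:
 *
 *	int i, num_resources = tpmi_get_resource_count(auxdev);
 *
 *	for (i = 0; i < num_resources; i++) {
 *		struct resource *res = tpmi_get_resource_at_index(auxdev, i);
 *
 *		if (res)
 *			dev_dbg(&auxdev->dev, "instance %d at %pR\n", i, res);
 *	}
 */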

/* TPMI Control Interface */

#define TPMI_CONTROL_STATUS_OFFSET	0x00
#define TPMI_COMMAND_OFFSET		0x08
#define TMPI_CONTROL_DATA_VAL_OFFSET	0x0c

/*
 * The spec calls for a maximum of 1 second to get ownership in the worst
 * case. Poll at 10 ms intervals for up to 1 second.
 */
#define TPMI_CONTROL_TIMEOUT_US		(10 * USEC_PER_MSEC)
#define TPMI_CONTROL_TIMEOUT_MAX_US	(1 * USEC_PER_SEC)

#define TPMI_RB_TIMEOUT_US		(10 * USEC_PER_MSEC)
#define TPMI_RB_TIMEOUT_MAX_US		USEC_PER_SEC

/* TPMI Control status register defines */

#define TPMI_CONTROL_STATUS_RB		BIT_ULL(0)

#define TPMI_CONTROL_STATUS_OWNER	GENMASK_ULL(5, 4)
#define TPMI_OWNER_NONE			0
#define TPMI_OWNER_IN_BAND		1

#define TPMI_CONTROL_STATUS_CPL		BIT_ULL(6)
#define TPMI_CONTROL_STATUS_RESULT	GENMASK_ULL(15, 8)
#define TPMI_CONTROL_STATUS_LEN		GENMASK_ULL(31, 16)

#define TPMI_CMD_PKT_LEN		2
#define TPMI_CMD_STATUS_SUCCESS		0x40

/* TPMI command data registers */
#define TMPI_CONTROL_DATA_CMD		GENMASK_ULL(7, 0)
#define TPMI_CONTROL_DATA_VAL_FEATURE	GENMASK_ULL(48, 40)

/* Command to send via control interface */
#define TPMI_CONTROL_GET_STATE_CMD	0x10

#define TPMI_CONTROL_CMD_MASK		GENMASK_ULL(48, 40)

#define TPMI_CMD_LEN_MASK		GENMASK_ULL(18, 16)

/* Mutex to complete get feature status without interruption */
static DEFINE_MUTEX(tpmi_dev_lock);

static int tpmi_wait_for_owner(struct intel_tpmi_info *tpmi_info, u8 owner)
{
	u64 control;

	return readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET,
				  control, owner == FIELD_GET(TPMI_CONTROL_STATUS_OWNER, control),
				  TPMI_CONTROL_TIMEOUT_US, TPMI_CONTROL_TIMEOUT_MAX_US);
}

static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int feature_id,
				    struct tpmi_feature_state *feature_state)
{
	u64 control, data;
	int ret;

	if (!tpmi_info->tpmi_control_mem)
		return -EFAULT;

	mutex_lock(&tpmi_dev_lock);

	/* Wait for owner bit set to 0 (none) */
	ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_NONE);
	if (ret)
		goto err_unlock;

	/* set command id to 0x10 for TPMI_GET_STATE */
	data = FIELD_PREP(TMPI_CONTROL_DATA_CMD, TPMI_CONTROL_GET_STATE_CMD);

	/* 32 bits for DATA offset and +8 for feature_id field */
	data |= FIELD_PREP(TPMI_CONTROL_DATA_VAL_FEATURE, feature_id);

	/* Write at command offset for qword access */
	writeq(data, tpmi_info->tpmi_control_mem + TPMI_COMMAND_OFFSET);

	/* Wait for owner bit set to in-band */
	ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_IN_BAND);
	if (ret)
		goto err_unlock;

	/* Set Run Busy and packet length of 2 dwords */
	control = TPMI_CONTROL_STATUS_RB;
	control |= FIELD_PREP(TPMI_CONTROL_STATUS_LEN, TPMI_CMD_PKT_LEN);

	/* Write at status offset for qword access */
	writeq(control, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET);

	/* Wait for Run Busy clear */
	ret = readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET,
				 control, !(control & TPMI_CONTROL_STATUS_RB),
				 TPMI_RB_TIMEOUT_US, TPMI_RB_TIMEOUT_MAX_US);
	if (ret)
		goto done_proc;

	control = FIELD_GET(TPMI_CONTROL_STATUS_RESULT, control);
	if (control != TPMI_CMD_STATUS_SUCCESS) {
		ret = -EBUSY;
		goto done_proc;
	}

	/* Response is ready */
	memcpy_fromio(feature_state, tpmi_info->tpmi_control_mem + TMPI_CONTROL_DATA_VAL_OFFSET,
		      sizeof(*feature_state));

	ret = 0;

done_proc:
	/* Set CPL "completion" bit */
	writeq(TPMI_CONTROL_STATUS_CPL, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET);

err_unlock:
	mutex_unlock(&tpmi_dev_lock);

	return ret;
}

int tpmi_get_feature_status(struct auxiliary_device *auxdev,
			    int feature_id, bool *read_blocked, bool *write_blocked)
{
	struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent);
	struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev);
	struct tpmi_feature_state feature_state;
	int ret;

	ret = tpmi_read_feature_status(tpmi_info, feature_id, &feature_state);
	if (ret)
		return ret;

	*read_blocked = feature_state.read_blocked;
	*write_blocked = feature_state.write_blocked;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, INTEL_TPMI);

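/*
 * Usage sketch (hypothetical client code, not part of this driver): a
 * feature driver can consult the blocked state during probe and bail out,
 * or register read-only, when the hardware restricts access:
 *
 *	bool read_blocked, write_blocked;
 *	int ret;
 *
 *	ret = tpmi_get_feature_status(auxdev, TPMI_ID_RAPL, &read_blocked,
 *				      &write_blocked);
 *	if (ret || read_blocked)
 *		return -ENODEV;
 *	if (write_blocked)
 *		dev_dbg(&auxdev->dev, "interface is read only\n");
 */
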
struct dentry *tpmi_get_debugfs_dir(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent);
	struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev);

	return tpmi_info->dbgfs_dir;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_debugfs_dir, INTEL_TPMI);

static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
{
	struct intel_tpmi_info *tpmi_info = s->private;
	int locked, disabled, read_blocked, write_blocked;
	struct tpmi_feature_state feature_state;
	struct intel_tpmi_pm_feature *pfs;
	int ret, i;

	seq_printf(s, "tpmi PFS start offset 0x%llx\n", tpmi_info->pfs_start);
	seq_puts(s, "tpmi_id\t\tentries\t\tsize\t\tcap_offset\tattribute\tvsec_offset\tlocked\tdisabled\tread_blocked\twrite_blocked\n");
	for (i = 0; i < tpmi_info->feature_count; ++i) {
		pfs = &tpmi_info->tpmi_features[i];
		ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state);
		if (ret) {
			locked = 'U';
			disabled = 'U';
			read_blocked = 'U';
			write_blocked = 'U';
		} else {
			disabled = feature_state.enabled ? 'N' : 'Y';
			locked = feature_state.locked ? 'Y' : 'N';
			read_blocked = feature_state.read_blocked ? 'Y' : 'N';
			write_blocked = feature_state.write_blocked ? 'Y' : 'N';
		}
		seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\t\t%c\t\t%c\n",
			   pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries,
			   pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset,
			   pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled,
			   read_blocked, write_blocked);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(tpmi_pfs_dbg);

#define MEM_DUMP_COLUMN_COUNT	8

static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
{
	size_t row_size = MEM_DUMP_COLUMN_COUNT * sizeof(u32);
	struct intel_tpmi_pm_feature *pfs = s->private;
	int count, ret = 0;
	void __iomem *mem;
	u32 size;
	u64 off;
	u8 *buffer;

	size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
	if (!size)
		return -EIO;

	buffer = kmalloc(size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	off = pfs->vsec_offset;

	mutex_lock(&tpmi_dev_lock);

	for (count = 0; count < pfs->pfs_header.num_entries; ++count) {
		seq_printf(s, "TPMI Instance:%d offset:0x%llx\n", count, off);

		mem = ioremap(off, size);
		if (!mem) {
			ret = -ENOMEM;
			break;
		}

		memcpy_fromio(buffer, mem, size);

		seq_hex_dump(s, " ", DUMP_PREFIX_OFFSET, row_size, sizeof(u32), buffer, size,
			     false);

		iounmap(mem);

		off += size;
	}

	mutex_unlock(&tpmi_dev_lock);

	kfree(buffer);

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(tpmi_mem_dump);

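/*
 * The debugfs "mem_write" file below expects exactly three integers from
 * user space, parsed by parse_int_array_user() (a comma-separated list, as
 * accepted by get_options()): the instance (power domain) index, the
 * register offset within that instance, and the 32-bit value to write.
 * Illustrative example (directory names depend on the enumerated device):
 *
 *	echo "0,16,255" > /sys/kernel/debug/tpmi-0000:00:0b.0/tpmi-id-02/mem_write
 */
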
static ssize_t mem_write(struct file *file, const char __user *userbuf, size_t len, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	struct intel_tpmi_pm_feature *pfs = m->private;
	u32 addr, value, punit, size;
	u32 num_elems, *array;
	void __iomem *mem;
	int ret;

	size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
	if (!size)
		return -EIO;

	ret = parse_int_array_user(userbuf, len, (int **)&array);
	if (ret < 0)
		return ret;

	num_elems = *array;
	if (num_elems != 3) {
		ret = -EINVAL;
		goto exit_write;
	}

	punit = array[1];
	addr = array[2];
	value = array[3];

	if (punit >= pfs->pfs_header.num_entries) {
		ret = -EINVAL;
		goto exit_write;
	}

	if (addr >= size) {
		ret = -EINVAL;
		goto exit_write;
	}

	mutex_lock(&tpmi_dev_lock);

	mem = ioremap(pfs->vsec_offset + punit * size, size);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_mem_write;
	}

	writel(value, mem + addr);

	iounmap(mem);

	ret = len;

unlock_mem_write:
	mutex_unlock(&tpmi_dev_lock);

exit_write:
	kfree(array);

	return ret;
}

static int mem_write_show(struct seq_file *s, void *unused)
{
	return 0;
}

static int mem_write_open(struct inode *inode, struct file *file)
{
	return single_open(file, mem_write_show, inode->i_private);
}

static const struct file_operations mem_write_ops = {
	.open           = mem_write_open,
	.read           = seq_read,
	.write          = mem_write,
	.llseek         = seq_lseek,
	.release        = single_release,
};

#define tpmi_to_dev(info)	(&info->vsec_dev->pcidev->dev)

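/*
 * tpmi_dbgfs_register() below creates the debugfs hierarchy for one TPMI
 * instance: a top level directory named after the parent PCI device with a
 * "pfs_dump" file, plus one sub-directory per TPMI feature ID holding the
 * "mem_dump" and "mem_write" files. An illustrative layout (actual names
 * depend on the enumerated device):
 *
 *	/sys/kernel/debug/tpmi-0000:00:0b.0/
 *	    pfs_dump
 *	    tpmi-id-00/
 *	        mem_dump
 *	        mem_write
 *	    tpmi-id-02/
 *	        mem_dump
 *	        mem_write
 */
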
static void tpmi_dbgfs_register(struct intel_tpmi_info *tpmi_info)
{
	char name[64];
	int i;

	snprintf(name, sizeof(name), "tpmi-%s", dev_name(tpmi_to_dev(tpmi_info)));
	tpmi_info->dbgfs_dir = debugfs_create_dir(name, NULL);

	debugfs_create_file("pfs_dump", 0444, tpmi_info->dbgfs_dir, tpmi_info, &tpmi_pfs_dbg_fops);

	for (i = 0; i < tpmi_info->feature_count; ++i) {
		struct intel_tpmi_pm_feature *pfs;
		struct dentry *dir;

		pfs = &tpmi_info->tpmi_features[i];
		snprintf(name, sizeof(name), "tpmi-id-%02x", pfs->pfs_header.tpmi_id);
		dir = debugfs_create_dir(name, tpmi_info->dbgfs_dir);

		debugfs_create_file("mem_dump", 0444, dir, pfs, &tpmi_mem_dump_fops);
		debugfs_create_file("mem_write", 0644, dir, pfs, &mem_write_ops);
	}
}

static void tpmi_set_control_base(struct auxiliary_device *auxdev,
				  struct intel_tpmi_info *tpmi_info,
				  struct intel_tpmi_pm_feature *pfs)
{
	void __iomem *mem;
	u32 size;

	size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
	if (!size)
		return;

	mem = devm_ioremap(&auxdev->dev, pfs->vsec_offset, size);
	if (!mem)
		return;

	/* mem is pointing to TPMI CONTROL base */
	tpmi_info->tpmi_control_mem = mem;
}

static const char *intel_tpmi_name(enum intel_tpmi_id id)
{
	switch (id) {
	case TPMI_ID_RAPL:
		return "rapl";
	case TPMI_ID_PEM:
		return "pem";
	case TPMI_ID_UNCORE:
		return "uncore";
	case TPMI_ID_SST:
		return "sst";
	case TPMI_ID_PLR:
		return "plr";
	default:
		return NULL;
	}
}

/* String length for "tpmi-" + feature name (up to 8 bytes) + '\0' */
#define TPMI_FEATURE_NAME_LEN	14

static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
			      struct intel_tpmi_pm_feature *pfs,
			      u64 pfs_start)
{
	struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
	char feature_id_name[TPMI_FEATURE_NAME_LEN];
	struct intel_vsec_device *feature_vsec_dev;
	struct tpmi_feature_state feature_state;
	struct resource *res, *tmp;
	const char *name;
	int i, ret;

	ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state);
	if (ret)
		return ret;

	/*
	 * If the feature is not enabled, return -EOPNOTSUPP so that the caller
	 * continues with the other features in the PFS. This does not cause
	 * the driver load to fail.
	 */
	if (!feature_state.enabled)
		return -EOPNOTSUPP;

	name = intel_tpmi_name(pfs->pfs_header.tpmi_id);
	if (!name)
		return -EOPNOTSUPP;

	res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
	if (!feature_vsec_dev) {
		kfree(res);
		return -ENOMEM;
	}

	snprintf(feature_id_name, sizeof(feature_id_name), "tpmi-%s", name);

	for (i = 0, tmp = res; i < pfs->pfs_header.num_entries; i++, tmp++) {
		u64 entry_size_bytes = pfs->pfs_header.entry_size * sizeof(u32);

		tmp->start = pfs->vsec_offset + entry_size_bytes * i;
		tmp->end = tmp->start + entry_size_bytes - 1;
		tmp->flags = IORESOURCE_MEM;
	}

	feature_vsec_dev->pcidev = vsec_dev->pcidev;
	feature_vsec_dev->resource = res;
	feature_vsec_dev->num_resources = pfs->pfs_header.num_entries;
	feature_vsec_dev->priv_data = &tpmi_info->plat_info;
	feature_vsec_dev->priv_data_size = sizeof(tpmi_info->plat_info);
	feature_vsec_dev->ida = &intel_vsec_tpmi_ida;

	/*
	 * intel_vsec_add_aux() is resource managed, so no explicit
	 * delete is required on error or on module unload.
	 * The feature_vsec_dev and res memory are also freed as part of
	 * device deletion.
	 */
	return intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev,
				  feature_vsec_dev, feature_id_name);
}

static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info)
{
	struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
	int ret, i;

	for (i = 0; i < vsec_dev->num_resources; i++) {
		ret = tpmi_create_device(tpmi_info, &tpmi_info->tpmi_features[i],
					 tpmi_info->pfs_start);
		/*
		 * Fail the driver load if device creation fails for any
		 * supported feature; unsupported features (-EOPNOTSUPP) are
		 * simply skipped. Since intel_vsec_add_aux() is resource
		 * managed, no clean up is required for the successfully
		 * created devices.
		 */
		if (ret && ret != -EOPNOTSUPP)
			return ret;
	}

	return 0;
}

#define TPMI_INFO_BUS_INFO_OFFSET	0x08
#define TPMI_INFO_MAJOR_VERSION		0x00
#define TPMI_INFO_MINOR_VERSION		0x02

static int tpmi_process_info(struct intel_tpmi_info *tpmi_info,
			     struct intel_tpmi_pm_feature *pfs)
{
	struct tpmi_info_header header;
	void __iomem *info_mem;
	u64 feature_header;
	int ret = 0;

	info_mem = ioremap(pfs->vsec_offset, pfs->pfs_header.entry_size * sizeof(u32));
	if (!info_mem)
		return -ENOMEM;

	feature_header = readq(info_mem);
	if (TPMI_MAJOR_VERSION(feature_header) != TPMI_INFO_MAJOR_VERSION) {
		ret = -ENODEV;
		goto error_info_header;
	}

	memcpy_fromio(&header, info_mem + TPMI_INFO_BUS_INFO_OFFSET, sizeof(header));

	tpmi_info->plat_info.package_id = header.pkg;
	tpmi_info->plat_info.bus_number = header.bus;
	tpmi_info->plat_info.device_number = header.dev;
	tpmi_info->plat_info.function_number = header.fn;

	if (TPMI_MINOR_VERSION(feature_header) >= TPMI_INFO_MINOR_VERSION) {
		tpmi_info->plat_info.cdie_mask = header.cdie_mask;
		tpmi_info->plat_info.partition = header.partition;
		tpmi_info->plat_info.segment = header.segment;
	}

error_info_header:
	iounmap(info_mem);

	return ret;
}

static int tpmi_fetch_pfs_header(struct intel_tpmi_pm_feature *pfs, u64 start, int size)
{
	void __iomem *pfs_mem;

	pfs_mem = ioremap(start, size);
	if (!pfs_mem)
		return -ENOMEM;

	memcpy_fromio(&pfs->pfs_header, pfs_mem, sizeof(pfs->pfs_header));

	iounmap(pfs_mem);

	return 0;
}

#define TPMI_CAP_OFFSET_UNIT	1024

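/*
 * Worked example (illustrative numbers only): with the PFS located at
 * pfs_start = 0xfeda0000 and a PFS entry reporting cap_offset = 0x40,
 * the feature MMIO base computed below becomes
 *
 *	vsec_offset = 0xfeda0000 + 0x40 * TPMI_CAP_OFFSET_UNIT = 0xfeda0000 + 0x10000
 *
 * i.e. the cap_offset field is expressed in 1 KB units relative to the
 * start of the PFS.
 */
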
static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);
	struct pci_dev *pci_dev = vsec_dev->pcidev;
	struct intel_tpmi_info *tpmi_info;
	u64 pfs_start = 0;
	int ret, i;

	tpmi_info = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_info), GFP_KERNEL);
	if (!tpmi_info)
		return -ENOMEM;

	tpmi_info->vsec_dev = vsec_dev;
	tpmi_info->feature_count = vsec_dev->num_resources;
	tpmi_info->plat_info.bus_number = pci_dev->bus->number;

	tpmi_info->tpmi_features = devm_kcalloc(&auxdev->dev, vsec_dev->num_resources,
						sizeof(*tpmi_info->tpmi_features),
						GFP_KERNEL);
	if (!tpmi_info->tpmi_features)
		return -ENOMEM;

	for (i = 0; i < vsec_dev->num_resources; i++) {
		struct intel_tpmi_pm_feature *pfs;
		struct resource *res;
		u64 res_start;
		int size, ret;

		pfs = &tpmi_info->tpmi_features[i];
		pfs->vsec_dev = vsec_dev;

		res = &vsec_dev->resource[i];
		if (!res)
			continue;

		res_start = res->start;
		size = resource_size(res);
		if (size < 0)
			continue;

		ret = tpmi_fetch_pfs_header(pfs, res_start, size);
		if (ret)
			continue;

		if (!pfs_start)
			pfs_start = res_start;

		pfs->vsec_offset = pfs_start + pfs->pfs_header.cap_offset * TPMI_CAP_OFFSET_UNIT;

		/*
		 * Process TPMI_INFO to get the PCI device to CPU package ID
		 * mapping. Device nodes for TPMI features are not created in
		 * this loop, so the mapping information is already available
		 * when the actual device nodes are created later via
		 * tpmi_create_devices().
		 */
		if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID) {
			ret = tpmi_process_info(tpmi_info, pfs);
			if (ret)
				return ret;
		}

		if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID)
			tpmi_set_control_base(auxdev, tpmi_info, pfs);
	}

	tpmi_info->pfs_start = pfs_start;

	auxiliary_set_drvdata(auxdev, tpmi_info);

	ret = tpmi_create_devices(tpmi_info);
	if (ret)
		return ret;

	/*
	 * Allow debugfs only when the security policy allows it. Everything
	 * this debugfs interface provides can also be done via /dev/mem
	 * access. If the /dev/mem interface is locked down, don't allow
	 * debugfs to present any information. Also require CAP_SYS_RAWIO,
	 * as the /dev/mem interface does.
	 */
	if (!security_locked_down(LOCKDOWN_DEV_MEM) && capable(CAP_SYS_RAWIO))
		tpmi_dbgfs_register(tpmi_info);

	return 0;
}

static int tpmi_probe(struct auxiliary_device *auxdev,
		      const struct auxiliary_device_id *id)
{
	return intel_vsec_tpmi_init(auxdev);
}

static void tpmi_remove(struct auxiliary_device *auxdev)
{
	struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(auxdev);

	debugfs_remove_recursive(tpmi_info->dbgfs_dir);
}

static const struct auxiliary_device_id tpmi_id_table[] = {
	{ .name = "intel_vsec.tpmi" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, tpmi_id_table);

static struct auxiliary_driver tpmi_aux_driver = {
	.id_table	= tpmi_id_table,
	.probe		= tpmi_probe,
	.remove         = tpmi_remove,
};

module_auxiliary_driver(tpmi_aux_driver);

MODULE_IMPORT_NS(INTEL_VSEC);
MODULE_DESCRIPTION("Intel TPMI enumeration module");
MODULE_LICENSE("GPL");