xref: /linux/drivers/pci/tph.c (revision 4b2bdc22210e39a02b3dc984cb8eb6b3293a56a7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * TPH (TLP Processing Hints) support
4  *
5  * Copyright (C) 2024 Advanced Micro Devices, Inc.
6  *     Eric Van Tassell <Eric.VanTassell@amd.com>
7  *     Wei Huang <wei.huang2@amd.com>
8  */
#include <linux/bitfield.h>
#include <linux/msi.h>
#include <linux/pci-acpi.h>
#include <linux/pci-tph.h>
#include <linux/pci.h>
#include <linux/string.h>

#include "pci.h"
16 
/* System-wide TPH disabled; set via pci_no_tph() ("notph" kernel parameter) */
static bool pci_tph_disabled;
19 
20 #ifdef CONFIG_ACPI
21 /*
22  * The st_info struct defines the Steering Tag (ST) info returned by the
23  * firmware PCI ACPI _DSM method (rev=0x7, func=0xF, "_DSM to Query Cache
24  * Locality TPH Features"), as specified in the approved ECN for PCI Firmware
25  * Spec and available at https://members.pcisig.com/wg/PCI-SIG/document/15470.
26  *
27  * @vm_st_valid:  8-bit ST for volatile memory is valid
28  * @vm_xst_valid: 16-bit extended ST for volatile memory is valid
29  * @vm_ph_ignore: 1 => PH was and will be ignored, 0 => PH should be supplied
30  * @vm_st:        8-bit ST for volatile mem
31  * @vm_xst:       16-bit extended ST for volatile mem
32  * @pm_st_valid:  8-bit ST for persistent memory is valid
33  * @pm_xst_valid: 16-bit extended ST for persistent memory is valid
34  * @pm_ph_ignore: 1 => PH was and will be ignored, 0 => PH should be supplied
35  * @pm_st:        8-bit ST for persistent mem
36  * @pm_xst:       16-bit extended ST for persistent mem
37  */
union st_info {
	struct {
		u64 vm_st_valid : 1;
		u64 vm_xst_valid : 1;
		u64 vm_ph_ignore : 1;
		u64 rsvd1 : 5;		/* reserved, per the _DSM return layout */
		u64 vm_st : 8;
		u64 vm_xst : 16;
		u64 pm_st_valid : 1;
		u64 pm_xst_valid : 1;
		u64 pm_ph_ignore : 1;
		u64 rsvd2 : 5;		/* reserved, per the _DSM return layout */
		u64 pm_st : 8;
		u64 pm_xst : 16;
	};
	u64 value;	/* raw 64-bit value as returned by the _DSM buffer */
};
55 
56 static u16 tph_extract_tag(enum tph_mem_type mem_type, u8 req_type,
57 			   union st_info *info)
58 {
59 	switch (req_type) {
60 	case PCI_TPH_REQ_TPH_ONLY: /* 8-bit tag */
61 		switch (mem_type) {
62 		case TPH_MEM_TYPE_VM:
63 			if (info->vm_st_valid)
64 				return info->vm_st;
65 			break;
66 		case TPH_MEM_TYPE_PM:
67 			if (info->pm_st_valid)
68 				return info->pm_st;
69 			break;
70 		}
71 		break;
72 	case PCI_TPH_REQ_EXT_TPH: /* 16-bit tag */
73 		switch (mem_type) {
74 		case TPH_MEM_TYPE_VM:
75 			if (info->vm_xst_valid)
76 				return info->vm_xst;
77 			break;
78 		case TPH_MEM_TYPE_PM:
79 			if (info->pm_xst_valid)
80 				return info->pm_xst;
81 			break;
82 		}
83 		break;
84 	default:
85 		return 0;
86 	}
87 
88 	return 0;
89 }
90 
91 #define TPH_ST_DSM_FUNC_INDEX	0xF
92 static acpi_status tph_invoke_dsm(acpi_handle handle, u32 cpu_uid,
93 				  union st_info *st_out)
94 {
95 	union acpi_object arg3[3], in_obj, *out_obj;
96 
97 	if (!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 7,
98 			    BIT(TPH_ST_DSM_FUNC_INDEX)))
99 		return AE_ERROR;
100 
101 	/* DWORD: feature ID (0 for processor cache ST query) */
102 	arg3[0].integer.type = ACPI_TYPE_INTEGER;
103 	arg3[0].integer.value = 0;
104 
105 	/* DWORD: target UID */
106 	arg3[1].integer.type = ACPI_TYPE_INTEGER;
107 	arg3[1].integer.value = cpu_uid;
108 
109 	/* QWORD: properties, all 0's */
110 	arg3[2].integer.type = ACPI_TYPE_INTEGER;
111 	arg3[2].integer.value = 0;
112 
113 	in_obj.type = ACPI_TYPE_PACKAGE;
114 	in_obj.package.count = ARRAY_SIZE(arg3);
115 	in_obj.package.elements = arg3;
116 
117 	out_obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 7,
118 				    TPH_ST_DSM_FUNC_INDEX, &in_obj);
119 	if (!out_obj)
120 		return AE_ERROR;
121 
122 	if (out_obj->type != ACPI_TYPE_BUFFER) {
123 		ACPI_FREE(out_obj);
124 		return AE_ERROR;
125 	}
126 
127 	st_out->value = *((u64 *)(out_obj->buffer.pointer));
128 
129 	ACPI_FREE(out_obj);
130 
131 	return AE_OK;
132 }
133 #endif
134 
135 /* Update the TPH Requester Enable field of TPH Control Register */
136 static void set_ctrl_reg_req_en(struct pci_dev *pdev, u8 req_type)
137 {
138 	u32 reg;
139 
140 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, &reg);
141 
142 	reg &= ~PCI_TPH_CTRL_REQ_EN_MASK;
143 	reg |= FIELD_PREP(PCI_TPH_CTRL_REQ_EN_MASK, req_type);
144 
145 	pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, reg);
146 }
147 
148 static u8 get_st_modes(struct pci_dev *pdev)
149 {
150 	u32 reg;
151 
152 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
153 	reg &= PCI_TPH_CAP_ST_NS | PCI_TPH_CAP_ST_IV | PCI_TPH_CAP_ST_DS;
154 
155 	return reg;
156 }
157 
158 /**
159  * pcie_tph_get_st_table_loc - Return the device's ST table location
160  * @pdev: PCI device to query
161  *
162  * Return:
163  *  PCI_TPH_LOC_NONE - Not present
164  *  PCI_TPH_LOC_CAP  - Located in the TPH Requester Extended Capability
165  *  PCI_TPH_LOC_MSIX - Located in the MSI-X Table
166  */
167 u32 pcie_tph_get_st_table_loc(struct pci_dev *pdev)
168 {
169 	u32 reg;
170 
171 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
172 
173 	return FIELD_GET(PCI_TPH_CAP_LOC_MASK, reg);
174 }
175 EXPORT_SYMBOL(pcie_tph_get_st_table_loc);
176 
177 /*
178  * Return the size of ST table. If ST table is not in TPH Requester Extended
179  * Capability space, return 0. Otherwise return the ST Table Size + 1.
180  */
181 u16 pcie_tph_get_st_table_size(struct pci_dev *pdev)
182 {
183 	u32 reg;
184 	u32 loc;
185 
186 	/* Check ST table location first */
187 	loc = pcie_tph_get_st_table_loc(pdev);
188 
189 	/* Convert loc to match with PCI_TPH_LOC_* defined in pci_regs.h */
190 	loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);
191 	if (loc != PCI_TPH_LOC_CAP)
192 		return 0;
193 
194 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
195 
196 	return FIELD_GET(PCI_TPH_CAP_ST_MASK, reg) + 1;
197 }
198 EXPORT_SYMBOL(pcie_tph_get_st_table_size);
199 
200 /* Return device's Root Port completer capability */
201 static u8 get_rp_completer_type(struct pci_dev *pdev)
202 {
203 	struct pci_dev *rp;
204 	u32 reg;
205 	int ret;
206 
207 	rp = pcie_find_root_port(pdev);
208 	if (!rp)
209 		return 0;
210 
211 	ret = pcie_capability_read_dword(rp, PCI_EXP_DEVCAP2, &reg);
212 	if (ret)
213 		return 0;
214 
215 	return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg);
216 }
217 
218 /* Write tag to ST table - Return 0 if OK, otherwise -errno */
219 static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
220 {
221 	int st_table_size;
222 	int offset;
223 
224 	/* Check if index is out of bound */
225 	st_table_size = pcie_tph_get_st_table_size(pdev);
226 	if (index >= st_table_size)
227 		return -ENXIO;
228 
229 	offset = pdev->tph_cap + PCI_TPH_BASE_SIZEOF + index * sizeof(u16);
230 
231 	return pci_write_config_word(pdev, offset, tag);
232 }
233 
/**
 * pcie_tph_get_cpu_st() - Retrieve Steering Tag for a target memory associated
 * with a specific CPU
 * @pdev: PCI device
 * @mem_type: target memory type (volatile or persistent RAM)
 * @cpu: associated CPU id
 * @tag: Steering Tag to be returned
 *
 * Return the Steering Tag for a target memory that is associated with a
 * specific CPU as indicated by cpu.  The tag is obtained by evaluating the
 * TPH _DSM on the device's Root Port; without CONFIG_ACPI this always
 * fails with -ENODEV.
 *
 * Return: 0 if success, otherwise negative value (-errno)
 */
int pcie_tph_get_cpu_st(struct pci_dev *pdev, enum tph_mem_type mem_type,
			unsigned int cpu, u16 *tag)
{
#ifdef CONFIG_ACPI
	struct pci_dev *rp;
	acpi_handle rp_acpi_handle;
	union st_info info;
	u32 cpu_uid;
	int ret;

	/* Map the Linux CPU number to the ACPI processor UID the _DSM wants */
	ret = acpi_get_cpu_uid(cpu, &cpu_uid);
	if (ret != 0)
		return ret;

	/* The _DSM lives on the Root Port's ACPI companion device */
	rp = pcie_find_root_port(pdev);
	if (!rp || !rp->bus || !rp->bus->bridge)
		return -ENODEV;

	rp_acpi_handle = ACPI_HANDLE(rp->bus->bridge);

	if (tph_invoke_dsm(rp_acpi_handle, cpu_uid, &info) != AE_OK) {
		*tag = 0;
		return -EINVAL;
	}

	/* Select the 8-bit or 16-bit tag matching the device's request type */
	*tag = tph_extract_tag(mem_type, pdev->tph_req_type, &info);

	pci_dbg(pdev, "get steering tag: mem_type=%s, cpu=%d, tag=%#04x\n",
		(mem_type == TPH_MEM_TYPE_VM) ? "volatile" : "persistent",
		cpu, *tag);

	return 0;
#else
	return -ENODEV;
#endif
}
EXPORT_SYMBOL(pcie_tph_get_cpu_st);
284 
285 /**
286  * pcie_tph_set_st_entry() - Set Steering Tag in the ST table entry
287  * @pdev: PCI device
288  * @index: ST table entry index
289  * @tag: Steering Tag to be written
290  *
291  * Figure out the proper location of ST table, either in the MSI-X table or
292  * in the TPH Extended Capability space, and write the Steering Tag into
293  * the ST entry pointed by index.
294  *
295  * Return: 0 if success, otherwise negative value (-errno)
296  */
297 int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
298 {
299 	u32 loc;
300 	int err = 0;
301 
302 	if (!pdev->tph_cap)
303 		return -EINVAL;
304 
305 	if (!pdev->tph_enabled)
306 		return -EINVAL;
307 
308 	/* No need to write tag if device is in "No ST Mode" */
309 	if (pdev->tph_mode == PCI_TPH_ST_NS_MODE)
310 		return 0;
311 
312 	/*
313 	 * Disable TPH before updating ST to avoid potential instability as
314 	 * cautioned in PCIe r6.2, sec 6.17.3, "ST Modes of Operation"
315 	 */
316 	set_ctrl_reg_req_en(pdev, PCI_TPH_REQ_DISABLE);
317 
318 	loc = pcie_tph_get_st_table_loc(pdev);
319 	/* Convert loc to match with PCI_TPH_LOC_* */
320 	loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);
321 
322 	switch (loc) {
323 	case PCI_TPH_LOC_MSIX:
324 		err = pci_msix_write_tph_tag(pdev, index, tag);
325 		break;
326 	case PCI_TPH_LOC_CAP:
327 		err = write_tag_to_st_table(pdev, index, tag);
328 		break;
329 	default:
330 		err = -EINVAL;
331 	}
332 
333 	if (err) {
334 		pcie_disable_tph(pdev);
335 		return err;
336 	}
337 
338 	set_ctrl_reg_req_en(pdev, pdev->tph_req_type);
339 
340 	pci_dbg(pdev, "set steering tag: %s table, index=%d, tag=%#04x\n",
341 		(loc == PCI_TPH_LOC_MSIX) ? "MSI-X" : "ST", index, tag);
342 
343 	return 0;
344 }
345 EXPORT_SYMBOL(pcie_tph_set_st_entry);
346 
347 /**
348  * pcie_disable_tph - Turn off TPH support for device
349  * @pdev: PCI device
350  *
351  * Return: none
352  */
353 void pcie_disable_tph(struct pci_dev *pdev)
354 {
355 	if (!pdev->tph_cap)
356 		return;
357 
358 	if (!pdev->tph_enabled)
359 		return;
360 
361 	pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, 0);
362 
363 	pdev->tph_mode = 0;
364 	pdev->tph_req_type = 0;
365 	pdev->tph_enabled = 0;
366 }
367 EXPORT_SYMBOL(pcie_disable_tph);
368 
369 /**
370  * pcie_enable_tph - Enable TPH support for device using a specific ST mode
371  * @pdev: PCI device
372  * @mode: ST mode to enable. Current supported modes include:
373  *
374  *   - PCI_TPH_ST_NS_MODE: NO ST Mode
375  *   - PCI_TPH_ST_IV_MODE: Interrupt Vector Mode
376  *   - PCI_TPH_ST_DS_MODE: Device Specific Mode
377  *
378  * Check whether the mode is actually supported by the device before enabling
379  * and return an error if not. Additionally determine what types of requests,
380  * TPH or extended TPH, can be issued by the device based on its TPH requester
381  * capability and the Root Port's completer capability.
382  *
383  * Return: 0 on success, otherwise negative value (-errno)
384  */
385 int pcie_enable_tph(struct pci_dev *pdev, int mode)
386 {
387 	u32 reg;
388 	u8 dev_modes;
389 	u8 rp_req_type;
390 
391 	/* Honor "notph" kernel parameter */
392 	if (pci_tph_disabled)
393 		return -EINVAL;
394 
395 	if (!pdev->tph_cap)
396 		return -EINVAL;
397 
398 	if (pdev->tph_enabled)
399 		return -EBUSY;
400 
401 	/* Sanitize and check ST mode compatibility */
402 	mode &= PCI_TPH_CTRL_MODE_SEL_MASK;
403 	dev_modes = get_st_modes(pdev);
404 	if (!((1 << mode) & dev_modes))
405 		return -EINVAL;
406 
407 	pdev->tph_mode = mode;
408 
409 	/* Get req_type supported by device and its Root Port */
410 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
411 	if (FIELD_GET(PCI_TPH_CAP_EXT_TPH, reg))
412 		pdev->tph_req_type = PCI_TPH_REQ_EXT_TPH;
413 	else
414 		pdev->tph_req_type = PCI_TPH_REQ_TPH_ONLY;
415 
416 	rp_req_type = get_rp_completer_type(pdev);
417 
418 	/* Final req_type is the smallest value of two */
419 	pdev->tph_req_type = min(pdev->tph_req_type, rp_req_type);
420 
421 	if (pdev->tph_req_type == PCI_TPH_REQ_DISABLE)
422 		return -EINVAL;
423 
424 	/* Write them into TPH control register */
425 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, &reg);
426 
427 	reg &= ~PCI_TPH_CTRL_MODE_SEL_MASK;
428 	reg |= FIELD_PREP(PCI_TPH_CTRL_MODE_SEL_MASK, pdev->tph_mode);
429 
430 	reg &= ~PCI_TPH_CTRL_REQ_EN_MASK;
431 	reg |= FIELD_PREP(PCI_TPH_CTRL_REQ_EN_MASK, pdev->tph_req_type);
432 
433 	pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, reg);
434 
435 	pdev->tph_enabled = 1;
436 
437 	return 0;
438 }
439 EXPORT_SYMBOL(pcie_enable_tph);
440 
441 void pci_restore_tph_state(struct pci_dev *pdev)
442 {
443 	struct pci_cap_saved_state *save_state;
444 	int num_entries, i, offset;
445 	u16 *st_entry;
446 	u32 *cap;
447 
448 	if (!pdev->tph_cap)
449 		return;
450 
451 	if (!pdev->tph_enabled)
452 		return;
453 
454 	save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_TPH);
455 	if (!save_state)
456 		return;
457 
458 	/* Restore control register and all ST entries */
459 	cap = &save_state->cap.data[0];
460 	pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, *cap++);
461 	st_entry = (u16 *)cap;
462 	offset = PCI_TPH_BASE_SIZEOF;
463 	num_entries = pcie_tph_get_st_table_size(pdev);
464 	for (i = 0; i < num_entries; i++) {
465 		pci_write_config_word(pdev, pdev->tph_cap + offset,
466 				      *st_entry++);
467 		offset += sizeof(u16);
468 	}
469 }
470 
471 void pci_save_tph_state(struct pci_dev *pdev)
472 {
473 	struct pci_cap_saved_state *save_state;
474 	int num_entries, i, offset;
475 	u16 *st_entry;
476 	u32 *cap;
477 
478 	if (!pdev->tph_cap)
479 		return;
480 
481 	if (!pdev->tph_enabled)
482 		return;
483 
484 	save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_TPH);
485 	if (!save_state)
486 		return;
487 
488 	/* Save control register */
489 	cap = &save_state->cap.data[0];
490 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, cap++);
491 
492 	/* Save all ST entries in extended capability structure */
493 	st_entry = (u16 *)cap;
494 	offset = PCI_TPH_BASE_SIZEOF;
495 	num_entries = pcie_tph_get_st_table_size(pdev);
496 	for (i = 0; i < num_entries; i++) {
497 		pci_read_config_word(pdev, pdev->tph_cap + offset,
498 				     st_entry++);
499 		offset += sizeof(u16);
500 	}
501 }
502 
/* Disable TPH system-wide; honored by pcie_enable_tph() ("notph" parameter) */
void pci_no_tph(void)
{
	pci_tph_disabled = true;

	pr_info("PCIe TPH is disabled\n");
}
509 
510 void pci_tph_init(struct pci_dev *pdev)
511 {
512 	int num_entries;
513 	u32 save_size;
514 
515 	pdev->tph_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_TPH);
516 	if (!pdev->tph_cap)
517 		return;
518 
519 	num_entries = pcie_tph_get_st_table_size(pdev);
520 	save_size = sizeof(u32) + num_entries * sizeof(u16);
521 	pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_TPH, save_size);
522 }
523