xref: /linux/drivers/pci/tph.c (revision d06ccdc9529235130798b519f6519103d83a7272)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * TPH (TLP Processing Hints) support
4  *
5  * Copyright (C) 2024 Advanced Micro Devices, Inc.
6  *     Eric Van Tassell <Eric.VanTassell@amd.com>
7  *     Wei Huang <wei.huang2@amd.com>
8  */
9 #include <linux/pci.h>
10 #include <linux/pci-acpi.h>
11 #include <linux/msi.h>
12 #include <linux/bitfield.h>
13 #include <linux/pci-tph.h>
14 
15 #include "pci.h"
16 
17 /* System-wide TPH disabled */
18 static bool pci_tph_disabled;
19 
20 #ifdef CONFIG_ACPI
21 /*
22  * The st_info struct defines the Steering Tag (ST) info returned by the
23  * firmware PCI ACPI _DSM method (rev=0x7, func=0xF, "_DSM to Query Cache
24  * Locality TPH Features"), as specified in the approved ECN for PCI Firmware
25  * Spec and available at https://members.pcisig.com/wg/PCI-SIG/document/15470.
26  *
27  * @vm_st_valid:  8-bit ST for volatile memory is valid
28  * @vm_xst_valid: 16-bit extended ST for volatile memory is valid
29  * @vm_ph_ignore: 1 => PH was and will be ignored, 0 => PH should be supplied
30  * @vm_st:        8-bit ST for volatile mem
31  * @vm_xst:       16-bit extended ST for volatile mem
32  * @pm_st_valid:  8-bit ST for persistent memory is valid
33  * @pm_xst_valid: 16-bit extended ST for persistent memory is valid
34  * @pm_ph_ignore: 1 => PH was and will be ignored, 0 => PH should be supplied
35  * @pm_st:        8-bit ST for persistent mem
36  * @pm_xst:       16-bit extended ST for persistent mem
37  */
38 union st_info {
39 	struct {
40 		u64 vm_st_valid : 1;
41 		u64 vm_xst_valid : 1;
42 		u64 vm_ph_ignore : 1;
43 		u64 rsvd1 : 5;
44 		u64 vm_st : 8;
45 		u64 vm_xst : 16;
46 		u64 pm_st_valid : 1;
47 		u64 pm_xst_valid : 1;
48 		u64 pm_ph_ignore : 1;
49 		u64 rsvd2 : 5;
50 		u64 pm_st : 8;
51 		u64 pm_xst : 16;
52 	};
53 	u64 value;
54 };
55 
56 static u16 tph_extract_tag(enum tph_mem_type mem_type, u8 req_type,
57 			   union st_info *info)
58 {
59 	switch (req_type) {
60 	case PCI_TPH_REQ_TPH_ONLY: /* 8-bit tag */
61 		switch (mem_type) {
62 		case TPH_MEM_TYPE_VM:
63 			if (info->vm_st_valid)
64 				return info->vm_st;
65 			break;
66 		case TPH_MEM_TYPE_PM:
67 			if (info->pm_st_valid)
68 				return info->pm_st;
69 			break;
70 		}
71 		break;
72 	case PCI_TPH_REQ_EXT_TPH: /* 16-bit tag */
73 		switch (mem_type) {
74 		case TPH_MEM_TYPE_VM:
75 			if (info->vm_xst_valid)
76 				return info->vm_xst;
77 			break;
78 		case TPH_MEM_TYPE_PM:
79 			if (info->pm_xst_valid)
80 				return info->pm_xst;
81 			break;
82 		}
83 		break;
84 	default:
85 		return 0;
86 	}
87 
88 	return 0;
89 }
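
/*
 * Illustrative decode (an editorial sketch, not part of the original file),
 * assuming the low-to-high bitfield layout of union st_info above: a _DSM
 * return value of 0x2A01 sets vm_st_valid = 1 and vm_st = 0x2A with all other
 * fields clear, so tph_extract_tag(TPH_MEM_TYPE_VM, PCI_TPH_REQ_TPH_ONLY,
 * &info) yields 0x2A, while any extended-ST or persistent-memory query
 * yields 0.
 */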
90 
91 #define TPH_ST_DSM_FUNC_INDEX	0xF
92 static acpi_status tph_invoke_dsm(acpi_handle handle, u32 cpu_uid,
93 				  union st_info *st_out)
94 {
95 	union acpi_object arg3[3], in_obj, *out_obj;
96 
97 	if (!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 7,
98 			    BIT(TPH_ST_DSM_FUNC_INDEX)))
99 		return AE_ERROR;
100 
101 	/* DWORD: feature ID (0 for processor cache ST query) */
102 	arg3[0].integer.type = ACPI_TYPE_INTEGER;
103 	arg3[0].integer.value = 0;
104 
105 	/* DWORD: target UID */
106 	arg3[1].integer.type = ACPI_TYPE_INTEGER;
107 	arg3[1].integer.value = cpu_uid;
108 
109 	/* QWORD: properties, all 0's */
110 	arg3[2].integer.type = ACPI_TYPE_INTEGER;
111 	arg3[2].integer.value = 0;
112 
113 	in_obj.type = ACPI_TYPE_PACKAGE;
114 	in_obj.package.count = ARRAY_SIZE(arg3);
115 	in_obj.package.elements = arg3;
116 
117 	out_obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 7,
118 				    TPH_ST_DSM_FUNC_INDEX, &in_obj);
119 	if (!out_obj)
120 		return AE_ERROR;
121 
122 	if (out_obj->type != ACPI_TYPE_BUFFER) {
123 		ACPI_FREE(out_obj);
124 		return AE_ERROR;
125 	}
126 
127 	st_out->value = *((u64 *)(out_obj->buffer.pointer));
128 
129 	ACPI_FREE(out_obj);
130 
131 	return AE_OK;
132 }
133 #endif
134 
135 /* Update the TPH Requester Enable field of TPH Control Register */
136 static void set_ctrl_reg_req_en(struct pci_dev *pdev, u8 req_type)
137 {
138 	u32 reg;
139 
140 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, &reg);
141 
142 	reg &= ~PCI_TPH_CTRL_REQ_EN_MASK;
143 	reg |= FIELD_PREP(PCI_TPH_CTRL_REQ_EN_MASK, req_type);
144 
145 	pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, reg);
146 }
147 
148 static u8 get_st_modes(struct pci_dev *pdev)
149 {
150 	u32 reg;
151 
152 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
153 	reg &= PCI_TPH_CAP_ST_NS | PCI_TPH_CAP_ST_IV | PCI_TPH_CAP_ST_DS;
154 
155 	return reg;
156 }
157 
158 /**
159  * pcie_tph_get_st_table_loc - Return the device's ST table location
160  * @pdev: PCI device to query
161  *
162  * Return:
163  *  PCI_TPH_LOC_NONE - Not present
164  *  PCI_TPH_LOC_CAP  - Located in the TPH Requester Extended Capability
165  *  PCI_TPH_LOC_MSIX - Located in the MSI-X Table
166  */
167 u32 pcie_tph_get_st_table_loc(struct pci_dev *pdev)
168 {
169 	u32 reg;
170 
171 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
172 
173 	return FIELD_GET(PCI_TPH_CAP_LOC_MASK, reg);
174 }
175 EXPORT_SYMBOL(pcie_tph_get_st_table_loc);
176 
177 /*
178  * Return the size of the ST table. If the ST table is not in the TPH Requester
179  * Extended Capability space, return 0. Otherwise return the ST Table Size + 1.
180  */
181 u16 pcie_tph_get_st_table_size(struct pci_dev *pdev)
182 {
183 	u32 reg;
184 	u32 loc;
185 
186 	/* Check ST table location first */
187 	loc = pcie_tph_get_st_table_loc(pdev);
188 
189 	/* Convert loc to match the PCI_TPH_LOC_* values defined in pci_regs.h */
190 	loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);
191 	if (loc != PCI_TPH_LOC_CAP)
192 		return 0;
193 
194 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
195 
196 	return FIELD_GET(PCI_TPH_CAP_ST_MASK, reg) + 1;
197 }
198 EXPORT_SYMBOL(pcie_tph_get_st_table_size);
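
/*
 * Illustrative note (not part of the original file): when the ST table lives
 * in the TPH Requester Extended Capability, valid indices for
 * pcie_tph_set_st_entry() run from 0 to pcie_tph_get_st_table_size() - 1.
 * For example, a device whose ST Table Size field reads 63 has 64 entries,
 * and an attempt to write index 64 is rejected with -ENXIO by
 * write_tag_to_st_table().
 */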
199 
200 /* Return device's Root Port completer capability */
201 static u8 get_rp_completer_type(struct pci_dev *pdev)
202 {
203 	struct pci_dev *rp;
204 	u32 reg;
205 	int ret;
206 
207 	rp = pcie_find_root_port(pdev);
208 	if (!rp)
209 		return 0;
210 
211 	ret = pcie_capability_read_dword(rp, PCI_EXP_DEVCAP2, &reg);
212 	if (ret)
213 		return 0;
214 
215 	return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg);
216 }
217 
218 /* Write tag to ST table - Return 0 if OK, otherwise -errno */
219 static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
220 {
221 	int st_table_size;
222 	int offset;
223 
224 	/* Check if index is out of bounds */
225 	st_table_size = pcie_tph_get_st_table_size(pdev);
226 	if (index >= st_table_size)
227 		return -ENXIO;
228 
229 	offset = pdev->tph_cap + PCI_TPH_BASE_SIZEOF + index * sizeof(u16);
230 
231 	return pci_write_config_word(pdev, offset, tag);
232 }
233 
234 /**
235  * pcie_tph_get_cpu_st() - Retrieve Steering Tag for a target memory associated
236  * with a specific CPU
237  * @pdev: PCI device
238  * @mem_type: target memory type (volatile or persistent RAM)
239  * @cpu_uid: associated CPU id
240  * @tag: Steering Tag to be returned
241  *
242  * Return the Steering Tag for a target memory that is associated with a
243  * specific CPU as indicated by cpu_uid.
244  *
245  * Return: 0 if success, otherwise negative value (-errno)
246  */
247 int pcie_tph_get_cpu_st(struct pci_dev *pdev, enum tph_mem_type mem_type,
248 			unsigned int cpu_uid, u16 *tag)
249 {
250 #ifdef CONFIG_ACPI
251 	struct pci_dev *rp;
252 	acpi_handle rp_acpi_handle;
253 	union st_info info;
254 
255 	rp = pcie_find_root_port(pdev);
256 	if (!rp || !rp->bus || !rp->bus->bridge)
257 		return -ENODEV;
258 
259 	rp_acpi_handle = ACPI_HANDLE(rp->bus->bridge);
260 
261 	if (tph_invoke_dsm(rp_acpi_handle, cpu_uid, &info) != AE_OK) {
262 		*tag = 0;
263 		return -EINVAL;
264 	}
265 
266 	*tag = tph_extract_tag(mem_type, pdev->tph_req_type, &info);
267 
268 	pci_dbg(pdev, "get steering tag: mem_type=%s, cpu_uid=%d, tag=%#04x\n",
269 		(mem_type == TPH_MEM_TYPE_VM) ? "volatile" : "persistent",
270 		cpu_uid, *tag);
271 
272 	return 0;
273 #else
274 	return -ENODEV;
275 #endif
276 }
277 EXPORT_SYMBOL(pcie_tph_get_cpu_st);
278 
279 /**
280  * pcie_tph_set_st_entry() - Set Steering Tag in the ST table entry
281  * @pdev: PCI device
282  * @index: ST table entry index
283  * @tag: Steering Tag to be written
284  *
285  * Figure out the proper location of the ST table, either in the MSI-X table
286  * or in the TPH Extended Capability space, and write the Steering Tag into
287  * the ST entry pointed to by index.
288  *
289  * Return: 0 if success, otherwise negative value (-errno)
290  */
291 int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
292 {
293 	u32 loc;
294 	int err = 0;
295 
296 	if (!pdev->tph_cap)
297 		return -EINVAL;
298 
299 	if (!pdev->tph_enabled)
300 		return -EINVAL;
301 
302 	/* No need to write tag if device is in "No ST Mode" */
303 	if (pdev->tph_mode == PCI_TPH_ST_NS_MODE)
304 		return 0;
305 
306 	/*
307 	 * Disable TPH before updating ST to avoid potential instability as
308 	 * cautioned in PCIe r6.2, sec 6.17.3, "ST Modes of Operation"
309 	 */
310 	set_ctrl_reg_req_en(pdev, PCI_TPH_REQ_DISABLE);
311 
312 	loc = pcie_tph_get_st_table_loc(pdev);
313 	/* Convert loc to match the PCI_TPH_LOC_* values */
314 	loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);
315 
316 	switch (loc) {
317 	case PCI_TPH_LOC_MSIX:
318 		err = pci_msix_write_tph_tag(pdev, index, tag);
319 		break;
320 	case PCI_TPH_LOC_CAP:
321 		err = write_tag_to_st_table(pdev, index, tag);
322 		break;
323 	default:
324 		err = -EINVAL;
325 	}
326 
327 	if (err) {
328 		pcie_disable_tph(pdev);
329 		return err;
330 	}
331 
332 	set_ctrl_reg_req_en(pdev, pdev->tph_req_type);
333 
334 	pci_dbg(pdev, "set steering tag: %s table, index=%d, tag=%#04x\n",
335 		(loc == PCI_TPH_LOC_MSIX) ? "MSI-X" : "ST", index, tag);
336 
337 	return 0;
338 }
339 EXPORT_SYMBOL(pcie_tph_set_st_entry);
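
/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * typical caller pairs pcie_tph_get_cpu_st() with pcie_tph_set_st_entry() so
 * that DMA writes from a given queue are steered toward the cache of the CPU
 * servicing that queue's interrupt. The helper name and the msix_idx and
 * cpu_acpi_uid parameters below are hypothetical; a real driver would derive
 * them from its own queue/IRQ bookkeeping.
 */
static inline int example_tph_steer_queue(struct pci_dev *pdev,
					  unsigned int msix_idx,
					  unsigned int cpu_acpi_uid)
{
	u16 tag;
	int err;

	/* Look up the tag for volatile memory associated with the target CPU */
	err = pcie_tph_get_cpu_st(pdev, TPH_MEM_TYPE_VM, cpu_acpi_uid, &tag);
	if (err)
		return err;

	/* In Interrupt Vector mode the ST entry index tracks the MSI-X index */
	return pcie_tph_set_st_entry(pdev, msix_idx, tag);
}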
340 
341 /**
342  * pcie_disable_tph - Turn off TPH support for device
343  * @pdev: PCI device
344  *
345  * Return: none
346  */
347 void pcie_disable_tph(struct pci_dev *pdev)
348 {
349 	if (!pdev->tph_cap)
350 		return;
351 
352 	if (!pdev->tph_enabled)
353 		return;
354 
355 	pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, 0);
356 
357 	pdev->tph_mode = 0;
358 	pdev->tph_req_type = 0;
359 	pdev->tph_enabled = 0;
360 }
361 EXPORT_SYMBOL(pcie_disable_tph);
362 
363 /**
364  * pcie_enable_tph - Enable TPH support for device using a specific ST mode
365  * @pdev: PCI device
366  * @mode: ST mode to enable. Current supported modes include:
367  *
368  *   - PCI_TPH_ST_NS_MODE: No ST Mode
369  *   - PCI_TPH_ST_IV_MODE: Interrupt Vector Mode
370  *   - PCI_TPH_ST_DS_MODE: Device Specific Mode
371  *
372  * Check whether the mode is actually supported by the device before enabling
373  * and return an error if not. Additionally determine what types of requests,
374  * TPH or extended TPH, can be issued by the device based on its TPH requester
375  * capability and the Root Port's completer capability.
376  *
377  * Return: 0 on success, otherwise negative value (-errno)
378  */
379 int pcie_enable_tph(struct pci_dev *pdev, int mode)
380 {
381 	u32 reg;
382 	u8 dev_modes;
383 	u8 rp_req_type;
384 
385 	/* Honor "notph" kernel parameter */
386 	if (pci_tph_disabled)
387 		return -EINVAL;
388 
389 	if (!pdev->tph_cap)
390 		return -EINVAL;
391 
392 	if (pdev->tph_enabled)
393 		return -EBUSY;
394 
395 	/* Sanitize and check ST mode compatibility */
396 	mode &= PCI_TPH_CTRL_MODE_SEL_MASK;
397 	dev_modes = get_st_modes(pdev);
398 	if (!((1 << mode) & dev_modes))
399 		return -EINVAL;
400 
401 	pdev->tph_mode = mode;
402 
403 	/* Get req_type supported by device and its Root Port */
404 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
405 	if (FIELD_GET(PCI_TPH_CAP_EXT_TPH, reg))
406 		pdev->tph_req_type = PCI_TPH_REQ_EXT_TPH;
407 	else
408 		pdev->tph_req_type = PCI_TPH_REQ_TPH_ONLY;
409 
410 	rp_req_type = get_rp_completer_type(pdev);
411 
412 	/* Final req_type is the smaller of the two values */
413 	pdev->tph_req_type = min(pdev->tph_req_type, rp_req_type);
414 
415 	if (pdev->tph_req_type == PCI_TPH_REQ_DISABLE)
416 		return -EINVAL;
417 
418 	/* Write them into TPH control register */
419 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, &reg);
420 
421 	reg &= ~PCI_TPH_CTRL_MODE_SEL_MASK;
422 	reg |= FIELD_PREP(PCI_TPH_CTRL_MODE_SEL_MASK, pdev->tph_mode);
423 
424 	reg &= ~PCI_TPH_CTRL_REQ_EN_MASK;
425 	reg |= FIELD_PREP(PCI_TPH_CTRL_REQ_EN_MASK, pdev->tph_req_type);
426 
427 	pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, reg);
428 
429 	pdev->tph_enabled = 1;
430 
431 	return 0;
432 }
433 EXPORT_SYMBOL(pcie_enable_tph);
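
/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * overall driver flow is to enable TPH once in a supported ST mode, program a
 * tag per queue, and tear TPH down before releasing the device. The
 * example_tph_steer_queue() helper above, the nr_queues parameter and the
 * queue-to-CPU mapping below are hypothetical.
 */
static inline int example_tph_setup(struct pci_dev *pdev,
				    unsigned int nr_queues)
{
	unsigned int i;
	int err;

	/* Interrupt Vector mode: one steering tag per MSI-X vector */
	err = pcie_enable_tph(pdev, PCI_TPH_ST_IV_MODE);
	if (err)
		return err;

	for (i = 0; i < nr_queues; i++) {
		/* Hypothetical: assume queue i's interrupt targets CPU UID i */
		err = example_tph_steer_queue(pdev, i, i);
		if (err) {
			pcie_disable_tph(pdev);
			return err;
		}
	}

	return 0;
}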
434 
435 void pci_restore_tph_state(struct pci_dev *pdev)
436 {
437 	struct pci_cap_saved_state *save_state;
438 	int num_entries, i, offset;
439 	u16 *st_entry;
440 	u32 *cap;
441 
442 	if (!pdev->tph_cap)
443 		return;
444 
445 	if (!pdev->tph_enabled)
446 		return;
447 
448 	save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_TPH);
449 	if (!save_state)
450 		return;
451 
452 	/* Restore control register and all ST entries */
453 	cap = &save_state->cap.data[0];
454 	pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, *cap++);
455 	st_entry = (u16 *)cap;
456 	offset = PCI_TPH_BASE_SIZEOF;
457 	num_entries = pcie_tph_get_st_table_size(pdev);
458 	for (i = 0; i < num_entries; i++) {
459 		pci_write_config_word(pdev, pdev->tph_cap + offset,
460 				      *st_entry++);
461 		offset += sizeof(u16);
462 	}
463 }
464 
465 void pci_save_tph_state(struct pci_dev *pdev)
466 {
467 	struct pci_cap_saved_state *save_state;
468 	int num_entries, i, offset;
469 	u16 *st_entry;
470 	u32 *cap;
471 
472 	if (!pdev->tph_cap)
473 		return;
474 
475 	if (!pdev->tph_enabled)
476 		return;
477 
478 	save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_TPH);
479 	if (!save_state)
480 		return;
481 
482 	/* Save control register */
483 	cap = &save_state->cap.data[0];
484 	pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, cap++);
485 
486 	/* Save all ST entries in extended capability structure */
487 	st_entry = (u16 *)cap;
488 	offset = PCI_TPH_BASE_SIZEOF;
489 	num_entries = pcie_tph_get_st_table_size(pdev);
490 	for (i = 0; i < num_entries; i++) {
491 		pci_read_config_word(pdev, pdev->tph_cap + offset,
492 				     st_entry++);
493 		offset += sizeof(u16);
494 	}
495 }
496 
497 void pci_no_tph(void)
498 {
499 	pci_tph_disabled = true;
500 
501 	pr_info("PCIe TPH is disabled\n");
502 }
503 
504 void pci_tph_init(struct pci_dev *pdev)
505 {
506 	int num_entries;
507 	u32 save_size;
508 
509 	pdev->tph_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_TPH);
510 	if (!pdev->tph_cap)
511 		return;
512 
513 	num_entries = pcie_tph_get_st_table_size(pdev);
514 	save_size = sizeof(u32) + num_entries * sizeof(u16);
515 	pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_TPH, save_size);
516 }