xref: /linux/drivers/pci/pcie/aspm.c (revision 2f2c7254931f41b5736e3ba12aaa9ac1bbeeeb92)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Enable PCIe link L0s/L1 state and Clock Power Management
4  *
5  * Copyright (C) 2007 Intel
6  * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
7  * Copyright (C) Shaohua Li (shaohua.li@intel.com)
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/bits.h>
12 #include <linux/build_bug.h>
13 #include <linux/kernel.h>
14 #include <linux/limits.h>
15 #include <linux/math.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/of.h>
19 #include <linux/pci.h>
20 #include <linux/pci_regs.h>
21 #include <linux/errno.h>
22 #include <linux/pm.h>
23 #include <linux/init.h>
24 #include <linux/printk.h>
25 #include <linux/slab.h>
26 #include <linux/time.h>
27 
28 #include "../pci.h"
29 
30 void pci_save_ltr_state(struct pci_dev *dev)
31 {
32 	int ltr;
33 	struct pci_cap_saved_state *save_state;
34 	u32 *cap;
35 
36 	if (!pci_is_pcie(dev))
37 		return;
38 
39 	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
40 	if (!ltr)
41 		return;
42 
43 	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
44 	if (!save_state) {
45 		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
46 		return;
47 	}
48 
49 	/* Some broken devices only support dword access to LTR */
50 	cap = &save_state->cap.data[0];
51 	pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
52 }
53 
54 void pci_restore_ltr_state(struct pci_dev *dev)
55 {
56 	struct pci_cap_saved_state *save_state;
57 	int ltr;
58 	u32 *cap;
59 
60 	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
61 	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
62 	if (!save_state || !ltr)
63 		return;
64 
65 	/* Some broken devices only support dword access to LTR */
66 	cap = &save_state->cap.data[0];
67 	pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
68 }
69 
70 void pci_configure_aspm_l1ss(struct pci_dev *pdev)
71 {
72 	int rc;
73 
74 	pdev->l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
75 
76 	rc = pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_L1SS,
77 					 2 * sizeof(u32));
78 	if (rc)
79 		pci_err(pdev, "unable to allocate ASPM L1SS save buffer (%pe)\n",
80 			ERR_PTR(rc));
81 }
82 
83 void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
84 {
85 	struct pci_dev *parent = pdev->bus->self;
86 	struct pci_cap_saved_state *save_state;
87 	u32 *cap;
88 
89 	/*
90 	 * If this is a Downstream Port, we never restore the L1SS state
91 	 * directly; we only restore it when we restore the state of the
92 	 * Upstream Port below it.
93 	 */
94 	if (pcie_downstream_port(pdev) || !parent)
95 		return;
96 
97 	if (!pdev->l1ss || !parent->l1ss)
98 		return;
99 
100 	/*
101 	 * Save L1 substate configuration. The ASPM L0s/L1 configuration
102 	 * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
103 	 */
104 	save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
105 	if (!save_state)
106 		return;
107 
108 	cap = &save_state->cap.data[0];
109 	pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
110 	pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
111 
112 	/*
113 	 * Save parent's L1 substate configuration so we have it for
114 	 * pci_restore_aspm_l1ss_state(pdev) to restore.
115 	 */
116 	save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
117 	if (!save_state)
118 		return;
119 
120 	cap = &save_state->cap.data[0];
121 	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
122 	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
123 }
124 
125 void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
126 {
127 	struct pci_cap_saved_state *pl_save_state, *cl_save_state;
128 	struct pci_dev *parent = pdev->bus->self;
129 	u32 *cap, pl_ctl1, pl_ctl2, pl_l1_2_enable;
130 	u32 cl_ctl1, cl_ctl2, cl_l1_2_enable;
131 	u16 clnkctl, plnkctl;
132 
133 	/*
134 	 * In case BIOS enabled L1.2 when resuming, we need to disable it first
135 	 * on the downstream component before the upstream. So, don't attempt to
136 	 * restore either until we are at the downstream component.
137 	 */
138 	if (pcie_downstream_port(pdev) || !parent)
139 		return;
140 
141 	if (!pdev->l1ss || !parent->l1ss)
142 		return;
143 
144 	cl_save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
145 	pl_save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
146 	if (!cl_save_state || !pl_save_state)
147 		return;
148 
149 	cap = &cl_save_state->cap.data[0];
150 	cl_ctl2 = *cap++;
151 	cl_ctl1 = *cap;
152 	cap = &pl_save_state->cap.data[0];
153 	pl_ctl2 = *cap++;
154 	pl_ctl1 = *cap;
155 
156 	/* Make sure L0s/L1 are disabled before updating L1SS config */
157 	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &clnkctl);
158 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &plnkctl);
159 	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
160 	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
161 		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
162 					   clnkctl & ~PCI_EXP_LNKCTL_ASPMC);
163 		pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
164 					   plnkctl & ~PCI_EXP_LNKCTL_ASPMC);
165 	}
166 
167 	/*
168 	 * Disable L1.2 on this downstream endpoint device first, followed
169 	 * by the upstream
170 	 */
171 	pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
172 				       PCI_L1SS_CTL1_L1_2_MASK, 0);
173 	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
174 				       PCI_L1SS_CTL1_L1_2_MASK, 0);
175 
176 	/*
177 	 * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD
178 	 * in PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
179 	 * enable bits, even though they're all in PCI_L1SS_CTL1.
180 	 */
181 	pl_l1_2_enable = pl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
182 	pl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
183 	cl_l1_2_enable = cl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
184 	cl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
185 
186 	/* Write back without enables first (above we cleared them in ctl1) */
187 	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, pl_ctl2);
188 	pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cl_ctl2);
189 	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, pl_ctl1);
190 	pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cl_ctl1);
191 
192 	/* Then write back the enables */
193 	if (pl_l1_2_enable || cl_l1_2_enable) {
194 		pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
195 				       pl_ctl1 | pl_l1_2_enable);
196 		pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
197 				       cl_ctl1 | cl_l1_2_enable);
198 	}
199 
200 	/* Restore L0s/L1 if they were enabled */
201 	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
202 	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
203 		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl);
204 		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl);
205 	}
206 }
207 
208 #ifdef CONFIG_PCIEASPM
209 
210 #ifdef MODULE_PARAM_PREFIX
211 #undef MODULE_PARAM_PREFIX
212 #endif
213 #define MODULE_PARAM_PREFIX "pcie_aspm."
214 
215 /* Note: these are not register definitions */
216 #define PCIE_LINK_STATE_L0S_UP	BIT(0)	/* Upstream direction L0s state */
217 #define PCIE_LINK_STATE_L0S_DW	BIT(1)	/* Downstream direction L0s state */
218 static_assert(PCIE_LINK_STATE_L0S == (PCIE_LINK_STATE_L0S_UP | PCIE_LINK_STATE_L0S_DW));
219 
220 #define PCIE_LINK_STATE_L1_SS_PCIPM	(PCIE_LINK_STATE_L1_1_PCIPM |\
221 					 PCIE_LINK_STATE_L1_2_PCIPM)
222 #define PCIE_LINK_STATE_L1_2_MASK	(PCIE_LINK_STATE_L1_2 |\
223 					 PCIE_LINK_STATE_L1_2_PCIPM)
224 #define PCIE_LINK_STATE_L1SS		(PCIE_LINK_STATE_L1_1 |\
225 					 PCIE_LINK_STATE_L1_1_PCIPM |\
226 					 PCIE_LINK_STATE_L1_2_MASK)
227 
228 struct pcie_link_state {
229 	struct pci_dev *pdev;		/* Upstream component of the Link */
230 	struct pci_dev *downstream;	/* Downstream component, function 0 */
231 	struct pcie_link_state *root;	/* pointer to the root port link */
232 	struct pcie_link_state *parent;	/* pointer to the parent Link state */
233 	struct list_head sibling;	/* node in link_list */
234 
235 	/* ASPM state */
236 	u32 aspm_support:7;		/* Supported ASPM state */
237 	u32 aspm_enabled:7;		/* Enabled ASPM state */
238 	u32 aspm_capable:7;		/* Capable ASPM state with latency */
239 	u32 aspm_default:7;		/* Default ASPM state by BIOS or
240 					   override */
241 	u32 aspm_disable:7;		/* Disabled ASPM state */
242 
243 	/* Clock PM state */
244 	u32 clkpm_capable:1;		/* Clock PM capable? */
245 	u32 clkpm_enabled:1;		/* Current Clock PM state */
246 	u32 clkpm_default:1;		/* Default Clock PM state by BIOS or
247 					   override */
248 	u32 clkpm_disable:1;		/* Clock PM disabled */
249 };
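
/*
 * How the ASPM bitfields above combine (a reading aid derived from the
 * code below, not additional policy): pcie_config_aspm_link() only
 * enables states that are present in aspm_capable and absent from
 * aspm_disable, i.e. the effective request is
 * "state & aspm_capable & ~aspm_disable".  aspm_default is what the
 * POLICY_DEFAULT policy restores via policy_to_aspm_state().
 */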
250 
251 static bool aspm_disabled, aspm_force;
252 static bool aspm_support_enabled = true;
253 static DEFINE_MUTEX(aspm_lock);
254 static LIST_HEAD(link_list);
255 
256 #define POLICY_DEFAULT 0	/* BIOS default setting */
257 #define POLICY_PERFORMANCE 1	/* high performance */
258 #define POLICY_POWERSAVE 2	/* high power saving */
259 #define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */
260 
261 #ifdef CONFIG_PCIEASPM_PERFORMANCE
262 static int aspm_policy = POLICY_PERFORMANCE;
263 #elif defined CONFIG_PCIEASPM_POWERSAVE
264 static int aspm_policy = POLICY_POWERSAVE;
265 #elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
266 static int aspm_policy = POLICY_POWER_SUPERSAVE;
267 #else
268 static int aspm_policy;
269 #endif
270 
271 static const char *policy_str[] = {
272 	[POLICY_DEFAULT] = "default",
273 	[POLICY_PERFORMANCE] = "performance",
274 	[POLICY_POWERSAVE] = "powersave",
275 	[POLICY_POWER_SUPERSAVE] = "powersupersave"
276 };
277 
278 /*
279  * The L1 PM substate capability is implemented only in function 0 of a
280  * multi-function device.
281  */
282 static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
283 {
284 	struct pci_dev *child;
285 
286 	list_for_each_entry(child, &linkbus->devices, bus_list)
287 		if (PCI_FUNC(child->devfn) == 0)
288 			return child;
289 	return NULL;
290 }
291 
292 static int policy_to_aspm_state(struct pcie_link_state *link)
293 {
294 	switch (aspm_policy) {
295 	case POLICY_PERFORMANCE:
296 		/* Disable ASPM and Clock PM */
297 		return 0;
298 	case POLICY_POWERSAVE:
299 		/* Enable ASPM L0s/L1 */
300 		return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
301 	case POLICY_POWER_SUPERSAVE:
302 		/* Enable Everything */
303 		return PCIE_LINK_STATE_ASPM_ALL;
304 	case POLICY_DEFAULT:
305 		return link->aspm_default;
306 	}
307 	return 0;
308 }
309 
310 static int policy_to_clkpm_state(struct pcie_link_state *link)
311 {
312 	switch (aspm_policy) {
313 	case POLICY_PERFORMANCE:
314 		/* Disable ASPM and Clock PM */
315 		return 0;
316 	case POLICY_POWERSAVE:
317 	case POLICY_POWER_SUPERSAVE:
318 		/* Enable Clock PM */
319 		return 1;
320 	case POLICY_DEFAULT:
321 		return link->clkpm_default;
322 	}
323 	return 0;
324 }
325 
326 static void pci_update_aspm_saved_state(struct pci_dev *dev)
327 {
328 	struct pci_cap_saved_state *save_state;
329 	u16 *cap, lnkctl, aspm_ctl;
330 
331 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
332 	if (!save_state)
333 		return;
334 
335 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnkctl);
336 
337 	/*
338 	 * Update ASPM and CLKREQ bits of LNKCTL in save_state. We only
339 	 * write PCI_EXP_LNKCTL_CCC during enumeration, so it shouldn't
340 	 * change after being captured in save_state.
341 	 */
342 	aspm_ctl = lnkctl & (PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
343 	lnkctl &= ~(PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
344 
345 	/* Depends on pci_save_pcie_state(): cap[1] is LNKCTL */
346 	cap = (u16 *)&save_state->cap.data[0];
347 	cap[1] = lnkctl | aspm_ctl;
348 }
349 
350 static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
351 {
352 	struct pci_dev *child;
353 	struct pci_bus *linkbus = link->pdev->subordinate;
354 	u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
355 
356 	list_for_each_entry(child, &linkbus->devices, bus_list) {
357 		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
358 						   PCI_EXP_LNKCTL_CLKREQ_EN,
359 						   val);
360 		pci_update_aspm_saved_state(child);
361 	}
362 	link->clkpm_enabled = !!enable;
363 }
364 
365 static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
366 {
367 	/*
368 	 * Don't enable Clock PM if the link is not Clock PM capable
369 	 * or Clock PM is disabled
370 	 */
371 	if (!link->clkpm_capable || link->clkpm_disable)
372 		enable = 0;
373 	/* Nothing to do if the requested state equals the current state */
374 	if (link->clkpm_enabled == enable)
375 		return;
376 	pcie_set_clkpm_nocheck(link, enable);
377 }
378 
379 static void pcie_clkpm_override_default_link_state(struct pcie_link_state *link,
380 						   int enabled)
381 {
382 	struct pci_dev *pdev = link->downstream;
383 
384 	/* For devicetree platforms, enable ClockPM by default */
385 	if (of_have_populated_dt() && !enabled) {
386 		link->clkpm_default = 1;
387 		pci_info(pdev, "ASPM: DT platform, enabling ClockPM\n");
388 	}
389 }
390 
391 static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
392 {
393 	int capable = 1, enabled = 1;
394 	u32 reg32;
395 	u16 reg16;
396 	struct pci_dev *child;
397 	struct pci_bus *linkbus = link->pdev->subordinate;
398 
399 	/* All functions should have the same cap and state, take the worst */
400 	list_for_each_entry(child, &linkbus->devices, bus_list) {
401 		pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
402 		if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
403 			capable = 0;
404 			enabled = 0;
405 			break;
406 		}
407 		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
408 		if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
409 			enabled = 0;
410 	}
411 	link->clkpm_enabled = enabled;
412 	link->clkpm_default = enabled;
413 	pcie_clkpm_override_default_link_state(link, enabled);
414 	link->clkpm_capable = capable;
415 	link->clkpm_disable = blacklist ? 1 : 0;
416 }
417 
418 /*
419  * pcie_aspm_configure_common_clock: check if the two ends of a link
420  *   could use a common clock. If they can, configure them to use the
421  *   common clock. That will reduce the ASPM state exit latency.
422  */
423 static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
424 {
425 	int same_clock = 1;
426 	u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
427 	struct pci_dev *child, *parent = link->pdev;
428 	struct pci_bus *linkbus = parent->subordinate;
429 	/*
430 	 * All functions of a slot should have the same Slot Clock
431 	 * Configuration, so just check one function
432 	 */
433 	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
434 	BUG_ON(!pci_is_pcie(child));
435 
436 	/* Check if the downstream component has Slot Clock Configuration set */
437 	pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
438 	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
439 		same_clock = 0;
440 
441 	/* Check if the upstream component has Slot Clock Configuration set */
442 	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
443 	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
444 		same_clock = 0;
445 
446 	/* Port might be already in common clock mode */
447 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
448 	parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
449 	if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
450 		bool consistent = true;
451 
452 		list_for_each_entry(child, &linkbus->devices, bus_list) {
453 			pcie_capability_read_word(child, PCI_EXP_LNKCTL,
454 						  &reg16);
455 			if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
456 				consistent = false;
457 				break;
458 			}
459 		}
460 		if (consistent)
461 			return;
462 		pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
463 	}
464 
465 	ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
466 	/* Configure downstream component, all functions */
467 	list_for_each_entry(child, &linkbus->devices, bus_list) {
468 		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
469 		child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
470 		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
471 						   PCI_EXP_LNKCTL_CCC, ccc);
472 	}
473 
474 	/* Configure upstream component */
475 	pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
476 					   PCI_EXP_LNKCTL_CCC, ccc);
477 
478 	if (pcie_retrain_link(link->pdev, true)) {
479 
480 		/* Training failed. Restore common clock configurations */
481 		pci_err(parent, "ASPM: Could not configure common clock\n");
482 		list_for_each_entry(child, &linkbus->devices, bus_list)
483 			pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
484 							   PCI_EXP_LNKCTL_CCC,
485 							   child_old_ccc[PCI_FUNC(child->devfn)]);
486 		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
487 						   PCI_EXP_LNKCTL_CCC, parent_old_ccc);
488 	}
489 }
490 
491 /* Convert L0s latency encoding to ns */
492 static u32 calc_l0s_latency(u32 lnkcap)
493 {
494 	u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L0SEL, lnkcap);
495 
496 	if (encoding == 0x7)
497 		return 5 * NSEC_PER_USEC;	/* > 4us */
498 	return (64 << encoding);
499 }
500 
501 /* Convert L0s acceptable latency encoding to ns */
502 static u32 calc_l0s_acceptable(u32 encoding)
503 {
504 	if (encoding == 0x7)
505 		return U32_MAX;
506 	return (64 << encoding);
507 }
508 
509 /* Convert L1 latency encoding to ns */
510 static u32 calc_l1_latency(u32 lnkcap)
511 {
512 	u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L1EL, lnkcap);
513 
514 	if (encoding == 0x7)
515 		return 65 * NSEC_PER_USEC;	/* > 64us */
516 	return NSEC_PER_USEC << encoding;
517 }
518 
519 /* Convert L1 acceptable latency encoding to ns */
520 static u32 calc_l1_acceptable(u32 encoding)
521 {
522 	if (encoding == 0x7)
523 		return U32_MAX;
524 	return NSEC_PER_USEC << encoding;
525 }
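
/*
 * Illustrative numbers for the encodings above (example values chosen
 * arbitrarily, derived from the code rather than quoted from the spec):
 * an exit latency encoding of 4 maps to 64 << 4 = 1024 ns for L0s and
 * to 1 us << 4 = 16 us for L1.  Encoding 0x7 is treated as "just above
 * the largest defined value" for exit latencies (5 us / 65 us) and as
 * "no limit" (U32_MAX) for acceptable latencies.
 */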
526 
527 /* Convert L1SS T_pwr encoding to usec */
528 static u32 calc_l12_pwron(struct pci_dev *pdev, u32 scale, u32 val)
529 {
530 	switch (scale) {
531 	case 0:
532 		return val * 2;
533 	case 1:
534 		return val * 10;
535 	case 2:
536 		return val * 100;
537 	}
538 	pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
539 	return 0;
540 }
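
/*
 * Example for calc_l12_pwron() above (values invented for illustration):
 * scale = 2, val = 5 encodes a T_POWER_ON of 5 * 100 = 500 usec.
 */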
541 
542 /*
543  * Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
544  * register.  Ports enter L1.2 when the most recent LTR value is greater
545  * than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
546  * don't enter L1.2 too aggressively.
547  *
548  * See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
549  */
550 static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
551 {
552 	u64 threshold_ns = (u64)threshold_us * NSEC_PER_USEC;
553 
554 	/*
555 	 * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
556 	 * value of 0x3ff.
557 	 */
558 	if (threshold_ns <= 1 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
559 		*scale = 0;		/* Value times 1ns */
560 		*value = threshold_ns;
561 	} else if (threshold_ns <= 32 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
562 		*scale = 1;		/* Value times 32ns */
563 		*value = roundup(threshold_ns, 32) / 32;
564 	} else if (threshold_ns <= 1024 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
565 		*scale = 2;		/* Value times 1024ns */
566 		*value = roundup(threshold_ns, 1024) / 1024;
567 	} else if (threshold_ns <= 32768 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
568 		*scale = 3;		/* Value times 32768ns */
569 		*value = roundup(threshold_ns, 32768) / 32768;
570 	} else if (threshold_ns <= 1048576 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
571 		*scale = 4;		/* Value times 1048576ns */
572 		*value = roundup(threshold_ns, 1048576) / 1048576;
573 	} else if (threshold_ns <= (u64)33554432 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
574 		*scale = 5;		/* Value times 33554432ns */
575 		*value = roundup(threshold_ns, 33554432) / 33554432;
576 	} else {
577 		*scale = 5;
578 		*value = FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE);
579 	}
580 }
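
/*
 * Worked example for encode_l12_threshold() above (numbers invented for
 * illustration): threshold_us = 55 gives threshold_ns = 55000, which
 * exceeds 1 * 1023 and 32 * 1023 but fits within 1024 * 1023, so we pick
 * scale = 2 (1024 ns units) and value = roundup(55000, 1024) / 1024 = 54,
 * i.e. an encoded threshold of 54 * 1024 = 55296 ns, rounded up rather
 * than down, as noted above.
 */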
581 
582 static void pcie_aspm_check_latency(struct pci_dev *endpoint)
583 {
584 	u32 latency, encoding, lnkcap_up, lnkcap_dw;
585 	u32 l1_switch_latency = 0, latency_up_l0s;
586 	u32 latency_up_l1, latency_dw_l0s, latency_dw_l1;
587 	u32 acceptable_l0s, acceptable_l1;
588 	struct pcie_link_state *link;
589 
590 	/* Device not in D0 doesn't need latency check */
591 	if ((endpoint->current_state != PCI_D0) &&
592 	    (endpoint->current_state != PCI_UNKNOWN))
593 		return;
594 
595 	link = endpoint->bus->self->link_state;
596 
597 	/* Calculate endpoint L0s acceptable latency */
598 	encoding = FIELD_GET(PCI_EXP_DEVCAP_L0S, endpoint->devcap);
599 	acceptable_l0s = calc_l0s_acceptable(encoding);
600 
601 	/* Calculate endpoint L1 acceptable latency */
602 	encoding = FIELD_GET(PCI_EXP_DEVCAP_L1, endpoint->devcap);
603 	acceptable_l1 = calc_l1_acceptable(encoding);
604 
605 	while (link) {
606 		struct pci_dev *dev = pci_function_0(link->pdev->subordinate);
607 
608 		/* Read direction exit latencies */
609 		pcie_capability_read_dword(link->pdev, PCI_EXP_LNKCAP,
610 					   &lnkcap_up);
611 		pcie_capability_read_dword(dev, PCI_EXP_LNKCAP,
612 					   &lnkcap_dw);
613 		latency_up_l0s = calc_l0s_latency(lnkcap_up);
614 		latency_up_l1 = calc_l1_latency(lnkcap_up);
615 		latency_dw_l0s = calc_l0s_latency(lnkcap_dw);
616 		latency_dw_l1 = calc_l1_latency(lnkcap_dw);
617 
618 		/* Check upstream direction L0s latency */
619 		if ((link->aspm_capable & PCIE_LINK_STATE_L0S_UP) &&
620 		    (latency_up_l0s > acceptable_l0s))
621 			link->aspm_capable &= ~PCIE_LINK_STATE_L0S_UP;
622 
623 		/* Check downstream direction L0s latency */
624 		if ((link->aspm_capable & PCIE_LINK_STATE_L0S_DW) &&
625 		    (latency_dw_l0s > acceptable_l0s))
626 			link->aspm_capable &= ~PCIE_LINK_STATE_L0S_DW;
627 		/*
628 		 * Check L1 latency.
629 		 * Every switch on the path to the root complex needs 1
630 		 * more microsecond for L1. The spec doesn't mention L0s.
631 		 *
632 		 * The exit latencies for L1 substates are not advertised
633 		 * by a device.  Since the spec also doesn't mention a way
634 		 * to determine max latencies introduced by enabling L1
635 		 * substates on the components, it is not clear how to do
636 		 * a L1 substate exit latency check.  We assume that the
637 		 * L1 exit latencies advertised by a device include L1
638 		 * substate latencies (and hence do not do any check).
639 		 */
640 		latency = max_t(u32, latency_up_l1, latency_dw_l1);
641 		if ((link->aspm_capable & PCIE_LINK_STATE_L1) &&
642 		    (latency + l1_switch_latency > acceptable_l1))
643 			link->aspm_capable &= ~PCIE_LINK_STATE_L1;
644 		l1_switch_latency += NSEC_PER_USEC;
645 
646 		link = link->parent;
647 	}
648 }
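
/*
 * Illustrative walk through the loop above (all numbers invented): an
 * endpoint advertising 8 us acceptable L1 latency sits behind one
 * switch, and both links report an 8 us L1 exit latency.  The link next
 * to the endpoint passes (8 us + 0 us switch penalty <= 8 us), but the
 * link above the switch fails (8 us + 1 us > 8 us), so only that
 * upstream link has PCIE_LINK_STATE_L1 cleared from aspm_capable.
 */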
649 
650 /* Calculate L1.2 PM substate timing parameters */
651 static void aspm_calc_l12_info(struct pcie_link_state *link,
652 				u32 parent_l1ss_cap, u32 child_l1ss_cap)
653 {
654 	struct pci_dev *child = link->downstream, *parent = link->pdev;
655 	u32 val1, val2, scale1, scale2;
656 	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
657 	u32 ctl1 = 0, ctl2 = 0;
658 	u32 pctl1, pctl2, cctl1, cctl2;
659 	u32 pl1_2_enables, cl1_2_enables;
660 
661 	/* Choose the greater of the two Port Common_Mode_Restore_Times */
662 	val1 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, parent_l1ss_cap);
663 	val2 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, child_l1ss_cap);
664 	t_common_mode = max(val1, val2);
665 
666 	/* Choose the greater of the two Port T_POWER_ON times */
667 	val1   = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, parent_l1ss_cap);
668 	scale1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, parent_l1ss_cap);
669 	val2   = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, child_l1ss_cap);
670 	scale2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, child_l1ss_cap);
671 
672 	if (calc_l12_pwron(parent, scale1, val1) >
673 	    calc_l12_pwron(child, scale2, val2)) {
674 		ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale1) |
675 			FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val1);
676 		t_power_on = calc_l12_pwron(parent, scale1, val1);
677 	} else {
678 		ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale2) |
679 			FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val2);
680 		t_power_on = calc_l12_pwron(child, scale2, val2);
681 	}
682 
683 	/*
684 	 * Set LTR_L1.2_THRESHOLD to the time required to transition the
685 	 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
686 	 * downstream devices report (via LTR) that they can tolerate at
687 	 * least that much latency.
688 	 *
689 	 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
690 	 * Table 5-11.  T(POWER_OFF) is at most 2us and T(L1.2) is at
691 	 * least 4us.
692 	 */
693 	l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
694 	encode_l12_threshold(l1_2_threshold, &scale, &value);
695 	ctl1 |= FIELD_PREP(PCI_L1SS_CTL1_CM_RESTORE_TIME, t_common_mode) |
696 		FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_VALUE, value) |
697 		FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_SCALE, scale);
698 
699 	/* Some broken devices only support dword access to L1 SS */
700 	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
701 	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
702 	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
703 	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);
704 
705 	if (ctl1 == pctl1 && ctl1 == cctl1 &&
706 	    ctl2 == pctl2 && ctl2 == cctl2)
707 		return;
708 
709 	/* Disable L1.2 while updating.  See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
710 	pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
711 	cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
712 
713 	if (pl1_2_enables || cl1_2_enables) {
714 		pci_clear_and_set_config_dword(child,
715 					       child->l1ss + PCI_L1SS_CTL1,
716 					       PCI_L1SS_CTL1_L1_2_MASK, 0);
717 		pci_clear_and_set_config_dword(parent,
718 					       parent->l1ss + PCI_L1SS_CTL1,
719 					       PCI_L1SS_CTL1_L1_2_MASK, 0);
720 	}
721 
722 	/* Program T_POWER_ON times in both ports */
723 	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
724 	pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
725 
726 	/* Program Common_Mode_Restore_Time in upstream device */
727 	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
728 				       PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
729 
730 	/* Program LTR_L1.2_THRESHOLD time in both ports */
731 	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
732 				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
733 				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
734 				       ctl1);
735 	pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
736 				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
737 				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
738 				       ctl1);
739 
740 	if (pl1_2_enables || cl1_2_enables) {
741 		pci_clear_and_set_config_dword(parent,
742 					       parent->l1ss + PCI_L1SS_CTL1, 0,
743 					       pl1_2_enables);
744 		pci_clear_and_set_config_dword(child,
745 					       child->l1ss + PCI_L1SS_CTL1, 0,
746 					       cl1_2_enables);
747 	}
748 }
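
/*
 * Illustrative arithmetic for the l1_2_threshold computed above (values
 * invented): with Common_Mode_Restore_Time = 40 us and the larger of the
 * two Ports' T_POWER_ON = 10 us, l1_2_threshold = 2 + 4 + 40 + 10 = 56 us,
 * which encode_l12_threshold() then converts to a scale/value pair.
 */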
749 
750 static void aspm_l1ss_init(struct pcie_link_state *link)
751 {
752 	struct pci_dev *child = link->downstream, *parent = link->pdev;
753 	u32 parent_l1ss_cap, child_l1ss_cap;
754 	u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
755 
756 	if (!parent->l1ss || !child->l1ss)
757 		return;
758 
759 	/* Setup L1 substate */
760 	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
761 			      &parent_l1ss_cap);
762 	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
763 			      &child_l1ss_cap);
764 
765 	if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
766 		parent_l1ss_cap = 0;
767 	if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
768 		child_l1ss_cap = 0;
769 
770 	/*
771 	 * If we don't have LTR for the entire path from the Root Complex
772 	 * to this device, we can't use ASPM L1.2 because it relies on the
773 	 * LTR_L1.2_THRESHOLD.  See PCIe r4.0, secs 5.5.4, 6.18.
774 	 */
775 	if (!child->ltr_path)
776 		child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
777 
778 	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
779 		link->aspm_support |= PCIE_LINK_STATE_L1_1;
780 	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
781 		link->aspm_support |= PCIE_LINK_STATE_L1_2;
782 	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
783 		link->aspm_support |= PCIE_LINK_STATE_L1_1_PCIPM;
784 	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
785 		link->aspm_support |= PCIE_LINK_STATE_L1_2_PCIPM;
786 
787 	if (parent_l1ss_cap)
788 		pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
789 				      &parent_l1ss_ctl1);
790 	if (child_l1ss_cap)
791 		pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
792 				      &child_l1ss_ctl1);
793 
794 	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
795 		link->aspm_enabled |= PCIE_LINK_STATE_L1_1;
796 	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
797 		link->aspm_enabled |= PCIE_LINK_STATE_L1_2;
798 	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
799 		link->aspm_enabled |= PCIE_LINK_STATE_L1_1_PCIPM;
800 	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
801 		link->aspm_enabled |= PCIE_LINK_STATE_L1_2_PCIPM;
802 
803 	if (link->aspm_support & PCIE_LINK_STATE_L1_2_MASK)
804 		aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
805 }
806 
807 #define FLAG(x, y, d)	(((x) & (PCIE_LINK_STATE_##y)) ? d : "")
808 
809 static void pcie_aspm_override_default_link_state(struct pcie_link_state *link)
810 {
811 	struct pci_dev *pdev = link->downstream;
812 	u32 override;
813 
814 	/* For devicetree platforms, enable all ASPM states by default */
815 	if (of_have_populated_dt()) {
816 		link->aspm_default = PCIE_LINK_STATE_ASPM_ALL;
817 		override = link->aspm_default & ~link->aspm_enabled;
818 		if (override)
819 			pci_info(pdev, "ASPM: DT platform, enabling%s%s%s%s%s%s%s\n",
820 				 FLAG(override, L0S_UP, " L0s-up"),
821 				 FLAG(override, L0S_DW, " L0s-dw"),
822 				 FLAG(override, L1, " L1"),
823 				 FLAG(override, L1_1, " ASPM-L1.1"),
824 				 FLAG(override, L1_2, " ASPM-L1.2"),
825 				 FLAG(override, L1_1_PCIPM, " PCI-PM-L1.1"),
826 				 FLAG(override, L1_2_PCIPM, " PCI-PM-L1.2"));
827 	}
828 }
829 
830 static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
831 {
832 	struct pci_dev *child = link->downstream, *parent = link->pdev;
833 	u32 parent_lnkcap, child_lnkcap;
834 	u16 parent_lnkctl, child_lnkctl;
835 	struct pci_bus *linkbus = parent->subordinate;
836 
837 	if (blacklist) {
838 		/* Set aspm_enabled/aspm_disable so that we disable ASPM later */
839 		link->aspm_enabled = PCIE_LINK_STATE_ASPM_ALL;
840 		link->aspm_disable = PCIE_LINK_STATE_ASPM_ALL;
841 		return;
842 	}
843 
844 	/*
845 	 * If ASPM is not supported, don't mess with the clocks and link;
846 	 * bail out now.
847 	 */
848 	pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
849 	pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
850 	if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
851 		return;
852 
853 	/* Configure common clock before checking latencies */
854 	pcie_aspm_configure_common_clock(link);
855 
856 	/*
857 	 * Re-read upstream/downstream components' register state after
858 	 * clock configuration.  L0s & L1 exit latencies in the otherwise
859 	 * read-only Link Capabilities may change depending on common clock
860 	 * configuration (PCIe r5.0, sec 7.5.3.6).
861 	 */
862 	pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
863 	pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
864 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
865 	pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
866 
867 	/* Disable L0s/L1 before updating L1SS config */
868 	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
869 	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
870 		pcie_capability_write_word(child, PCI_EXP_LNKCTL,
871 					   child_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
872 		pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
873 					   parent_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
874 	}
875 
876 	/*
877 	 * Setup L0s state
878 	 *
879 	 * Note that we must not enable L0s in either direction on a
880 	 * given link unless components on both sides of the link each
881 	 * support L0s.
882 	 */
883 	if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
884 		link->aspm_support |= PCIE_LINK_STATE_L0S;
885 
886 	if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
887 		link->aspm_enabled |= PCIE_LINK_STATE_L0S_UP;
888 	if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
889 		link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW;
890 
891 	/* Setup L1 state */
892 	if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
893 		link->aspm_support |= PCIE_LINK_STATE_L1;
894 
895 	if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
896 		link->aspm_enabled |= PCIE_LINK_STATE_L1;
897 
898 	aspm_l1ss_init(link);
899 
900 	/* Restore L0s/L1 if they were enabled */
901 	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
902 	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
903 		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_lnkctl);
904 		pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_lnkctl);
905 	}
906 
907 	/* Save default state */
908 	link->aspm_default = link->aspm_enabled;
909 
910 	pcie_aspm_override_default_link_state(link);
911 
912 	/* Setup initial capable state. Will be updated later */
913 	link->aspm_capable = link->aspm_support;
914 
915 	/* Get and check endpoint acceptable latencies */
916 	list_for_each_entry(child, &linkbus->devices, bus_list) {
917 		if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
918 		    pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
919 			continue;
920 
921 		pcie_aspm_check_latency(child);
922 	}
923 }
924 
925 /* Configure the ASPM L1 substates. Caller must disable L1 first. */
926 static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
927 {
928 	u32 val = 0;
929 	struct pci_dev *child = link->downstream, *parent = link->pdev;
930 
931 	if (state & PCIE_LINK_STATE_L1_1)
932 		val |= PCI_L1SS_CTL1_ASPM_L1_1;
933 	if (state & PCIE_LINK_STATE_L1_2)
934 		val |= PCI_L1SS_CTL1_ASPM_L1_2;
935 	if (state & PCIE_LINK_STATE_L1_1_PCIPM)
936 		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
937 	if (state & PCIE_LINK_STATE_L1_2_PCIPM)
938 		val |= PCI_L1SS_CTL1_PCIPM_L1_2;
939 
940 	/*
941 	 * PCIe r6.2, sec 5.5.4, rules for enabling L1 PM Substates:
942 	 * - Clear L1.x enable bits at child first, then at parent
943 	 * - Set L1.x enable bits at parent first, then at child
944 	 * - ASPM/PCIPM L1.2 must be disabled while programming timing
945 	 *   parameters
946 	 */
947 
948 	/* Disable all L1 substates */
949 	pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
950 				       PCI_L1SS_CTL1_L1SS_MASK, 0);
951 	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
952 				       PCI_L1SS_CTL1_L1SS_MASK, 0);
953 
954 	/* Enable what we need to enable */
955 	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
956 				       PCI_L1SS_CTL1_L1SS_MASK, val);
957 	pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
958 				       PCI_L1SS_CTL1_L1SS_MASK, val);
959 }
960 
961 static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
962 {
963 	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
964 					   PCI_EXP_LNKCTL_ASPMC, val);
965 }
966 
967 static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
968 {
969 	u32 upstream = 0, dwstream = 0;
970 	struct pci_dev *child = link->downstream, *parent = link->pdev;
971 	struct pci_bus *linkbus = parent->subordinate;
972 
973 	/* Enable only the states that were not explicitly disabled */
974 	state &= (link->aspm_capable & ~link->aspm_disable);
975 
976 	/* Can't enable any substates if L1 is not enabled */
977 	if (!(state & PCIE_LINK_STATE_L1))
978 		state &= ~PCIE_LINK_STATE_L1SS;
979 
980 	/* Spec says both ports must be in D0 before enabling PCI PM substates */
981 	if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
982 		state &= ~PCIE_LINK_STATE_L1_SS_PCIPM;
983 		state |= (link->aspm_enabled & PCIE_LINK_STATE_L1_SS_PCIPM);
984 	}
985 
986 	/* Nothing to do if the link is already in the requested state */
987 	if (link->aspm_enabled == state)
988 		return;
989 	/* Convert ASPM state to upstream/downstream ASPM register state */
990 	if (state & PCIE_LINK_STATE_L0S_UP)
991 		dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
992 	if (state & PCIE_LINK_STATE_L0S_DW)
993 		upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
994 	if (state & PCIE_LINK_STATE_L1) {
995 		upstream |= PCI_EXP_LNKCTL_ASPM_L1;
996 		dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
997 	}
998 
999 	/*
1000 	 * Per PCIe r6.2, sec 5.5.4, setting either or both of the enable
1001 	 * bits for ASPM L1 PM Substates must be done while ASPM L1 is
1002 	 * disabled. Disable L1 here and apply new configuration after L1SS
1003 	 * configuration has been completed.
1004 	 *
1005 	 * Per sec 7.5.3.7, when disabling ASPM L1, software must disable
1006 	 * it in the Downstream component prior to disabling it in the
1007 	 * Upstream component, and ASPM L1 must be enabled in the Upstream
1008 	 * component prior to enabling it in the Downstream component.
1009 	 *
1010 	 * Sec 7.5.3.7 also recommends programming the same ASPM Control
1011 	 * value for all functions of a multi-function device.
1012 	 */
1013 	list_for_each_entry(child, &linkbus->devices, bus_list)
1014 		pcie_config_aspm_dev(child, 0);
1015 	pcie_config_aspm_dev(parent, 0);
1016 
1017 	if (link->aspm_capable & PCIE_LINK_STATE_L1SS)
1018 		pcie_config_aspm_l1ss(link, state);
1019 
1020 	pcie_config_aspm_dev(parent, upstream);
1021 	list_for_each_entry(child, &linkbus->devices, bus_list)
1022 		pcie_config_aspm_dev(child, dwstream);
1023 
1024 	link->aspm_enabled = state;
1025 
1026 	/* Update latest ASPM configuration in saved context */
1027 	pci_save_aspm_l1ss_state(link->downstream);
1028 	pci_update_aspm_saved_state(link->downstream);
1029 	pci_save_aspm_l1ss_state(parent);
1030 	pci_update_aspm_saved_state(parent);
1031 }
1032 
1033 static void pcie_config_aspm_path(struct pcie_link_state *link)
1034 {
1035 	while (link) {
1036 		pcie_config_aspm_link(link, policy_to_aspm_state(link));
1037 		link = link->parent;
1038 	}
1039 }
1040 
1041 static void free_link_state(struct pcie_link_state *link)
1042 {
1043 	link->pdev->link_state = NULL;
1044 	kfree(link);
1045 }
1046 
1047 static int pcie_aspm_sanity_check(struct pci_dev *pdev)
1048 {
1049 	struct pci_dev *child;
1050 	u32 reg32;
1051 
1052 	/*
1053 	 * Strangely, the functions in a slot might not all be PCIe
1054 	 * functions. In that case, disable ASPM for the whole slot.
1055 	 */
1056 	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
1057 		if (!pci_is_pcie(child))
1058 			return -EINVAL;
1059 
1060 		/*
1061 		 * If ASPM is disabled then we're not going to change
1062 		 * the BIOS state. It's safe to continue even if it's a
1063 		 * pre-1.1 device
1064 		 */
1065 
1066 		if (aspm_disabled)
1067 			continue;
1068 
1069 		/*
1070 		 * Disable ASPM for pre-1.1 PCIe devices. Following Microsoft,
1071 		 * we use the RBER bit to determine if a function is a 1.1 device.
1072 		 */
1073 		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
1074 		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
1075 			pci_info(child, "disabling ASPM on pre-1.1 PCIe device.  You can enable it with 'pcie_aspm=force'\n");
1076 			return -EINVAL;
1077 		}
1078 	}
1079 	return 0;
1080 }
1081 
1082 static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
1083 {
1084 	struct pcie_link_state *link;
1085 
1086 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1087 	if (!link)
1088 		return NULL;
1089 
1090 	INIT_LIST_HEAD(&link->sibling);
1091 	link->pdev = pdev;
1092 	link->downstream = pci_function_0(pdev->subordinate);
1093 
1094 	/*
1095 	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
1096 	 * hierarchies.  Note that some PCIe host implementations omit
1097 	 * the root ports entirely, in which case a downstream port on
1098 	 * a switch may become the root of the link state chain for all
1099 	 * its subordinate endpoints.
1100 	 */
1101 	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
1102 	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
1103 	    !pdev->bus->parent->self) {
1104 		link->root = link;
1105 	} else {
1106 		struct pcie_link_state *parent;
1107 
1108 		parent = pdev->bus->parent->self->link_state;
1109 		if (!parent) {
1110 			kfree(link);
1111 			return NULL;
1112 		}
1113 
1114 		link->parent = parent;
1115 		link->root = link->parent->root;
1116 	}
1117 
1118 	list_add(&link->sibling, &link_list);
1119 	pdev->link_state = link;
1120 	return link;
1121 }
1122 
1123 static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
1124 {
1125 	struct pci_dev *child;
1126 
1127 	list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
1128 		sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
1129 }
1130 
1131 /*
1132  * pcie_aspm_init_link_state: Initialize the PCI Express link state.
1133  * It is called after the PCIe port and its child devices are scanned.
1134  * @pdev: the root port or switch downstream port
1135  */
1136 void pcie_aspm_init_link_state(struct pci_dev *pdev)
1137 {
1138 	struct pcie_link_state *link;
1139 	int blacklist = !!pcie_aspm_sanity_check(pdev);
1140 
1141 	if (!aspm_support_enabled)
1142 		return;
1143 
1144 	if (pdev->link_state)
1145 		return;
1146 
1147 	/*
1148 	 * We allocate pcie_link_state for the component on the upstream
1149 	 * end of a Link, so there's nothing to do unless this device is
1150 	 * a downstream port.
1151 	 */
1152 	if (!pcie_downstream_port(pdev))
1153 		return;
1154 
1155 	/* VIA has a strange chipset, root port is under a bridge */
1156 	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
1157 	    pdev->bus->self)
1158 		return;
1159 
1160 	down_read(&pci_bus_sem);
1161 	if (list_empty(&pdev->subordinate->devices))
1162 		goto out;
1163 
1164 	mutex_lock(&aspm_lock);
1165 	link = alloc_pcie_link_state(pdev);
1166 	if (!link)
1167 		goto unlock;
1168 	/*
1169 	 * Setup initial ASPM state. Note that we need to configure
1170 	 * upstream links as well because their capable state can be
1171 	 * updated through pcie_aspm_cap_init().
1172 	 */
1173 	pcie_aspm_cap_init(link, blacklist);
1174 
1175 	/* Setup initial Clock PM state */
1176 	pcie_clkpm_cap_init(link, blacklist);
1177 
1178 	/*
1179 	 * At this stage drivers haven't had an opportunity to change the
1180 	 * link policy setting. Enabling ASPM on broken hardware can cripple
1181 	 * it even before the driver has had a chance to disable ASPM, so
1182 	 * default to a safe level right now. If we're enabling ASPM beyond
1183 	 * the BIOS's expectation, we'll do so once pci_enable_device() is
1184 	 * called.
1185 	 */
1186 	if (aspm_policy != POLICY_POWERSAVE &&
1187 	    aspm_policy != POLICY_POWER_SUPERSAVE) {
1188 		pcie_config_aspm_path(link);
1189 		pcie_set_clkpm(link, policy_to_clkpm_state(link));
1190 	}
1191 
1192 	pcie_aspm_update_sysfs_visibility(pdev);
1193 
1194 unlock:
1195 	mutex_unlock(&aspm_lock);
1196 out:
1197 	up_read(&pci_bus_sem);
1198 }
1199 
1200 void pci_bridge_reconfigure_ltr(struct pci_dev *pdev)
1201 {
1202 	struct pci_dev *bridge;
1203 	u32 ctl;
1204 
1205 	bridge = pci_upstream_bridge(pdev);
1206 	if (bridge && bridge->ltr_path) {
1207 		pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
1208 		if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
1209 			pci_dbg(bridge, "re-enabling LTR\n");
1210 			pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
1211 						 PCI_EXP_DEVCTL2_LTR_EN);
1212 		}
1213 	}
1214 }
1215 
1216 void pci_configure_ltr(struct pci_dev *pdev)
1217 {
1218 	struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
1219 	struct pci_dev *bridge;
1220 	u32 cap, ctl;
1221 
1222 	if (!pci_is_pcie(pdev))
1223 		return;
1224 
1225 	pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &cap);
1226 	if (!(cap & PCI_EXP_DEVCAP2_LTR))
1227 		return;
1228 
1229 	pcie_capability_read_dword(pdev, PCI_EXP_DEVCTL2, &ctl);
1230 	if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
1231 		if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
1232 			pdev->ltr_path = 1;
1233 			return;
1234 		}
1235 
1236 		bridge = pci_upstream_bridge(pdev);
1237 		if (bridge && bridge->ltr_path)
1238 			pdev->ltr_path = 1;
1239 
1240 		return;
1241 	}
1242 
1243 	if (!host->native_ltr)
1244 		return;
1245 
1246 	/*
1247 	 * Software must not enable LTR in an Endpoint unless the Root
1248 	 * Complex and all intermediate Switches indicate support for LTR.
1249 	 * PCIe r4.0, sec 6.18.
1250 	 */
1251 	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
1252 		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
1253 					 PCI_EXP_DEVCTL2_LTR_EN);
1254 		pdev->ltr_path = 1;
1255 		return;
1256 	}
1257 
1258 	/*
1259 	 * If we're configuring a hot-added device, LTR was likely
1260 	 * disabled in the upstream bridge, so re-enable it before enabling
1261 	 * it in the new device.
1262 	 */
1263 	bridge = pci_upstream_bridge(pdev);
1264 	if (bridge && bridge->ltr_path) {
1265 		pci_bridge_reconfigure_ltr(pdev);
1266 		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
1267 					 PCI_EXP_DEVCTL2_LTR_EN);
1268 		pdev->ltr_path = 1;
1269 	}
1270 }
1271 
1272 /* Recheck latencies and update aspm_capable for links under the root */
1273 static void pcie_update_aspm_capable(struct pcie_link_state *root)
1274 {
1275 	struct pcie_link_state *link;
1276 	BUG_ON(root->parent);
1277 	list_for_each_entry(link, &link_list, sibling) {
1278 		if (link->root != root)
1279 			continue;
1280 		link->aspm_capable = link->aspm_support;
1281 	}
1282 	list_for_each_entry(link, &link_list, sibling) {
1283 		struct pci_dev *child;
1284 		struct pci_bus *linkbus = link->pdev->subordinate;
1285 		if (link->root != root)
1286 			continue;
1287 		list_for_each_entry(child, &linkbus->devices, bus_list) {
1288 			if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
1289 			    (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
1290 				continue;
1291 			pcie_aspm_check_latency(child);
1292 		}
1293 	}
1294 }
1295 
1296 /* @pdev: the endpoint device */
1297 void pcie_aspm_exit_link_state(struct pci_dev *pdev)
1298 {
1299 	struct pci_dev *parent = pdev->bus->self;
1300 	struct pcie_link_state *link, *root, *parent_link;
1301 
1302 	if (!parent || !parent->link_state)
1303 		return;
1304 
1305 	down_read(&pci_bus_sem);
1306 	mutex_lock(&aspm_lock);
1307 
1308 	link = parent->link_state;
1309 	root = link->root;
1310 	parent_link = link->parent;
1311 
1312 	/*
1313 	 * Free the parent link state no later than when function 0 (i.e.
1314 	 * link->downstream) is removed.
1315 	 *
1316 	 * Do not free the link state any earlier. If function 0 is a
1317 	 * switch upstream port, this link state is parent_link to all
1318 	 * subordinate ones.
1319 	 */
1320 	if (pdev != link->downstream)
1321 		goto out;
1322 
1323 	pcie_config_aspm_link(link, 0);
1324 	list_del(&link->sibling);
1325 	free_link_state(link);
1326 
1327 	/* Recheck latencies and configure upstream links */
1328 	if (parent_link) {
1329 		pcie_update_aspm_capable(root);
1330 		pcie_config_aspm_path(parent_link);
1331 	}
1332 
1333  out:
1334 	mutex_unlock(&aspm_lock);
1335 	up_read(&pci_bus_sem);
1336 }
1337 
1338 /*
1339  * @pdev: the root port or switch downstream port
1340  * @locked: whether pci_bus_sem is held
1341  */
1342 void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
1343 {
1344 	struct pcie_link_state *link = pdev->link_state;
1345 
1346 	if (aspm_disabled || !link)
1347 		return;
1348 	/*
1349 	 * Devices changed PM state, so recheck whether the latency
1350 	 * still meets all functions' requirements.
1351 	 */
1352 	if (!locked)
1353 		down_read(&pci_bus_sem);
1354 	mutex_lock(&aspm_lock);
1355 	pcie_update_aspm_capable(link->root);
1356 	pcie_config_aspm_path(link);
1357 	mutex_unlock(&aspm_lock);
1358 	if (!locked)
1359 		up_read(&pci_bus_sem);
1360 }
1361 
1362 void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
1363 {
1364 	struct pcie_link_state *link = pdev->link_state;
1365 
1366 	if (aspm_disabled || !link)
1367 		return;
1368 
1369 	if (aspm_policy != POLICY_POWERSAVE &&
1370 	    aspm_policy != POLICY_POWER_SUPERSAVE)
1371 		return;
1372 
1373 	down_read(&pci_bus_sem);
1374 	mutex_lock(&aspm_lock);
1375 	pcie_config_aspm_path(link);
1376 	pcie_set_clkpm(link, policy_to_clkpm_state(link));
1377 	mutex_unlock(&aspm_lock);
1378 	up_read(&pci_bus_sem);
1379 }
1380 
1381 static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
1382 {
1383 	struct pci_dev *bridge;
1384 
1385 	if (!pci_is_pcie(pdev))
1386 		return NULL;
1387 
1388 	bridge = pci_upstream_bridge(pdev);
1389 	if (!bridge || !pci_is_pcie(bridge))
1390 		return NULL;
1391 
1392 	return bridge->link_state;
1393 }
1394 
1395 static u8 pci_calc_aspm_disable_mask(int state)
1396 {
1397 	state &= ~PCIE_LINK_STATE_CLKPM;
1398 
1399 	/* L1 PM substates require L1 */
1400 	if (state & PCIE_LINK_STATE_L1)
1401 		state |= PCIE_LINK_STATE_L1SS;
1402 
1403 	return state;
1404 }
1405 
1406 static u8 pci_calc_aspm_enable_mask(int state)
1407 {
1408 	state &= ~PCIE_LINK_STATE_CLKPM;
1409 
1410 	/* L1 PM substates require L1 */
1411 	if (state & PCIE_LINK_STATE_L1SS)
1412 		state |= PCIE_LINK_STATE_L1;
1413 
1414 	return state;
1415 }
1416 
1417 static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
1418 {
1419 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1420 
1421 	if (!link)
1422 		return -EINVAL;
1423 	/*
1424 	 * A driver requested that ASPM be disabled on this device, but
1425 	 * if we don't have permission to manage ASPM (e.g., on ACPI
1426 	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
1427 	 * the _OSC method), we can't honor that request.  Windows has
1428 	 * a similar mechanism using "PciASPMOptOut", which is also
1429 	 * ignored in this situation.
1430 	 */
1431 	if (aspm_disabled) {
1432 		pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
1433 		return -EPERM;
1434 	}
1435 
1436 	if (!locked)
1437 		down_read(&pci_bus_sem);
1438 	mutex_lock(&aspm_lock);
1439 	link->aspm_disable |= pci_calc_aspm_disable_mask(state);
1440 	pcie_config_aspm_link(link, policy_to_aspm_state(link));
1441 
1442 	if (state & PCIE_LINK_STATE_CLKPM)
1443 		link->clkpm_disable = 1;
1444 	pcie_set_clkpm(link, policy_to_clkpm_state(link));
1445 	mutex_unlock(&aspm_lock);
1446 	if (!locked)
1447 		up_read(&pci_bus_sem);
1448 
1449 	return 0;
1450 }
1451 
1452 int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1453 {
1454 	lockdep_assert_held_read(&pci_bus_sem);
1455 
1456 	return __pci_disable_link_state(pdev, state, true);
1457 }
1458 EXPORT_SYMBOL(pci_disable_link_state_locked);
1459 
1460 /**
1461  * pci_disable_link_state - Disable device's link state, so the link will
1462  * never enter specific states.  Note that if the BIOS didn't grant ASPM
1463  * control to the OS, this does nothing because we can't touch the LNKCTL
1464  * register. Returns 0 or a negative errno.
1465  *
1466  * @pdev: PCI device
1467  * @state: ASPM link state to disable
1468  */
1469 int pci_disable_link_state(struct pci_dev *pdev, int state)
1470 {
1471 	return __pci_disable_link_state(pdev, state, false);
1472 }
1473 EXPORT_SYMBOL(pci_disable_link_state);
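
/*
 * Typical caller-side use (a sketch, not taken from any specific
 * driver): a driver whose hardware misbehaves with ASPM L1 might call
 *
 *	pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
 *
 * from its probe path.  Per pci_calc_aspm_disable_mask(), disabling L1
 * also disables the L1 PM substates that depend on it.
 */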
1474 
1475 static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
1476 {
1477 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1478 
1479 	if (!link)
1480 		return -EINVAL;
1481 	/*
1482 	 * A driver requested that ASPM be enabled on this device, but
1483 	 * if we don't have permission to manage ASPM (e.g., on ACPI
1484 	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
1485 	 * the _OSC method), we can't honor that request.
1486 	 */
1487 	if (aspm_disabled) {
1488 		pci_warn(pdev, "can't override BIOS ASPM; OS doesn't have ASPM control\n");
1489 		return -EPERM;
1490 	}
1491 
1492 	if (!locked)
1493 		down_read(&pci_bus_sem);
1494 	mutex_lock(&aspm_lock);
1495 	link->aspm_default = pci_calc_aspm_enable_mask(state);
1496 	pcie_config_aspm_link(link, policy_to_aspm_state(link));
1497 
1498 	link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
1499 	pcie_set_clkpm(link, policy_to_clkpm_state(link));
1500 	mutex_unlock(&aspm_lock);
1501 	if (!locked)
1502 		up_read(&pci_bus_sem);
1503 
1504 	return 0;
1505 }
1506 
1507 /**
1508  * pci_enable_link_state - Clear and set the default device link state so that
1509  * the link may be allowed to enter the specified states. Note that if the
1510  * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
1511  * touch the LNKCTL register. Also note that this does not enable states
1512  * disabled by pci_disable_link_state(). Return 0 or a negative errno.
1513  *
1514  * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
1515  * PCIe r6.0, sec 5.5.4.
1516  *
1517  * @pdev: PCI device
1518  * @state: Mask of ASPM link states to enable
1519  */
1520 int pci_enable_link_state(struct pci_dev *pdev, int state)
1521 {
1522 	return __pci_enable_link_state(pdev, state, false);
1523 }
1524 EXPORT_SYMBOL(pci_enable_link_state);
1525 
1526 /**
1527  * pci_enable_link_state_locked - Clear and set the default device link state
1528  * so that the link may be allowed to enter the specified states. Note that if
1529  * the BIOS didn't grant ASPM control to the OS, this does nothing because we
1530  * can't touch the LNKCTL register. Also note that this does not enable states
1531  * disabled by pci_disable_link_state(). Return 0 or a negative errno.
1532  *
1533  * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
1534  * PCIe r6.0, sec 5.5.4.
1535  *
1536  * @pdev: PCI device
1537  * @state: Mask of ASPM link states to enable
1538  *
1539  * Context: Caller holds pci_bus_sem read lock.
1540  */
1541 int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1542 {
1543 	lockdep_assert_held_read(&pci_bus_sem);
1544 
1545 	return __pci_enable_link_state(pdev, state, true);
1546 }
1547 EXPORT_SYMBOL(pci_enable_link_state_locked);
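
/*
 * Hypothetical usage sketch (not part of this file): the _locked variant is
 * meant for callers that already hold the pci_bus_sem read lock, such as a
 * pci_walk_bus() callback, which runs with that lock held.
 * baz_enable_aspm() is illustrative only; a host bridge driver would invoke
 * it as pci_walk_bus(bus, baz_enable_aspm, NULL).
 */
static int baz_enable_aspm(struct pci_dev *pdev, void *data)
{
	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_L1 |
					   PCIE_LINK_STATE_L1SS);
	return 0;
}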
1548 
1549 static int pcie_aspm_set_policy(const char *val,
1550 				const struct kernel_param *kp)
1551 {
1552 	int i;
1553 	struct pcie_link_state *link;
1554 
1555 	if (aspm_disabled)
1556 		return -EPERM;
1557 	i = sysfs_match_string(policy_str, val);
1558 	if (i < 0)
1559 		return i;
1560 	if (i == aspm_policy)
1561 		return 0;
1562 
1563 	down_read(&pci_bus_sem);
1564 	mutex_lock(&aspm_lock);
1565 	aspm_policy = i;
1566 	list_for_each_entry(link, &link_list, sibling) {
1567 		pcie_config_aspm_link(link, policy_to_aspm_state(link));
1568 		pcie_set_clkpm(link, policy_to_clkpm_state(link));
1569 	}
1570 	mutex_unlock(&aspm_lock);
1571 	up_read(&pci_bus_sem);
1572 	return 0;
1573 }
1574 
1575 static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
1576 {
1577 	int i, cnt = 0;
1578 	for (i = 0; i < ARRAY_SIZE(policy_str); i++)
1579 		if (i == aspm_policy)
1580 			cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
1581 		else
1582 			cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
1583 	cnt += sprintf(buffer + cnt, "\n");
1584 	return cnt;
1585 }
1586 
1587 module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
1588 	NULL, 0644);
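
/*
 * The "policy" parameter is exposed at
 * /sys/module/pcie_aspm/parameters/policy.  Reading it lists the available
 * policies with the current one in brackets; writing one of the policy_str[]
 * names (e.g. "powersave") makes pcie_aspm_set_policy() above reconfigure
 * every link on link_list.  The same setting can be given at boot time as
 * pcie_aspm.policy=<name>.
 */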
1589 
1590 /**
1591  * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
1592  * @pdev: Target device.
1593  *
1594  * Relies on the upstream bridge's link_state being valid.  The link_state
1595  * is deallocated only when the last child of the bridge (i.e., @pdev or a
1596  * sibling) is removed, and the caller should be holding a reference to
1597  * @pdev, so this should be safe.
1598  */
1599 bool pcie_aspm_enabled(struct pci_dev *pdev)
1600 {
1601 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1602 
1603 	if (!link)
1604 		return false;
1605 
1606 	return link->aspm_enabled;
1607 }
1608 EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
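
/*
 * Hypothetical usage sketch (not part of this file): a device driver might
 * use pcie_aspm_enabled() to decide whether to fall back to its own,
 * device-specific link power management.  qux_init_power() is illustrative
 * only.
 */
static void qux_init_power(struct pci_dev *pdev)
{
	/* If the link already uses ASPM, skip the driver-private scheme */
	if (pcie_aspm_enabled(pdev))
		return;

	/* ... enable device-specific power saving here ... */
}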
1609 
1610 static ssize_t aspm_attr_show_common(struct device *dev,
1611 				     struct device_attribute *attr,
1612 				     char *buf, u8 state)
1613 {
1614 	struct pci_dev *pdev = to_pci_dev(dev);
1615 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1616 
1617 	return sysfs_emit(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
1618 }
1619 
1620 static ssize_t aspm_attr_store_common(struct device *dev,
1621 				      struct device_attribute *attr,
1622 				      const char *buf, size_t len, u8 state)
1623 {
1624 	struct pci_dev *pdev = to_pci_dev(dev);
1625 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1626 	bool state_enable;
1627 
1628 	if (kstrtobool(buf, &state_enable) < 0)
1629 		return -EINVAL;
1630 
1631 	down_read(&pci_bus_sem);
1632 	mutex_lock(&aspm_lock);
1633 
1634 	if (state_enable) {
1635 		link->aspm_disable &= ~state;
1636 		/* need to enable L1 for substates */
1637 		if (state & PCIE_LINK_STATE_L1SS)
1638 			link->aspm_disable &= ~PCIE_LINK_STATE_L1;
1639 	} else {
1640 		link->aspm_disable |= state;
1641 		if (state & PCIE_LINK_STATE_L1)
1642 			link->aspm_disable |= PCIE_LINK_STATE_L1SS;
1643 	}
1644 
1645 	pcie_config_aspm_link(link, policy_to_aspm_state(link));
1646 
1647 	mutex_unlock(&aspm_lock);
1648 	up_read(&pci_bus_sem);
1649 
1650 	return len;
1651 }
1652 
1653 #define ASPM_ATTR(_f, _s)						\
1654 static ssize_t _f##_show(struct device *dev,				\
1655 			 struct device_attribute *attr, char *buf)	\
1656 { return aspm_attr_show_common(dev, attr, buf, PCIE_LINK_STATE_##_s); }	\
1657 									\
1658 static ssize_t _f##_store(struct device *dev,				\
1659 			  struct device_attribute *attr,		\
1660 			  const char *buf, size_t len)			\
1661 { return aspm_attr_store_common(dev, attr, buf, len, PCIE_LINK_STATE_##_s); }
1662 
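/*
 * For reference, ASPM_ATTR(l0s_aspm, L0S) below expands to roughly:
 *
 *	static ssize_t l0s_aspm_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{ return aspm_attr_show_common(dev, attr, buf, PCIE_LINK_STATE_L0S); }
 *
 *	static ssize_t l0s_aspm_store(struct device *dev,
 *				      struct device_attribute *attr,
 *				      const char *buf, size_t len)
 *	{ return aspm_attr_store_common(dev, attr, buf, len, PCIE_LINK_STATE_L0S); }
 *
 * DEVICE_ATTR_RW(l0s_aspm) further down then pairs these with
 * dev_attr_l0s_aspm.
 */
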
1663 ASPM_ATTR(l0s_aspm, L0S)
1664 ASPM_ATTR(l1_aspm, L1)
1665 ASPM_ATTR(l1_1_aspm, L1_1)
1666 ASPM_ATTR(l1_2_aspm, L1_2)
1667 ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
1668 ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)
1669 
1670 static ssize_t clkpm_show(struct device *dev,
1671 			  struct device_attribute *attr, char *buf)
1672 {
1673 	struct pci_dev *pdev = to_pci_dev(dev);
1674 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1675 
1676 	return sysfs_emit(buf, "%d\n", link->clkpm_enabled);
1677 }
1678 
1679 static ssize_t clkpm_store(struct device *dev,
1680 			   struct device_attribute *attr,
1681 			   const char *buf, size_t len)
1682 {
1683 	struct pci_dev *pdev = to_pci_dev(dev);
1684 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1685 	bool state_enable;
1686 
1687 	if (kstrtobool(buf, &state_enable) < 0)
1688 		return -EINVAL;
1689 
1690 	down_read(&pci_bus_sem);
1691 	mutex_lock(&aspm_lock);
1692 
1693 	link->clkpm_disable = !state_enable;
1694 	pcie_set_clkpm(link, policy_to_clkpm_state(link));
1695 
1696 	mutex_unlock(&aspm_lock);
1697 	up_read(&pci_bus_sem);
1698 
1699 	return len;
1700 }
1701 
1702 static DEVICE_ATTR_RW(clkpm);
1703 static DEVICE_ATTR_RW(l0s_aspm);
1704 static DEVICE_ATTR_RW(l1_aspm);
1705 static DEVICE_ATTR_RW(l1_1_aspm);
1706 static DEVICE_ATTR_RW(l1_2_aspm);
1707 static DEVICE_ATTR_RW(l1_1_pcipm);
1708 static DEVICE_ATTR_RW(l1_2_pcipm);
1709 
1710 static struct attribute *aspm_ctrl_attrs[] = {
1711 	&dev_attr_clkpm.attr,
1712 	&dev_attr_l0s_aspm.attr,
1713 	&dev_attr_l1_aspm.attr,
1714 	&dev_attr_l1_1_aspm.attr,
1715 	&dev_attr_l1_2_aspm.attr,
1716 	&dev_attr_l1_1_pcipm.attr,
1717 	&dev_attr_l1_2_pcipm.attr,
1718 	NULL
1719 };
1720 
1721 static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
1722 					   struct attribute *a, int n)
1723 {
1724 	struct device *dev = kobj_to_dev(kobj);
1725 	struct pci_dev *pdev = to_pci_dev(dev);
1726 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1727 	static const u8 aspm_state_map[] = {
1728 		PCIE_LINK_STATE_L0S,
1729 		PCIE_LINK_STATE_L1,
1730 		PCIE_LINK_STATE_L1_1,
1731 		PCIE_LINK_STATE_L1_2,
1732 		PCIE_LINK_STATE_L1_1_PCIPM,
1733 		PCIE_LINK_STATE_L1_2_PCIPM,
1734 	};
1735 
1736 	if (aspm_disabled || !link)
1737 		return 0;
1738 
1739 	if (n == 0)
1740 		return link->clkpm_capable ? a->mode : 0;
1741 
1742 	return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
1743 }
1744 
1745 const struct attribute_group aspm_ctrl_attr_group = {
1746 	.name = "link",
1747 	.attrs = aspm_ctrl_attrs,
1748 	.is_visible = aspm_ctrl_attrs_are_visible,
1749 };
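
/*
 * With the group named "link", these controls show up per device as
 * /sys/bus/pci/devices/<bdf>/link/{clkpm,l0s_aspm,l1_aspm,l1_1_aspm,
 * l1_2_aspm,l1_1_pcipm,l1_2_pcipm}.  aspm_ctrl_attrs_are_visible() hides
 * the files for states the link cannot enter, and all of them when the OS
 * does not own ASPM.  For example, "echo 0 > .../link/l1_aspm" disables
 * ASPM L1 on that device's link.
 */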
1750 
1751 static int __init pcie_aspm_disable(char *str)
1752 {
1753 	if (!strcmp(str, "off")) {
1754 		aspm_policy = POLICY_DEFAULT;
1755 		aspm_disabled = true;
1756 		aspm_support_enabled = false;
1757 		pr_info("PCIe ASPM is disabled\n");
1758 	} else if (!strcmp(str, "force")) {
1759 		aspm_force = true;
1760 		pr_info("PCIe ASPM is forcibly enabled\n");
1761 	}
1762 	return 1;
1763 }
1764 
1765 __setup("pcie_aspm=", pcie_aspm_disable);
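
/*
 * Boot-time usage: "pcie_aspm=off" keeps the kernel from touching ASPM
 * configuration at all (whatever the firmware set up is left in place),
 * while "pcie_aspm=force" enables ASPM even where the kernel would
 * otherwise be conservative.  Any other value is silently ignored by
 * pcie_aspm_disable() above.
 */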
1766 
1767 void pcie_no_aspm(void)
1768 {
1769 	/*
1770 	 * Disabling ASPM is intended to prevent the kernel from modifying
1771 	 * existing hardware state, not to clear existing state. To that end:
1772 	 * (a) set policy to POLICY_DEFAULT in order to avoid changing state
1773 	 * (b) prevent userspace from changing policy
1774 	 */
1775 	if (!aspm_force) {
1776 		aspm_policy = POLICY_DEFAULT;
1777 		aspm_disabled = true;
1778 	}
1779 }
1780 
1781 bool pcie_aspm_support_enabled(void)
1782 {
1783 	return aspm_support_enabled;
1784 }
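
/*
 * Hypothetical usage sketch (not part of this file): code elsewhere in the
 * PCI core or a quirk can check pcie_aspm_support_enabled() before relying
 * on ASPM being managed at all.  quux_check_aspm() is illustrative only.
 */
static void quux_check_aspm(struct pci_dev *pdev)
{
	if (!pcie_aspm_support_enabled())
		pci_info(pdev, "ASPM support disabled; skipping ASPM-based tuning\n");
}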
1785 
1786 #endif /* CONFIG_PCIEASPM */
1787