xref: /linux/drivers/pci/pcie/aspm.c (revision 09b1704f5b02c18dd02b21343530463fcfc92c54)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Enable PCIe link L0s/L1 state and Clock Power Management
4  *
5  * Copyright (C) 2007 Intel
6  * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
7  * Copyright (C) Shaohua Li (shaohua.li@intel.com)
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/bits.h>
12 #include <linux/build_bug.h>
13 #include <linux/kernel.h>
14 #include <linux/limits.h>
15 #include <linux/math.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/of.h>
19 #include <linux/pci.h>
20 #include <linux/pci_regs.h>
21 #include <linux/errno.h>
22 #include <linux/pm.h>
23 #include <linux/init.h>
24 #include <linux/printk.h>
25 #include <linux/slab.h>
26 #include <linux/time.h>
27 
28 #include "../pci.h"
29 
30 void pci_save_ltr_state(struct pci_dev *dev)
31 {
32 	int ltr;
33 	struct pci_cap_saved_state *save_state;
34 	u32 *cap;
35 
36 	if (!pci_is_pcie(dev))
37 		return;
38 
39 	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
40 	if (!ltr)
41 		return;
42 
43 	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
44 	if (!save_state) {
45 		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
46 		return;
47 	}
48 
49 	/* Some broken devices only support dword access to LTR */
50 	cap = &save_state->cap.data[0];
51 	pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
52 }
53 
54 void pci_restore_ltr_state(struct pci_dev *dev)
55 {
56 	struct pci_cap_saved_state *save_state;
57 	int ltr;
58 	u32 *cap;
59 
60 	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
61 	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
62 	if (!save_state || !ltr)
63 		return;
64 
65 	/* Some broken devices only support dword access to LTR */
66 	cap = &save_state->cap.data[0];
67 	pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
68 }
69 
70 void pci_configure_aspm_l1ss(struct pci_dev *pdev)
71 {
72 	int rc;
73 
74 	pdev->l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
75 
76 	rc = pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_L1SS,
77 					 2 * sizeof(u32));
78 	if (rc)
79 		pci_err(pdev, "unable to allocate ASPM L1SS save buffer (%pe)\n",
80 			ERR_PTR(rc));
81 }
82 
83 void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
84 {
85 	struct pci_dev *parent = pdev->bus->self;
86 	struct pci_cap_saved_state *save_state;
87 	u32 *cap;
88 
89 	/*
90 	 * If this is a Downstream Port, we never restore the L1SS state
91 	 * directly; we only restore it when we restore the state of the
92 	 * Upstream Port below it.
93 	 */
94 	if (pcie_downstream_port(pdev) || !parent)
95 		return;
96 
97 	if (!pdev->l1ss || !parent->l1ss)
98 		return;
99 
100 	/*
101 	 * Save L1 substate configuration. The ASPM L0s/L1 configuration
102 	 * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
103 	 */
104 	save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
105 	if (!save_state)
106 		return;
107 
108 	cap = &save_state->cap.data[0];
109 	pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
110 	pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
111 
112 	/*
113 	 * Save parent's L1 substate configuration so we have it for
114 	 * pci_restore_aspm_l1ss_state(pdev) to restore.
115 	 */
116 	save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
117 	if (!save_state)
118 		return;
119 
120 	cap = &save_state->cap.data[0];
121 	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
122 	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
123 }
124 
125 void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
126 {
127 	struct pci_cap_saved_state *pl_save_state, *cl_save_state;
128 	struct pci_dev *parent = pdev->bus->self;
129 	u32 *cap, pl_ctl1, pl_ctl2, pl_l1_2_enable;
130 	u32 cl_ctl1, cl_ctl2, cl_l1_2_enable;
131 	u16 clnkctl, plnkctl;
132 
133 	/*
134 	 * In case BIOS enabled L1.2 when resuming, we need to disable it first
135 	 * on the downstream component before the upstream. So, don't attempt to
136 	 * restore either until we are at the downstream component.
137 	 */
138 	if (pcie_downstream_port(pdev) || !parent)
139 		return;
140 
141 	if (!pdev->l1ss || !parent->l1ss)
142 		return;
143 
144 	cl_save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
145 	pl_save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
146 	if (!cl_save_state || !pl_save_state)
147 		return;
148 
149 	cap = &cl_save_state->cap.data[0];
150 	cl_ctl2 = *cap++;
151 	cl_ctl1 = *cap;
152 	cap = &pl_save_state->cap.data[0];
153 	pl_ctl2 = *cap++;
154 	pl_ctl1 = *cap;
155 
156 	/* Make sure L0s/L1 are disabled before updating L1SS config */
157 	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &clnkctl);
158 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &plnkctl);
159 	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
160 	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
161 		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
162 					   clnkctl & ~PCI_EXP_LNKCTL_ASPMC);
163 		pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
164 					   plnkctl & ~PCI_EXP_LNKCTL_ASPMC);
165 	}
166 
167 	/*
168 	 * Disable L1.2 on this downstream endpoint device first, followed
169 	 * by the upstream
170 	 */
171 	pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
172 				       PCI_L1SS_CTL1_L1_2_MASK, 0);
173 	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
174 				       PCI_L1SS_CTL1_L1_2_MASK, 0);
175 
176 	/*
177 	 * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD
178 	 * in PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
179 	 * enable bits, even though they're all in PCI_L1SS_CTL1.
180 	 */
181 	pl_l1_2_enable = pl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
182 	pl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
183 	cl_l1_2_enable = cl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
184 	cl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
185 
186 	/* Write back without enables first (above we cleared them in ctl1) */
187 	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, pl_ctl2);
188 	pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cl_ctl2);
189 	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, pl_ctl1);
190 	pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cl_ctl1);
191 
192 	/* Then write back the enables */
193 	if (pl_l1_2_enable || cl_l1_2_enable) {
194 		pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
195 				       pl_ctl1 | pl_l1_2_enable);
196 		pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
197 				       cl_ctl1 | cl_l1_2_enable);
198 	}
199 
200 	/* Restore L0s/L1 if they were enabled */
201 	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
202 	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
203 		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl);
204 		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl);
205 	}
206 }
207 
208 #ifdef CONFIG_PCIEASPM
209 
210 #ifdef MODULE_PARAM_PREFIX
211 #undef MODULE_PARAM_PREFIX
212 #endif
213 #define MODULE_PARAM_PREFIX "pcie_aspm."
214 
215 /* Note: these are not register definitions */
216 #define PCIE_LINK_STATE_L0S_UP	BIT(0)	/* Upstream direction L0s state */
217 #define PCIE_LINK_STATE_L0S_DW	BIT(1)	/* Downstream direction L0s state */
218 static_assert(PCIE_LINK_STATE_L0S == (PCIE_LINK_STATE_L0S_UP | PCIE_LINK_STATE_L0S_DW));
219 
220 #define PCIE_LINK_STATE_L1_SS_PCIPM	(PCIE_LINK_STATE_L1_1_PCIPM |\
221 					 PCIE_LINK_STATE_L1_2_PCIPM)
222 #define PCIE_LINK_STATE_L1_2_MASK	(PCIE_LINK_STATE_L1_2 |\
223 					 PCIE_LINK_STATE_L1_2_PCIPM)
224 #define PCIE_LINK_STATE_L1SS		(PCIE_LINK_STATE_L1_1 |\
225 					 PCIE_LINK_STATE_L1_1_PCIPM |\
226 					 PCIE_LINK_STATE_L1_2_MASK)
227 
228 struct pcie_link_state {
229 	struct pci_dev *pdev;		/* Upstream component of the Link */
230 	struct pci_dev *downstream;	/* Downstream component, function 0 */
231 	struct pcie_link_state *root;	/* pointer to the root port link */
232 	struct pcie_link_state *parent;	/* pointer to the parent Link state */
233 	struct list_head sibling;	/* node in link_list */
234 
235 	/* ASPM state */
236 	u32 aspm_support:7;		/* Supported ASPM state */
237 	u32 aspm_enabled:7;		/* Enabled ASPM state */
238 	u32 aspm_capable:7;		/* Capable ASPM state with latency */
239 	u32 aspm_default:7;		/* Default ASPM state by BIOS or
240 					   override */
241 	u32 aspm_disable:7;		/* Disabled ASPM state */
242 
243 	/* Clock PM state */
244 	u32 clkpm_capable:1;		/* Clock PM capable? */
245 	u32 clkpm_enabled:1;		/* Current Clock PM state */
246 	u32 clkpm_default:1;		/* Default Clock PM state by BIOS */
247 	u32 clkpm_disable:1;		/* Clock PM disabled */
248 };
249 
250 static bool aspm_disabled, aspm_force;
251 static bool aspm_support_enabled = true;
252 static DEFINE_MUTEX(aspm_lock);
253 static LIST_HEAD(link_list);
254 
255 #define POLICY_DEFAULT 0	/* BIOS default setting */
256 #define POLICY_PERFORMANCE 1	/* high performance */
257 #define POLICY_POWERSAVE 2	/* high power saving */
258 #define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */
259 
260 #ifdef CONFIG_PCIEASPM_PERFORMANCE
261 static int aspm_policy = POLICY_PERFORMANCE;
262 #elif defined CONFIG_PCIEASPM_POWERSAVE
263 static int aspm_policy = POLICY_POWERSAVE;
264 #elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
265 static int aspm_policy = POLICY_POWER_SUPERSAVE;
266 #else
267 static int aspm_policy;
268 #endif
269 
270 static const char *policy_str[] = {
271 	[POLICY_DEFAULT] = "default",
272 	[POLICY_PERFORMANCE] = "performance",
273 	[POLICY_POWERSAVE] = "powersave",
274 	[POLICY_POWER_SUPERSAVE] = "powersupersave"
275 };
276 
277 /*
278  * The L1 PM substate capability is only implemented in function 0 in a
279  * multi-function device.
280  */
281 static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
282 {
283 	struct pci_dev *child;
284 
285 	list_for_each_entry(child, &linkbus->devices, bus_list)
286 		if (PCI_FUNC(child->devfn) == 0)
287 			return child;
288 	return NULL;
289 }
290 
291 static int policy_to_aspm_state(struct pcie_link_state *link)
292 {
293 	switch (aspm_policy) {
294 	case POLICY_PERFORMANCE:
295 		/* Disable ASPM and Clock PM */
296 		return 0;
297 	case POLICY_POWERSAVE:
298 		/* Enable ASPM L0s/L1 */
299 		return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
300 	case POLICY_POWER_SUPERSAVE:
301 		/* Enable Everything */
302 		return PCIE_LINK_STATE_ASPM_ALL;
303 	case POLICY_DEFAULT:
304 		return link->aspm_default;
305 	}
306 	return 0;
307 }
308 
309 static int policy_to_clkpm_state(struct pcie_link_state *link)
310 {
311 	switch (aspm_policy) {
312 	case POLICY_PERFORMANCE:
313 		/* Disable ASPM and Clock PM */
314 		return 0;
315 	case POLICY_POWERSAVE:
316 	case POLICY_POWER_SUPERSAVE:
317 		/* Enable Clock PM */
318 		return 1;
319 	case POLICY_DEFAULT:
320 		return link->clkpm_default;
321 	}
322 	return 0;
323 }
324 
325 static void pci_update_aspm_saved_state(struct pci_dev *dev)
326 {
327 	struct pci_cap_saved_state *save_state;
328 	u16 *cap, lnkctl, aspm_ctl;
329 
330 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
331 	if (!save_state)
332 		return;
333 
334 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnkctl);
335 
336 	/*
337 	 * Update ASPM and CLKREQ bits of LNKCTL in save_state. We only
338 	 * write PCI_EXP_LNKCTL_CCC during enumeration, so it shouldn't
339 	 * change after being captured in save_state.
340 	 */
341 	aspm_ctl = lnkctl & (PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
342 	lnkctl &= ~(PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
343 
344 	/* Depends on pci_save_pcie_state(): cap[1] is LNKCTL */
345 	cap = (u16 *)&save_state->cap.data[0];
346 	cap[1] = lnkctl | aspm_ctl;
347 }
348 
349 static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
350 {
351 	struct pci_dev *child;
352 	struct pci_bus *linkbus = link->pdev->subordinate;
353 	u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
354 
355 	list_for_each_entry(child, &linkbus->devices, bus_list) {
356 		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
357 						   PCI_EXP_LNKCTL_CLKREQ_EN,
358 						   val);
359 		pci_update_aspm_saved_state(child);
360 	}
361 	link->clkpm_enabled = !!enable;
362 }
363 
364 static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
365 {
366 	/*
367 	 * Don't enable Clock PM if the link is not Clock PM capable
368 	 * or Clock PM is disabled
369 	 */
370 	if (!link->clkpm_capable || link->clkpm_disable)
371 		enable = 0;
372 	/* Nothing to do if the requested state equals the current state */
373 	if (link->clkpm_enabled == enable)
374 		return;
375 	pcie_set_clkpm_nocheck(link, enable);
376 }
377 
378 static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
379 {
380 	int capable = 1, enabled = 1;
381 	u32 reg32;
382 	u16 reg16;
383 	struct pci_dev *child;
384 	struct pci_bus *linkbus = link->pdev->subordinate;
385 
386 	/* All functions should have the same cap and state, take the worst */
387 	list_for_each_entry(child, &linkbus->devices, bus_list) {
388 		pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
389 		if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
390 			capable = 0;
391 			enabled = 0;
392 			break;
393 		}
394 		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
395 		if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
396 			enabled = 0;
397 	}
398 	link->clkpm_enabled = enabled;
399 	link->clkpm_default = enabled;
400 	link->clkpm_capable = capable;
401 	link->clkpm_disable = blacklist ? 1 : 0;
402 }
403 
404 /*
405  * pcie_aspm_configure_common_clock: check whether the two ends of a link
406  *   can use a common clock. If they can, configure them to do so, which
407  *   reduces the ASPM state exit latency.
408  */
409 static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
410 {
411 	int same_clock = 1;
412 	u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
413 	struct pci_dev *child, *parent = link->pdev;
414 	struct pci_bus *linkbus = parent->subordinate;
415 	/*
416 	 * All functions of a slot should have the same Slot Clock
417 	 * Configuration, so just check one function
418 	 */
419 	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
420 	BUG_ON(!pci_is_pcie(child));
421 
422 	/* Check whether the downstream component's Slot Clock Configuration bit is 1 */
423 	pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
424 	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
425 		same_clock = 0;
426 
427 	/* Check whether the upstream component's Slot Clock Configuration bit is 1 */
428 	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
429 	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
430 		same_clock = 0;
431 
432 	/* Port might already be in common clock mode */
433 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
434 	parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
435 	if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
436 		bool consistent = true;
437 
438 		list_for_each_entry(child, &linkbus->devices, bus_list) {
439 			pcie_capability_read_word(child, PCI_EXP_LNKCTL,
440 						  &reg16);
441 			if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
442 				consistent = false;
443 				break;
444 			}
445 		}
446 		if (consistent)
447 			return;
448 		pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
449 	}
450 
451 	ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
452 	/* Configure downstream component, all functions */
453 	list_for_each_entry(child, &linkbus->devices, bus_list) {
454 		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
455 		child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
456 		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
457 						   PCI_EXP_LNKCTL_CCC, ccc);
458 	}
459 
460 	/* Configure upstream component */
461 	pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
462 					   PCI_EXP_LNKCTL_CCC, ccc);
463 
464 	if (pcie_retrain_link(link->pdev, true)) {
465 
466 		/* Training failed. Restore common clock configurations */
467 		pci_err(parent, "ASPM: Could not configure common clock\n");
468 		list_for_each_entry(child, &linkbus->devices, bus_list)
469 			pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
470 							   PCI_EXP_LNKCTL_CCC,
471 							   child_old_ccc[PCI_FUNC(child->devfn)]);
472 		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
473 						   PCI_EXP_LNKCTL_CCC, parent_old_ccc);
474 	}
475 }
476 
477 /* Convert L0s latency encoding to ns */
478 static u32 calc_l0s_latency(u32 lnkcap)
479 {
480 	u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L0SEL, lnkcap);
481 
482 	if (encoding == 0x7)
483 		return 5 * NSEC_PER_USEC;	/* > 4us */
484 	return (64 << encoding);
485 }
486 
487 /* Convert L0s acceptable latency encoding to ns */
488 static u32 calc_l0s_acceptable(u32 encoding)
489 {
490 	if (encoding == 0x7)
491 		return U32_MAX;
492 	return (64 << encoding);
493 }
494 
495 /* Convert L1 latency encoding to ns */
496 static u32 calc_l1_latency(u32 lnkcap)
497 {
498 	u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L1EL, lnkcap);
499 
500 	if (encoding == 0x7)
501 		return 65 * NSEC_PER_USEC;	/* > 64us */
502 	return NSEC_PER_USEC << encoding;
503 }
504 
505 /* Convert L1 acceptable latency encoding to ns */
506 static u32 calc_l1_acceptable(u32 encoding)
507 {
508 	if (encoding == 0x7)
509 		return U32_MAX;
510 	return NSEC_PER_USEC << encoding;
511 }
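
/*
 * Worked examples of the encodings above (editorial illustration, taken
 * straight from the formulas in calc_l0s_latency()/calc_l1_latency()):
 *
 *   L0s exit latency encoding 0x3   -> 64ns << 3 = 512 ns
 *   L1  exit latency encoding 0x2   -> 1us << 2  = 4 us
 *   Acceptable latency encoding 0x7 -> no limit (U32_MAX)
 */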
512 
513 /* Convert L1SS T_pwr encoding to usec */
514 static u32 calc_l12_pwron(struct pci_dev *pdev, u32 scale, u32 val)
515 {
516 	switch (scale) {
517 	case 0:
518 		return val * 2;
519 	case 1:
520 		return val * 10;
521 	case 2:
522 		return val * 100;
523 	}
524 	pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
525 	return 0;
526 }
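
/*
 * Worked example (editorial illustration): a Port advertising T_POWER_ON
 * scale = 2 (100 us units) and value = 10 yields
 * calc_l12_pwron() = 10 * 100 = 1000 us.
 */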
527 
528 /*
529  * Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
530  * register.  Ports enter L1.2 when the most recent LTR value is greater
531  * than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
532  * don't enter L1.2 too aggressively.
533  *
534  * See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
535  */
536 static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
537 {
538 	u64 threshold_ns = (u64)threshold_us * NSEC_PER_USEC;
539 
540 	/*
541 	 * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
542 	 * value of 0x3ff.
543 	 */
544 	if (threshold_ns <= 1 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
545 		*scale = 0;		/* Value times 1ns */
546 		*value = threshold_ns;
547 	} else if (threshold_ns <= 32 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
548 		*scale = 1;		/* Value times 32ns */
549 		*value = roundup(threshold_ns, 32) / 32;
550 	} else if (threshold_ns <= 1024 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
551 		*scale = 2;		/* Value times 1024ns */
552 		*value = roundup(threshold_ns, 1024) / 1024;
553 	} else if (threshold_ns <= 32768 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
554 		*scale = 3;		/* Value times 32768ns */
555 		*value = roundup(threshold_ns, 32768) / 32768;
556 	} else if (threshold_ns <= 1048576 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
557 		*scale = 4;		/* Value times 1048576ns */
558 		*value = roundup(threshold_ns, 1048576) / 1048576;
559 	} else if (threshold_ns <= (u64)33554432 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
560 		*scale = 5;		/* Value times 33554432ns */
561 		*value = roundup(threshold_ns, 33554432) / 33554432;
562 	} else {
563 		*scale = 5;
564 		*value = FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE);
565 	}
566 }
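
/*
 * Worked example (editorial illustration): threshold_us = 60 gives
 * threshold_ns = 60000.  That exceeds 32 * 0x3ff = 32736 but fits within
 * 1024 * 0x3ff, so scale = 2 (1024 ns units) and
 * value = roundup(60000, 1024) / 1024 = 59, i.e. an encoded threshold of
 * 59 * 1024 = 60416 ns, slightly above the requested 60 us as intended
 * by the round-up.
 */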
567 
568 static void pcie_aspm_check_latency(struct pci_dev *endpoint)
569 {
570 	u32 latency, encoding, lnkcap_up, lnkcap_dw;
571 	u32 l1_switch_latency = 0, latency_up_l0s;
572 	u32 latency_up_l1, latency_dw_l0s, latency_dw_l1;
573 	u32 acceptable_l0s, acceptable_l1;
574 	struct pcie_link_state *link;
575 
576 	/* Device not in D0 doesn't need latency check */
577 	if ((endpoint->current_state != PCI_D0) &&
578 	    (endpoint->current_state != PCI_UNKNOWN))
579 		return;
580 
581 	link = endpoint->bus->self->link_state;
582 
583 	/* Calculate endpoint L0s acceptable latency */
584 	encoding = FIELD_GET(PCI_EXP_DEVCAP_L0S, endpoint->devcap);
585 	acceptable_l0s = calc_l0s_acceptable(encoding);
586 
587 	/* Calculate endpoint L1 acceptable latency */
588 	encoding = FIELD_GET(PCI_EXP_DEVCAP_L1, endpoint->devcap);
589 	acceptable_l1 = calc_l1_acceptable(encoding);
590 
591 	while (link) {
592 		struct pci_dev *dev = pci_function_0(link->pdev->subordinate);
593 
594 		/* Read direction exit latencies */
595 		pcie_capability_read_dword(link->pdev, PCI_EXP_LNKCAP,
596 					   &lnkcap_up);
597 		pcie_capability_read_dword(dev, PCI_EXP_LNKCAP,
598 					   &lnkcap_dw);
599 		latency_up_l0s = calc_l0s_latency(lnkcap_up);
600 		latency_up_l1 = calc_l1_latency(lnkcap_up);
601 		latency_dw_l0s = calc_l0s_latency(lnkcap_dw);
602 		latency_dw_l1 = calc_l1_latency(lnkcap_dw);
603 
604 		/* Check upstream direction L0s latency */
605 		if ((link->aspm_capable & PCIE_LINK_STATE_L0S_UP) &&
606 		    (latency_up_l0s > acceptable_l0s))
607 			link->aspm_capable &= ~PCIE_LINK_STATE_L0S_UP;
608 
609 		/* Check downstream direction L0s latency */
610 		if ((link->aspm_capable & PCIE_LINK_STATE_L0S_DW) &&
611 		    (latency_dw_l0s > acceptable_l0s))
612 			link->aspm_capable &= ~PCIE_LINK_STATE_L0S_DW;
613 		/*
614 		 * Check L1 latency.
615 		 * Every switch on the path to the Root Complex needs 1
616 		 * more microsecond for L1. The spec doesn't mention L0s.
617 		 *
618 		 * The exit latencies for L1 substates are not advertised
619 		 * by a device.  Since the spec also doesn't mention a way
620 		 * to determine max latencies introduced by enabling L1
621 		 * substates on the components, it is not clear how to do
622 		 * an L1 substate exit latency check.  We assume that the
623 		 * L1 exit latencies advertised by a device include L1
624 		 * substate latencies (and hence do not do any check).
625 		 */
626 		latency = max_t(u32, latency_up_l1, latency_dw_l1);
627 		if ((link->aspm_capable & PCIE_LINK_STATE_L1) &&
628 		    (latency + l1_switch_latency > acceptable_l1))
629 			link->aspm_capable &= ~PCIE_LINK_STATE_L1;
630 		l1_switch_latency += NSEC_PER_USEC;
631 
632 		link = link->parent;
633 	}
634 }
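
/*
 * Worked example of the walk above (editorial illustration): consider an
 * endpoint behind one switch that accepts at most 8 us of L1 exit latency
 * (encoding 0x3).  On the first pass (switch Downstream Port <-> endpoint)
 * l1_switch_latency is 0, so an 8 us link exit latency is still acceptable.
 * On the second pass (Root Port <-> switch Upstream Port) the extra 1 us of
 * switch latency makes the total 9 us > 8 us, so PCIE_LINK_STATE_L1 is
 * cleared from aspm_capable on that upper link only.
 */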
635 
636 /* Calculate L1.2 PM substate timing parameters */
637 static void aspm_calc_l12_info(struct pcie_link_state *link,
638 				u32 parent_l1ss_cap, u32 child_l1ss_cap)
639 {
640 	struct pci_dev *child = link->downstream, *parent = link->pdev;
641 	u32 val1, val2, scale1, scale2;
642 	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
643 	u32 ctl1 = 0, ctl2 = 0;
644 	u32 pctl1, pctl2, cctl1, cctl2;
645 	u32 pl1_2_enables, cl1_2_enables;
646 
647 	/* Choose the greater of the two Port Common_Mode_Restore_Times */
648 	val1 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, parent_l1ss_cap);
649 	val2 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, child_l1ss_cap);
650 	t_common_mode = max(val1, val2);
651 
652 	/* Choose the greater of the two Port T_POWER_ON times */
653 	val1   = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, parent_l1ss_cap);
654 	scale1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, parent_l1ss_cap);
655 	val2   = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, child_l1ss_cap);
656 	scale2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, child_l1ss_cap);
657 
658 	if (calc_l12_pwron(parent, scale1, val1) >
659 	    calc_l12_pwron(child, scale2, val2)) {
660 		ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale1) |
661 			FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val1);
662 		t_power_on = calc_l12_pwron(parent, scale1, val1);
663 	} else {
664 		ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale2) |
665 			FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val2);
666 		t_power_on = calc_l12_pwron(child, scale2, val2);
667 	}
668 
669 	/*
670 	 * Set LTR_L1.2_THRESHOLD to the time required to transition the
671 	 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
672 	 * downstream devices report (via LTR) that they can tolerate at
673 	 * least that much latency.
674 	 *
675 	 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
676 	 * Table 5-11.  T(POWER_OFF) is at most 2us and T(L1.2) is at
677 	 * least 4us.
678 	 */
679 	l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
680 	encode_l12_threshold(l1_2_threshold, &scale, &value);
681 	ctl1 |= FIELD_PREP(PCI_L1SS_CTL1_CM_RESTORE_TIME, t_common_mode) |
682 		FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_VALUE, value) |
683 		FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_SCALE, scale);
684 
685 	/* Some broken devices only support dword access to L1 SS */
686 	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
687 	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
688 	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
689 	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);
690 
691 	if (ctl1 == pctl1 && ctl1 == cctl1 &&
692 	    ctl2 == pctl2 && ctl2 == cctl2)
693 		return;
694 
695 	/* Disable L1.2 while updating.  See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
696 	pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
697 	cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
698 
699 	if (pl1_2_enables || cl1_2_enables) {
700 		pci_clear_and_set_config_dword(child,
701 					       child->l1ss + PCI_L1SS_CTL1,
702 					       PCI_L1SS_CTL1_L1_2_MASK, 0);
703 		pci_clear_and_set_config_dword(parent,
704 					       parent->l1ss + PCI_L1SS_CTL1,
705 					       PCI_L1SS_CTL1_L1_2_MASK, 0);
706 	}
707 
708 	/* Program T_POWER_ON times in both ports */
709 	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
710 	pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
711 
712 	/* Program Common_Mode_Restore_Time in upstream device */
713 	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
714 				       PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
715 
716 	/* Program LTR_L1.2_THRESHOLD time in both ports */
717 	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
718 				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
719 				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
720 				       ctl1);
721 	pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
722 				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
723 				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
724 				       ctl1);
725 
726 	if (pl1_2_enables || cl1_2_enables) {
727 		pci_clear_and_set_config_dword(parent,
728 					       parent->l1ss + PCI_L1SS_CTL1, 0,
729 					       pl1_2_enables);
730 		pci_clear_and_set_config_dword(child,
731 					       child->l1ss + PCI_L1SS_CTL1, 0,
732 					       cl1_2_enables);
733 	}
734 }
735 
736 static void aspm_l1ss_init(struct pcie_link_state *link)
737 {
738 	struct pci_dev *child = link->downstream, *parent = link->pdev;
739 	u32 parent_l1ss_cap, child_l1ss_cap;
740 	u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
741 
742 	if (!parent->l1ss || !child->l1ss)
743 		return;
744 
745 	/* Setup L1 substate */
746 	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
747 			      &parent_l1ss_cap);
748 	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
749 			      &child_l1ss_cap);
750 
751 	if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
752 		parent_l1ss_cap = 0;
753 	if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
754 		child_l1ss_cap = 0;
755 
756 	/*
757 	 * If we don't have LTR for the entire path from the Root Complex
758 	 * to this device, we can't use ASPM L1.2 because it relies on the
759 	 * LTR_L1.2_THRESHOLD.  See PCIe r4.0, secs 5.5.4, 6.18.
760 	 */
761 	if (!child->ltr_path)
762 		child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
763 
764 	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
765 		link->aspm_support |= PCIE_LINK_STATE_L1_1;
766 	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
767 		link->aspm_support |= PCIE_LINK_STATE_L1_2;
768 	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
769 		link->aspm_support |= PCIE_LINK_STATE_L1_1_PCIPM;
770 	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
771 		link->aspm_support |= PCIE_LINK_STATE_L1_2_PCIPM;
772 
773 	if (parent_l1ss_cap)
774 		pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
775 				      &parent_l1ss_ctl1);
776 	if (child_l1ss_cap)
777 		pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
778 				      &child_l1ss_ctl1);
779 
780 	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
781 		link->aspm_enabled |= PCIE_LINK_STATE_L1_1;
782 	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
783 		link->aspm_enabled |= PCIE_LINK_STATE_L1_2;
784 	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
785 		link->aspm_enabled |= PCIE_LINK_STATE_L1_1_PCIPM;
786 	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
787 		link->aspm_enabled |= PCIE_LINK_STATE_L1_2_PCIPM;
788 
789 	if (link->aspm_support & PCIE_LINK_STATE_L1_2_MASK)
790 		aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
791 }
792 
793 #define FLAG(x, y, d)	(((x) & (PCIE_LINK_STATE_##y)) ? d : "")
794 
795 static void pcie_aspm_override_default_link_state(struct pcie_link_state *link)
796 {
797 	struct pci_dev *pdev = link->downstream;
798 	u32 override;
799 
800 	/* For devicetree platforms, enable L0s and L1 by default */
801 	if (of_have_populated_dt()) {
802 		if (link->aspm_support & PCIE_LINK_STATE_L0S)
803 			link->aspm_default |= PCIE_LINK_STATE_L0S;
804 		if (link->aspm_support & PCIE_LINK_STATE_L1)
805 			link->aspm_default |= PCIE_LINK_STATE_L1;
806 		override = link->aspm_default & ~link->aspm_enabled;
807 		if (override)
808 			pci_info(pdev, "ASPM: default states%s%s\n",
809 				 FLAG(override, L0S, " L0s"),
810 				 FLAG(override, L1, " L1"));
811 	}
812 }
813 
814 static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
815 {
816 	struct pci_dev *child = link->downstream, *parent = link->pdev;
817 	u32 parent_lnkcap, child_lnkcap;
818 	u16 parent_lnkctl, child_lnkctl;
819 	struct pci_bus *linkbus = parent->subordinate;
820 
821 	if (blacklist) {
822 		/* Set aspm_enabled/aspm_disable so that we will disable ASPM later */
823 		link->aspm_enabled = PCIE_LINK_STATE_ASPM_ALL;
824 		link->aspm_disable = PCIE_LINK_STATE_ASPM_ALL;
825 		return;
826 	}
827 
828 	/*
829 	 * If ASPM is not supported, don't mess with the clocks or the link;
830 	 * bail out now.
831 	 */
832 	pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
833 	pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
834 	if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
835 		return;
836 
837 	/* Configure common clock before checking latencies */
838 	pcie_aspm_configure_common_clock(link);
839 
840 	/*
841 	 * Re-read upstream/downstream components' register state after
842 	 * clock configuration.  L0s & L1 exit latencies in the otherwise
843 	 * read-only Link Capabilities may change depending on common clock
844 	 * configuration (PCIe r5.0, sec 7.5.3.6).
845 	 */
846 	pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
847 	pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
848 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
849 	pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
850 
851 	/* Disable L0s/L1 before updating L1SS config */
852 	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
853 	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
854 		pcie_capability_write_word(child, PCI_EXP_LNKCTL,
855 					   child_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
856 		pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
857 					   parent_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
858 	}
859 
860 	/*
861 	 * Setup L0s state
862 	 *
863 	 * Note that we must not enable L0s in either direction on a
864 	 * given link unless components on both sides of the link each
865 	 * support L0s.
866 	 */
867 	if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
868 		link->aspm_support |= PCIE_LINK_STATE_L0S;
869 
870 	if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
871 		link->aspm_enabled |= PCIE_LINK_STATE_L0S_UP;
872 	if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
873 		link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW;
874 
875 	/* Setup L1 state */
876 	if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
877 		link->aspm_support |= PCIE_LINK_STATE_L1;
878 
879 	if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
880 		link->aspm_enabled |= PCIE_LINK_STATE_L1;
881 
882 	aspm_l1ss_init(link);
883 
884 	/* Restore L0s/L1 if they were enabled */
885 	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
886 	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
887 		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_lnkctl);
888 		pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_lnkctl);
889 	}
890 
891 	/* Save default state */
892 	link->aspm_default = link->aspm_enabled;
893 
894 	pcie_aspm_override_default_link_state(link);
895 
896 	/* Setup initial capable state. Will be updated later */
897 	link->aspm_capable = link->aspm_support;
898 
899 	/* Get and check endpoint acceptable latencies */
900 	list_for_each_entry(child, &linkbus->devices, bus_list) {
901 		if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
902 		    pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
903 			continue;
904 
905 		pcie_aspm_check_latency(child);
906 	}
907 }
908 
909 /* Configure the ASPM L1 substates. Caller must disable L1 first. */
910 static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
911 {
912 	u32 val = 0;
913 	struct pci_dev *child = link->downstream, *parent = link->pdev;
914 
915 	if (state & PCIE_LINK_STATE_L1_1)
916 		val |= PCI_L1SS_CTL1_ASPM_L1_1;
917 	if (state & PCIE_LINK_STATE_L1_2)
918 		val |= PCI_L1SS_CTL1_ASPM_L1_2;
919 	if (state & PCIE_LINK_STATE_L1_1_PCIPM)
920 		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
921 	if (state & PCIE_LINK_STATE_L1_2_PCIPM)
922 		val |= PCI_L1SS_CTL1_PCIPM_L1_2;
923 
924 	/*
925 	 * PCIe r6.2, sec 5.5.4, rules for enabling L1 PM Substates:
926 	 * - Clear L1.x enable bits at child first, then at parent
927 	 * - Set L1.x enable bits at parent first, then at child
928 	 * - ASPM/PCIPM L1.2 must be disabled while programming timing
929 	 *   parameters
930 	 */
931 
932 	/* Disable all L1 substates */
933 	pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
934 				       PCI_L1SS_CTL1_L1SS_MASK, 0);
935 	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
936 				       PCI_L1SS_CTL1_L1SS_MASK, 0);
937 
938 	/* Enable what we need to enable */
939 	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
940 				       PCI_L1SS_CTL1_L1SS_MASK, val);
941 	pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
942 				       PCI_L1SS_CTL1_L1SS_MASK, val);
943 }
944 
945 static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
946 {
947 	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
948 					   PCI_EXP_LNKCTL_ASPMC, val);
949 }
950 
951 static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
952 {
953 	u32 upstream = 0, dwstream = 0;
954 	struct pci_dev *child = link->downstream, *parent = link->pdev;
955 	struct pci_bus *linkbus = parent->subordinate;
956 
957 	/* Enable only the states that were not explicitly disabled */
958 	state &= (link->aspm_capable & ~link->aspm_disable);
959 
960 	/* Can't enable any substates if L1 is not enabled */
961 	if (!(state & PCIE_LINK_STATE_L1))
962 		state &= ~PCIE_LINK_STATE_L1SS;
963 
964 	/* Spec says both ports must be in D0 before enabling PCI-PM substates */
965 	if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
966 		state &= ~PCIE_LINK_STATE_L1_SS_PCIPM;
967 		state |= (link->aspm_enabled & PCIE_LINK_STATE_L1_SS_PCIPM);
968 	}
969 
970 	/* Nothing to do if the link is already in the requested state */
971 	if (link->aspm_enabled == state)
972 		return;
973 	/* Convert ASPM state to upstream/downstream ASPM register state */
974 	if (state & PCIE_LINK_STATE_L0S_UP)
975 		dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
976 	if (state & PCIE_LINK_STATE_L0S_DW)
977 		upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
978 	if (state & PCIE_LINK_STATE_L1) {
979 		upstream |= PCI_EXP_LNKCTL_ASPM_L1;
980 		dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
981 	}
982 
983 	/*
984 	 * Per PCIe r6.2, sec 5.5.4, setting either or both of the enable
985 	 * bits for ASPM L1 PM Substates must be done while ASPM L1 is
986 	 * disabled. Disable L1 here and apply new configuration after L1SS
987 	 * configuration has been completed.
988 	 *
989 	 * Per sec 7.5.3.7, when disabling ASPM L1, software must disable
990 	 * it in the Downstream component prior to disabling it in the
991 	 * Upstream component, and ASPM L1 must be enabled in the Upstream
992 	 * component prior to enabling it in the Downstream component.
993 	 *
994 	 * Sec 7.5.3.7 also recommends programming the same ASPM Control
995 	 * value for all functions of a multi-function device.
996 	 */
997 	list_for_each_entry(child, &linkbus->devices, bus_list)
998 		pcie_config_aspm_dev(child, 0);
999 	pcie_config_aspm_dev(parent, 0);
1000 
1001 	if (link->aspm_capable & PCIE_LINK_STATE_L1SS)
1002 		pcie_config_aspm_l1ss(link, state);
1003 
1004 	pcie_config_aspm_dev(parent, upstream);
1005 	list_for_each_entry(child, &linkbus->devices, bus_list)
1006 		pcie_config_aspm_dev(child, dwstream);
1007 
1008 	link->aspm_enabled = state;
1009 
1010 	/* Update latest ASPM configuration in saved context */
1011 	pci_save_aspm_l1ss_state(link->downstream);
1012 	pci_update_aspm_saved_state(link->downstream);
1013 	pci_save_aspm_l1ss_state(parent);
1014 	pci_update_aspm_saved_state(parent);
1015 }
1016 
1017 static void pcie_config_aspm_path(struct pcie_link_state *link)
1018 {
1019 	while (link) {
1020 		pcie_config_aspm_link(link, policy_to_aspm_state(link));
1021 		link = link->parent;
1022 	}
1023 }
1024 
1025 static void free_link_state(struct pcie_link_state *link)
1026 {
1027 	link->pdev->link_state = NULL;
1028 	kfree(link);
1029 }
1030 
1031 static int pcie_aspm_sanity_check(struct pci_dev *pdev)
1032 {
1033 	struct pci_dev *child;
1034 	u32 reg32;
1035 
1036 	/*
1037 	 * Some functions in a slot might not be PCIe functions at all,
1038 	 * which is very strange. Disable ASPM for the whole slot.
1039 	 */
1040 	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
1041 		if (!pci_is_pcie(child))
1042 			return -EINVAL;
1043 
1044 		/*
1045 		 * If ASPM is disabled then we're not going to change
1046 		 * the BIOS state. It's safe to continue even if it's a
1047 		 * pre-1.1 device
1048 		 */
1049 
1050 		if (aspm_disabled)
1051 			continue;
1052 
1053 		/*
1054 		 * Disable ASPM for pre-1.1 PCIe devices. Like Microsoft, we use
1055 		 * the RBER bit to determine whether a function is a 1.1-level device.
1056 		 */
1057 		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
1058 		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
1059 			pci_info(child, "disabling ASPM on pre-1.1 PCIe device.  You can enable it with 'pcie_aspm=force'\n");
1060 			return -EINVAL;
1061 		}
1062 	}
1063 	return 0;
1064 }
1065 
1066 static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
1067 {
1068 	struct pcie_link_state *link;
1069 
1070 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1071 	if (!link)
1072 		return NULL;
1073 
1074 	INIT_LIST_HEAD(&link->sibling);
1075 	link->pdev = pdev;
1076 	link->downstream = pci_function_0(pdev->subordinate);
1077 
1078 	/*
1079 	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
1080 	 * hierarchies.  Note that some PCIe host implementations omit
1081 	 * the root ports entirely, in which case a downstream port on
1082 	 * a switch may become the root of the link state chain for all
1083 	 * its subordinate endpoints.
1084 	 */
1085 	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
1086 	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
1087 	    !pdev->bus->parent->self) {
1088 		link->root = link;
1089 	} else {
1090 		struct pcie_link_state *parent;
1091 
1092 		parent = pdev->bus->parent->self->link_state;
1093 		if (!parent) {
1094 			kfree(link);
1095 			return NULL;
1096 		}
1097 
1098 		link->parent = parent;
1099 		link->root = link->parent->root;
1100 	}
1101 
1102 	list_add(&link->sibling, &link_list);
1103 	pdev->link_state = link;
1104 	return link;
1105 }
1106 
1107 static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
1108 {
1109 	struct pci_dev *child;
1110 
1111 	list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
1112 		sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
1113 }
1114 
1115 /*
1116  * pcie_aspm_init_link_state: Initialize PCI Express link state.
1117  * It is called after a PCIe port and its child devices have been scanned.
1118  * @pdev: the root port or switch downstream port
1119  */
1120 void pcie_aspm_init_link_state(struct pci_dev *pdev)
1121 {
1122 	struct pcie_link_state *link;
1123 	int blacklist = !!pcie_aspm_sanity_check(pdev);
1124 
1125 	if (!aspm_support_enabled)
1126 		return;
1127 
1128 	if (pdev->link_state)
1129 		return;
1130 
1131 	/*
1132 	 * We allocate pcie_link_state for the component on the upstream
1133 	 * end of a Link, so there's nothing to do unless this device is
1134 	 * a downstream port.
1135 	 */
1136 	if (!pcie_downstream_port(pdev))
1137 		return;
1138 
1139 	/* VIA has a strange chipset, root port is under a bridge */
1140 	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
1141 	    pdev->bus->self)
1142 		return;
1143 
1144 	down_read(&pci_bus_sem);
1145 	if (list_empty(&pdev->subordinate->devices))
1146 		goto out;
1147 
1148 	mutex_lock(&aspm_lock);
1149 	link = alloc_pcie_link_state(pdev);
1150 	if (!link)
1151 		goto unlock;
1152 	/*
1153 	 * Set up the initial ASPM state. Note that we also need to configure
1154 	 * upstream links because their capable state can be updated by
1155 	 * pcie_aspm_cap_init().
1156 	 */
1157 	pcie_aspm_cap_init(link, blacklist);
1158 
1159 	/* Setup initial Clock PM state */
1160 	pcie_clkpm_cap_init(link, blacklist);
1161 
1162 	/*
1163 	 * At this stage drivers haven't had an opportunity to change the
1164 	 * link policy setting. Enabling ASPM on broken hardware can cripple
1165 	 * it even before the driver has had a chance to disable ASPM, so
1166 	 * default to a safe level right now. If we're enabling ASPM beyond
1167 	 * the BIOS's expectation, we'll do so once pci_enable_device() is
1168 	 * called.
1169 	 */
1170 	if (aspm_policy != POLICY_POWERSAVE &&
1171 	    aspm_policy != POLICY_POWER_SUPERSAVE) {
1172 		pcie_config_aspm_path(link);
1173 		pcie_set_clkpm(link, policy_to_clkpm_state(link));
1174 	}
1175 
1176 	pcie_aspm_update_sysfs_visibility(pdev);
1177 
1178 unlock:
1179 	mutex_unlock(&aspm_lock);
1180 out:
1181 	up_read(&pci_bus_sem);
1182 }
1183 
1184 void pci_bridge_reconfigure_ltr(struct pci_dev *pdev)
1185 {
1186 	struct pci_dev *bridge;
1187 	u32 ctl;
1188 
1189 	bridge = pci_upstream_bridge(pdev);
1190 	if (bridge && bridge->ltr_path) {
1191 		pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
1192 		if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
1193 			pci_dbg(bridge, "re-enabling LTR\n");
1194 			pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
1195 						 PCI_EXP_DEVCTL2_LTR_EN);
1196 		}
1197 	}
1198 }
1199 
1200 void pci_configure_ltr(struct pci_dev *pdev)
1201 {
1202 	struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
1203 	struct pci_dev *bridge;
1204 	u32 cap, ctl;
1205 
1206 	if (!pci_is_pcie(pdev))
1207 		return;
1208 
1209 	pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &cap);
1210 	if (!(cap & PCI_EXP_DEVCAP2_LTR))
1211 		return;
1212 
1213 	pcie_capability_read_dword(pdev, PCI_EXP_DEVCTL2, &ctl);
1214 	if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
1215 		if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
1216 			pdev->ltr_path = 1;
1217 			return;
1218 		}
1219 
1220 		bridge = pci_upstream_bridge(pdev);
1221 		if (bridge && bridge->ltr_path)
1222 			pdev->ltr_path = 1;
1223 
1224 		return;
1225 	}
1226 
1227 	if (!host->native_ltr)
1228 		return;
1229 
1230 	/*
1231 	 * Software must not enable LTR in an Endpoint unless the Root
1232 	 * Complex and all intermediate Switches indicate support for LTR.
1233 	 * PCIe r4.0, sec 6.18.
1234 	 */
1235 	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
1236 		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
1237 					 PCI_EXP_DEVCTL2_LTR_EN);
1238 		pdev->ltr_path = 1;
1239 		return;
1240 	}
1241 
1242 	/*
1243 	 * If we're configuring a hot-added device, LTR was likely
1244 	 * disabled in the upstream bridge, so re-enable it before enabling
1245 	 * it in the new device.
1246 	 */
1247 	bridge = pci_upstream_bridge(pdev);
1248 	if (bridge && bridge->ltr_path) {
1249 		pci_bridge_reconfigure_ltr(pdev);
1250 		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
1251 					 PCI_EXP_DEVCTL2_LTR_EN);
1252 		pdev->ltr_path = 1;
1253 	}
1254 }
1255 
1256 /* Recheck latencies and update aspm_capable for links under the root */
1257 static void pcie_update_aspm_capable(struct pcie_link_state *root)
1258 {
1259 	struct pcie_link_state *link;
1260 	BUG_ON(root->parent);
1261 	list_for_each_entry(link, &link_list, sibling) {
1262 		if (link->root != root)
1263 			continue;
1264 		link->aspm_capable = link->aspm_support;
1265 	}
1266 	list_for_each_entry(link, &link_list, sibling) {
1267 		struct pci_dev *child;
1268 		struct pci_bus *linkbus = link->pdev->subordinate;
1269 		if (link->root != root)
1270 			continue;
1271 		list_for_each_entry(child, &linkbus->devices, bus_list) {
1272 			if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
1273 			    (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
1274 				continue;
1275 			pcie_aspm_check_latency(child);
1276 		}
1277 	}
1278 }
1279 
1280 /* @pdev: the endpoint device */
1281 void pcie_aspm_exit_link_state(struct pci_dev *pdev)
1282 {
1283 	struct pci_dev *parent = pdev->bus->self;
1284 	struct pcie_link_state *link, *root, *parent_link;
1285 
1286 	if (!parent || !parent->link_state)
1287 		return;
1288 
1289 	down_read(&pci_bus_sem);
1290 	mutex_lock(&aspm_lock);
1291 
1292 	link = parent->link_state;
1293 	root = link->root;
1294 	parent_link = link->parent;
1295 
1296 	/*
1297 	 * Free the parent link state, no later than function 0 (i.e.
1298 	 * link->downstream) being removed.
1299 	 *
1300 	 * Do not free the link state any earlier. If function 0 is a
1301 	 * switch upstream port, this link state is parent_link to all
1302 	 * subordinate ones.
1303 	 */
1304 	if (pdev != link->downstream)
1305 		goto out;
1306 
1307 	pcie_config_aspm_link(link, 0);
1308 	list_del(&link->sibling);
1309 	free_link_state(link);
1310 
1311 	/* Recheck latencies and configure upstream links */
1312 	if (parent_link) {
1313 		pcie_update_aspm_capable(root);
1314 		pcie_config_aspm_path(parent_link);
1315 	}
1316 
1317  out:
1318 	mutex_unlock(&aspm_lock);
1319 	up_read(&pci_bus_sem);
1320 }
1321 
1322 /*
1323  * @pdev: the root port or switch downstream port
1324  * @locked: whether pci_bus_sem is held
1325  */
1326 void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
1327 {
1328 	struct pcie_link_state *link = pdev->link_state;
1329 
1330 	if (aspm_disabled || !link)
1331 		return;
1332 	/*
1333 	 * Devices changed PM state; recheck whether the latency
1334 	 * meets all functions' requirements.
1335 	 */
1336 	if (!locked)
1337 		down_read(&pci_bus_sem);
1338 	mutex_lock(&aspm_lock);
1339 	pcie_update_aspm_capable(link->root);
1340 	pcie_config_aspm_path(link);
1341 	mutex_unlock(&aspm_lock);
1342 	if (!locked)
1343 		up_read(&pci_bus_sem);
1344 }
1345 
1346 void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
1347 {
1348 	struct pcie_link_state *link = pdev->link_state;
1349 
1350 	if (aspm_disabled || !link)
1351 		return;
1352 
1353 	if (aspm_policy != POLICY_POWERSAVE &&
1354 	    aspm_policy != POLICY_POWER_SUPERSAVE)
1355 		return;
1356 
1357 	down_read(&pci_bus_sem);
1358 	mutex_lock(&aspm_lock);
1359 	pcie_config_aspm_path(link);
1360 	pcie_set_clkpm(link, policy_to_clkpm_state(link));
1361 	mutex_unlock(&aspm_lock);
1362 	up_read(&pci_bus_sem);
1363 }
1364 
1365 static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
1366 {
1367 	struct pci_dev *bridge;
1368 
1369 	if (!pci_is_pcie(pdev))
1370 		return NULL;
1371 
1372 	bridge = pci_upstream_bridge(pdev);
1373 	if (!bridge || !pci_is_pcie(bridge))
1374 		return NULL;
1375 
1376 	return bridge->link_state;
1377 }
1378 
1379 static u8 pci_calc_aspm_disable_mask(int state)
1380 {
1381 	state &= ~PCIE_LINK_STATE_CLKPM;
1382 
1383 	/* L1 PM substates require L1 */
1384 	if (state & PCIE_LINK_STATE_L1)
1385 		state |= PCIE_LINK_STATE_L1SS;
1386 
1387 	return state;
1388 }
1389 
1390 static u8 pci_calc_aspm_enable_mask(int state)
1391 {
1392 	state &= ~PCIE_LINK_STATE_CLKPM;
1393 
1394 	/* L1 PM substates require L1 */
1395 	if (state & PCIE_LINK_STATE_L1SS)
1396 		state |= PCIE_LINK_STATE_L1;
1397 
1398 	return state;
1399 }
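
/*
 * Illustration of the mask derivation above: disabling PCIE_LINK_STATE_L1
 * implicitly disables all L1 PM substates (pci_calc_aspm_disable_mask()
 * adds PCIE_LINK_STATE_L1SS), while enabling any L1 substate, e.g.
 * PCIE_LINK_STATE_L1_2, implicitly enables PCIE_LINK_STATE_L1 as well
 * (pci_calc_aspm_enable_mask()).
 */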
1400 
1401 static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
1402 {
1403 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1404 
1405 	if (!link)
1406 		return -EINVAL;
1407 	/*
1408 	 * A driver requested that ASPM be disabled on this device, but
1409 	 * if we don't have permission to manage ASPM (e.g., on ACPI
1410 	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
1411 	 * the _OSC method), we can't honor that request.  Windows has
1412 	 * a similar mechanism using "PciASPMOptOut", which is also
1413 	 * ignored in this situation.
1414 	 */
1415 	if (aspm_disabled) {
1416 		pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
1417 		return -EPERM;
1418 	}
1419 
1420 	if (!locked)
1421 		down_read(&pci_bus_sem);
1422 	mutex_lock(&aspm_lock);
1423 	link->aspm_disable |= pci_calc_aspm_disable_mask(state);
1424 	pcie_config_aspm_link(link, policy_to_aspm_state(link));
1425 
1426 	if (state & PCIE_LINK_STATE_CLKPM)
1427 		link->clkpm_disable = 1;
1428 	pcie_set_clkpm(link, policy_to_clkpm_state(link));
1429 	mutex_unlock(&aspm_lock);
1430 	if (!locked)
1431 		up_read(&pci_bus_sem);
1432 
1433 	return 0;
1434 }
1435 
1436 int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1437 {
1438 	lockdep_assert_held_read(&pci_bus_sem);
1439 
1440 	return __pci_disable_link_state(pdev, state, true);
1441 }
1442 EXPORT_SYMBOL(pci_disable_link_state_locked);
1443 
1444 /**
1445  * pci_disable_link_state - Disable the device's link states, so the link
1446  * will never enter the specified states.  Note that if the BIOS didn't grant ASPM
1447  * control to the OS, this does nothing because we can't touch the LNKCTL
1448  * register. Returns 0 or a negative errno.
1449  *
1450  * @pdev: PCI device
1451  * @state: ASPM link state to disable
1452  */
1453 int pci_disable_link_state(struct pci_dev *pdev, int state)
1454 {
1455 	return __pci_disable_link_state(pdev, state, false);
1456 }
1457 EXPORT_SYMBOL(pci_disable_link_state);
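
/*
 * Illustrative (hypothetical) caller: a driver whose device cannot
 * tolerate L1 exit latency might call, typically from its probe() path:
 *
 *	pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
 *
 * Because of pci_calc_aspm_disable_mask(), this also disables every L1 PM
 * substate on the link; adding PCIE_LINK_STATE_CLKPM to the mask would
 * disable Clock PM as well.
 */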
1458 
1459 static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
1460 {
1461 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1462 
1463 	if (!link)
1464 		return -EINVAL;
1465 	/*
1466 	 * A driver requested that ASPM be enabled on this device, but
1467 	 * if we don't have permission to manage ASPM (e.g., on ACPI
1468 	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
1469 	 * the _OSC method), we can't honor that request.
1470 	 */
1471 	if (aspm_disabled) {
1472 		pci_warn(pdev, "can't override BIOS ASPM; OS doesn't have ASPM control\n");
1473 		return -EPERM;
1474 	}
1475 
1476 	if (!locked)
1477 		down_read(&pci_bus_sem);
1478 	mutex_lock(&aspm_lock);
1479 	link->aspm_default = pci_calc_aspm_enable_mask(state);
1480 	pcie_config_aspm_link(link, policy_to_aspm_state(link));
1481 
1482 	link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
1483 	pcie_set_clkpm(link, policy_to_clkpm_state(link));
1484 	mutex_unlock(&aspm_lock);
1485 	if (!locked)
1486 		up_read(&pci_bus_sem);
1487 
1488 	return 0;
1489 }
1490 
1491 /**
1492  * pci_enable_link_state - Clear and set the default device link state so that
1493  * the link may be allowed to enter the specified states. Note that if the
1494  * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
1495  * touch the LNKCTL register. Also note that this does not enable states
1496  * disabled by pci_disable_link_state(). Return 0 or a negative errno.
1497  *
1498  * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
1499  * PCIe r6.0, sec 5.5.4.
1500  *
1501  * @pdev: PCI device
1502  * @state: Mask of ASPM link states to enable
1503  */
1504 int pci_enable_link_state(struct pci_dev *pdev, int state)
1505 {
1506 	return __pci_enable_link_state(pdev, state, false);
1507 }
1508 EXPORT_SYMBOL(pci_enable_link_state);
1509 
1510 /**
1511  * pci_enable_link_state_locked - Clear and set the default device link state
1512  * so that the link may be allowed to enter the specified states. Note that if
1513  * the BIOS didn't grant ASPM control to the OS, this does nothing because we
1514  * can't touch the LNKCTL register. Also note that this does not enable states
1515  * disabled by pci_disable_link_state(). Return 0 or a negative errno.
1516  *
1517  * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
1518  * PCIe r6.0, sec 5.5.4.
1519  *
1520  * @pdev: PCI device
1521  * @state: Mask of ASPM link states to enable
1522  *
1523  * Context: Caller holds pci_bus_sem read lock.
1524  */
1525 int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1526 {
1527 	lockdep_assert_held_read(&pci_bus_sem);
1528 
1529 	return __pci_enable_link_state(pdev, state, true);
1530 }
1531 EXPORT_SYMBOL(pci_enable_link_state_locked);
1532 
1533 static int pcie_aspm_set_policy(const char *val,
1534 				const struct kernel_param *kp)
1535 {
1536 	int i;
1537 	struct pcie_link_state *link;
1538 
1539 	if (aspm_disabled)
1540 		return -EPERM;
1541 	i = sysfs_match_string(policy_str, val);
1542 	if (i < 0)
1543 		return i;
1544 	if (i == aspm_policy)
1545 		return 0;
1546 
1547 	down_read(&pci_bus_sem);
1548 	mutex_lock(&aspm_lock);
1549 	aspm_policy = i;
1550 	list_for_each_entry(link, &link_list, sibling) {
1551 		pcie_config_aspm_link(link, policy_to_aspm_state(link));
1552 		pcie_set_clkpm(link, policy_to_clkpm_state(link));
1553 	}
1554 	mutex_unlock(&aspm_lock);
1555 	up_read(&pci_bus_sem);
1556 	return 0;
1557 }
1558 
1559 static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
1560 {
1561 	int i, cnt = 0;
1562 	for (i = 0; i < ARRAY_SIZE(policy_str); i++)
1563 		if (i == aspm_policy)
1564 			cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
1565 		else
1566 			cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
1567 	cnt += sprintf(buffer + cnt, "\n");
1568 	return cnt;
1569 }
1570 
1571 module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
1572 	NULL, 0644);
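
/*
 * With MODULE_PARAM_PREFIX set to "pcie_aspm." above, the policy can be
 * selected on the kernel command line, e.g. "pcie_aspm.policy=powersave",
 * or changed at run time (when the OS owns ASPM control) via
 * /sys/module/pcie_aspm/parameters/policy.  The value shown is only an
 * example; see policy_str[] for the accepted strings.
 */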
1573 
1574 /**
1575  * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
1576  * @pdev: Target device.
1577  *
1578  * Relies on the upstream bridge's link_state being valid.  The link_state
1579  * is deallocated only when the last child of the bridge (i.e., @pdev or a
1580  * sibling) is removed, and the caller should be holding a reference to
1581  * @pdev, so this should be safe.
1582  */
1583 bool pcie_aspm_enabled(struct pci_dev *pdev)
1584 {
1585 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1586 
1587 	if (!link)
1588 		return false;
1589 
1590 	return link->aspm_enabled;
1591 }
1592 EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
1593 
1594 static ssize_t aspm_attr_show_common(struct device *dev,
1595 				     struct device_attribute *attr,
1596 				     char *buf, u8 state)
1597 {
1598 	struct pci_dev *pdev = to_pci_dev(dev);
1599 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1600 
1601 	return sysfs_emit(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
1602 }
1603 
1604 static ssize_t aspm_attr_store_common(struct device *dev,
1605 				      struct device_attribute *attr,
1606 				      const char *buf, size_t len, u8 state)
1607 {
1608 	struct pci_dev *pdev = to_pci_dev(dev);
1609 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1610 	bool state_enable;
1611 
1612 	if (kstrtobool(buf, &state_enable) < 0)
1613 		return -EINVAL;
1614 
1615 	down_read(&pci_bus_sem);
1616 	mutex_lock(&aspm_lock);
1617 
1618 	if (state_enable) {
1619 		link->aspm_disable &= ~state;
1620 		/* need to enable L1 for substates */
1621 		if (state & PCIE_LINK_STATE_L1SS)
1622 			link->aspm_disable &= ~PCIE_LINK_STATE_L1;
1623 	} else {
1624 		link->aspm_disable |= state;
1625 		if (state & PCIE_LINK_STATE_L1)
1626 			link->aspm_disable |= PCIE_LINK_STATE_L1SS;
1627 	}
1628 
1629 	pcie_config_aspm_link(link, policy_to_aspm_state(link));
1630 
1631 	mutex_unlock(&aspm_lock);
1632 	up_read(&pci_bus_sem);
1633 
1634 	return len;
1635 }
1636 
1637 #define ASPM_ATTR(_f, _s)						\
1638 static ssize_t _f##_show(struct device *dev,				\
1639 			 struct device_attribute *attr, char *buf)	\
1640 { return aspm_attr_show_common(dev, attr, buf, PCIE_LINK_STATE_##_s); }	\
1641 									\
1642 static ssize_t _f##_store(struct device *dev,				\
1643 			  struct device_attribute *attr,		\
1644 			  const char *buf, size_t len)			\
1645 { return aspm_attr_store_common(dev, attr, buf, len, PCIE_LINK_STATE_##_s); }
1646 
1647 ASPM_ATTR(l0s_aspm, L0S)
1648 ASPM_ATTR(l1_aspm, L1)
1649 ASPM_ATTR(l1_1_aspm, L1_1)
1650 ASPM_ATTR(l1_2_aspm, L1_2)
1651 ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
1652 ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)
1653 
1654 static ssize_t clkpm_show(struct device *dev,
1655 			  struct device_attribute *attr, char *buf)
1656 {
1657 	struct pci_dev *pdev = to_pci_dev(dev);
1658 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1659 
1660 	return sysfs_emit(buf, "%d\n", link->clkpm_enabled);
1661 }
1662 
1663 static ssize_t clkpm_store(struct device *dev,
1664 			   struct device_attribute *attr,
1665 			   const char *buf, size_t len)
1666 {
1667 	struct pci_dev *pdev = to_pci_dev(dev);
1668 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1669 	bool state_enable;
1670 
1671 	if (kstrtobool(buf, &state_enable) < 0)
1672 		return -EINVAL;
1673 
1674 	down_read(&pci_bus_sem);
1675 	mutex_lock(&aspm_lock);
1676 
1677 	link->clkpm_disable = !state_enable;
1678 	pcie_set_clkpm(link, policy_to_clkpm_state(link));
1679 
1680 	mutex_unlock(&aspm_lock);
1681 	up_read(&pci_bus_sem);
1682 
1683 	return len;
1684 }
1685 
1686 static DEVICE_ATTR_RW(clkpm);
1687 static DEVICE_ATTR_RW(l0s_aspm);
1688 static DEVICE_ATTR_RW(l1_aspm);
1689 static DEVICE_ATTR_RW(l1_1_aspm);
1690 static DEVICE_ATTR_RW(l1_2_aspm);
1691 static DEVICE_ATTR_RW(l1_1_pcipm);
1692 static DEVICE_ATTR_RW(l1_2_pcipm);
1693 
1694 static struct attribute *aspm_ctrl_attrs[] = {
1695 	&dev_attr_clkpm.attr,
1696 	&dev_attr_l0s_aspm.attr,
1697 	&dev_attr_l1_aspm.attr,
1698 	&dev_attr_l1_1_aspm.attr,
1699 	&dev_attr_l1_2_aspm.attr,
1700 	&dev_attr_l1_1_pcipm.attr,
1701 	&dev_attr_l1_2_pcipm.attr,
1702 	NULL
1703 };
1704 
1705 static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
1706 					   struct attribute *a, int n)
1707 {
1708 	struct device *dev = kobj_to_dev(kobj);
1709 	struct pci_dev *pdev = to_pci_dev(dev);
1710 	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1711 	static const u8 aspm_state_map[] = {
1712 		PCIE_LINK_STATE_L0S,
1713 		PCIE_LINK_STATE_L1,
1714 		PCIE_LINK_STATE_L1_1,
1715 		PCIE_LINK_STATE_L1_2,
1716 		PCIE_LINK_STATE_L1_1_PCIPM,
1717 		PCIE_LINK_STATE_L1_2_PCIPM,
1718 	};
1719 
1720 	if (aspm_disabled || !link)
1721 		return 0;
1722 
1723 	if (n == 0)
1724 		return link->clkpm_capable ? a->mode : 0;
1725 
1726 	return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
1727 }
1728 
1729 const struct attribute_group aspm_ctrl_attr_group = {
1730 	.name = "link",
1731 	.attrs = aspm_ctrl_attrs,
1732 	.is_visible = aspm_ctrl_attrs_are_visible,
1733 };
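
/*
 * The group above is exposed as a "link" directory under each PCI device
 * for which ASPM is OS-controlled, e.g. (device address for illustration
 * only):
 *
 *	# echo 0 > /sys/bus/pci/devices/0000:00:1c.0/link/l1_aspm
 *
 * disables ASPM L1 (and, via aspm_attr_store_common(), all L1 substates)
 * on the link leading to that device, while writing 1 re-allows L1
 * subject to the current policy.
 */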
1734 
1735 static int __init pcie_aspm_disable(char *str)
1736 {
1737 	if (!strcmp(str, "off")) {
1738 		aspm_policy = POLICY_DEFAULT;
1739 		aspm_disabled = true;
1740 		aspm_support_enabled = false;
1741 		pr_info("PCIe ASPM is disabled\n");
1742 	} else if (!strcmp(str, "force")) {
1743 		aspm_force = true;
1744 		pr_info("PCIe ASPM is forcibly enabled\n");
1745 	}
1746 	return 1;
1747 }
1748 
1749 __setup("pcie_aspm=", pcie_aspm_disable);
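
/*
 * Boot-time examples for the "pcie_aspm=" parameter handled above:
 * "pcie_aspm=off" keeps the kernel from touching ASPM configuration at
 * all, while "pcie_aspm=force" enables ASPM even on the pre-1.1 PCIe
 * devices that pcie_aspm_sanity_check() would otherwise blacklist.
 */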
1750 
1751 void pcie_no_aspm(void)
1752 {
1753 	/*
1754 	 * Disabling ASPM is intended to prevent the kernel from modifying
1755 	 * existing hardware state, not to clear existing state. To that end:
1756 	 * (a) set policy to POLICY_DEFAULT in order to avoid changing state
1757 	 * (b) prevent userspace from changing policy
1758 	 */
1759 	if (!aspm_force) {
1760 		aspm_policy = POLICY_DEFAULT;
1761 		aspm_disabled = true;
1762 	}
1763 }
1764 
1765 bool pcie_aspm_support_enabled(void)
1766 {
1767 	return aspm_support_enabled;
1768 }
1769 
1770 #endif /* CONFIG_PCIEASPM */
1771