1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Enable PCIe link L0s/L1 state and Clock Power Management
4 *
5 * Copyright (C) 2007 Intel
6 * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
7 * Copyright (C) Shaohua Li (shaohua.li@intel.com)
8 */
9
10 #include <linux/bitfield.h>
11 #include <linux/bits.h>
12 #include <linux/build_bug.h>
13 #include <linux/kernel.h>
14 #include <linux/limits.h>
15 #include <linux/math.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/of.h>
19 #include <linux/pci.h>
20 #include <linux/pci_regs.h>
21 #include <linux/errno.h>
22 #include <linux/pm.h>
23 #include <linux/init.h>
24 #include <linux/printk.h>
25 #include <linux/slab.h>
26 #include <linux/time.h>
27
28 #include "../pci.h"
29
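/*
 * Save the LTR Capability of @dev (Max Snoop/No-Snoop Latency) into its
 * saved-state buffer so pci_restore_ltr_state() can write it back after
 * the device loses its configuration, e.g. across suspend/resume.
 */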
void pci_save_ltr_state(struct pci_dev *dev)
31 {
32 int ltr;
33 struct pci_cap_saved_state *save_state;
34 u32 *cap;
35
36 if (!pci_is_pcie(dev))
37 return;
38
39 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
40 if (!ltr)
41 return;
42
43 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
44 if (!save_state) {
45 pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
46 return;
47 }
48
49 /* Some broken devices only support dword access to LTR */
50 cap = &save_state->cap.data[0];
51 pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
52 }
53
void pci_restore_ltr_state(struct pci_dev *dev)
55 {
56 struct pci_cap_saved_state *save_state;
57 int ltr;
58 u32 *cap;
59
60 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
61 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
62 if (!save_state || !ltr)
63 return;
64
65 /* Some broken devices only support dword access to LTR */
66 cap = &save_state->cap.data[0];
67 pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
68 }
69
void pci_configure_aspm_l1ss(struct pci_dev *pdev)
71 {
72 int rc;
73
74 pdev->l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
75
76 rc = pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_L1SS,
77 2 * sizeof(u32));
78 if (rc)
79 pci_err(pdev, "unable to allocate ASPM L1SS save buffer (%pe)\n",
80 ERR_PTR(rc));
81 }
82
void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
84 {
85 struct pci_dev *parent = pdev->bus->self;
86 struct pci_cap_saved_state *save_state;
87 u32 *cap;
88
89 /*
90 * If this is a Downstream Port, we never restore the L1SS state
91 * directly; we only restore it when we restore the state of the
92 * Upstream Port below it.
93 */
94 if (pcie_downstream_port(pdev) || !parent)
95 return;
96
97 if (!pdev->l1ss || !parent->l1ss)
98 return;
99
100 /*
101 * Save L1 substate configuration. The ASPM L0s/L1 configuration
102 * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
103 */
104 save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
105 if (!save_state)
106 return;
107
108 cap = &save_state->cap.data[0];
109 pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
110 pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
111
112 /*
113 * Save parent's L1 substate configuration so we have it for
114 * pci_restore_aspm_l1ss_state(pdev) to restore.
115 */
116 save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
117 if (!save_state)
118 return;
119
120 cap = &save_state->cap.data[0];
121 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
122 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
123 }
124
void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
126 {
127 struct pci_cap_saved_state *pl_save_state, *cl_save_state;
128 struct pci_dev *parent = pdev->bus->self;
129 u32 *cap, pl_ctl1, pl_ctl2, pl_l1_2_enable;
130 u32 cl_ctl1, cl_ctl2, cl_l1_2_enable;
131 u16 clnkctl, plnkctl;
132
133 /*
134 * In case BIOS enabled L1.2 when resuming, we need to disable it first
135 * on the downstream component before the upstream. So, don't attempt to
136 * restore either until we are at the downstream component.
137 */
138 if (pcie_downstream_port(pdev) || !parent)
139 return;
140
141 if (!pdev->l1ss || !parent->l1ss)
142 return;
143
144 cl_save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
145 pl_save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
146 if (!cl_save_state || !pl_save_state)
147 return;
148
149 cap = &cl_save_state->cap.data[0];
150 cl_ctl2 = *cap++;
151 cl_ctl1 = *cap;
152 cap = &pl_save_state->cap.data[0];
153 pl_ctl2 = *cap++;
154 pl_ctl1 = *cap;
155
156 /* Make sure L0s/L1 are disabled before updating L1SS config */
157 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &clnkctl);
158 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &plnkctl);
159 if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
160 FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
161 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
162 clnkctl & ~PCI_EXP_LNKCTL_ASPMC);
163 pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
164 plnkctl & ~PCI_EXP_LNKCTL_ASPMC);
165 }
166
167 /*
168 * Disable L1.2 on this downstream endpoint device first, followed
169 * by the upstream
170 */
171 pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
172 PCI_L1SS_CTL1_L1_2_MASK, 0);
173 pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
174 PCI_L1SS_CTL1_L1_2_MASK, 0);
175
176 /*
177 * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD
178 * in PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
179 * enable bits, even though they're all in PCI_L1SS_CTL1.
180 */
181 pl_l1_2_enable = pl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
182 pl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
183 cl_l1_2_enable = cl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
184 cl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
185
186 /* Write back without enables first (above we cleared them in ctl1) */
187 pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, pl_ctl2);
188 pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cl_ctl2);
189 pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, pl_ctl1);
190 pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cl_ctl1);
191
192 /* Then write back the enables */
193 if (pl_l1_2_enable || cl_l1_2_enable) {
194 pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
195 pl_ctl1 | pl_l1_2_enable);
196 pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
197 cl_ctl1 | cl_l1_2_enable);
198 }
199
200 /* Restore L0s/L1 if they were enabled */
201 if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
202 FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
203 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl);
204 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl);
205 }
206 }
207
208 #ifdef CONFIG_PCIEASPM
209
210 #ifdef MODULE_PARAM_PREFIX
211 #undef MODULE_PARAM_PREFIX
212 #endif
213 #define MODULE_PARAM_PREFIX "pcie_aspm."
214
215 /* Note: these are not register definitions */
216 #define PCIE_LINK_STATE_L0S_UP BIT(0) /* Upstream direction L0s state */
217 #define PCIE_LINK_STATE_L0S_DW BIT(1) /* Downstream direction L0s state */
218 static_assert(PCIE_LINK_STATE_L0S == (PCIE_LINK_STATE_L0S_UP | PCIE_LINK_STATE_L0S_DW));
219
220 #define PCIE_LINK_STATE_L1_SS_PCIPM (PCIE_LINK_STATE_L1_1_PCIPM |\
221 PCIE_LINK_STATE_L1_2_PCIPM)
222 #define PCIE_LINK_STATE_L1_2_MASK (PCIE_LINK_STATE_L1_2 |\
223 PCIE_LINK_STATE_L1_2_PCIPM)
224 #define PCIE_LINK_STATE_L1SS (PCIE_LINK_STATE_L1_1 |\
225 PCIE_LINK_STATE_L1_1_PCIPM |\
226 PCIE_LINK_STATE_L1_2_MASK)
227
228 struct pcie_link_state {
229 struct pci_dev *pdev; /* Upstream component of the Link */
230 struct pci_dev *downstream; /* Downstream component, function 0 */
231 struct pcie_link_state *root; /* pointer to the root port link */
232 struct pcie_link_state *parent; /* pointer to the parent Link state */
233 struct list_head sibling; /* node in link_list */
234
235 /* ASPM state */
236 u32 aspm_support:7; /* Supported ASPM state */
237 u32 aspm_enabled:7; /* Enabled ASPM state */
238 u32 aspm_capable:7; /* Capable ASPM state with latency */
239 u32 aspm_default:7; /* Default ASPM state by BIOS or
240 override */
241 u32 aspm_disable:7; /* Disabled ASPM state */
242
243 /* Clock PM state */
244 u32 clkpm_capable:1; /* Clock PM capable? */
245 u32 clkpm_enabled:1; /* Current Clock PM state */
246 u32 clkpm_default:1; /* Default Clock PM state by BIOS */
247 u32 clkpm_disable:1; /* Clock PM disabled */
248 };
249
250 static bool aspm_disabled, aspm_force;
251 static bool aspm_support_enabled = true;
252 static DEFINE_MUTEX(aspm_lock);
253 static LIST_HEAD(link_list);
254
255 #define POLICY_DEFAULT 0 /* BIOS default setting */
256 #define POLICY_PERFORMANCE 1 /* high performance */
257 #define POLICY_POWERSAVE 2 /* high power saving */
258 #define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */
259
260 #ifdef CONFIG_PCIEASPM_PERFORMANCE
261 static int aspm_policy = POLICY_PERFORMANCE;
262 #elif defined CONFIG_PCIEASPM_POWERSAVE
263 static int aspm_policy = POLICY_POWERSAVE;
264 #elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
265 static int aspm_policy = POLICY_POWER_SUPERSAVE;
266 #else
267 static int aspm_policy;
268 #endif
269
270 static const char *policy_str[] = {
271 [POLICY_DEFAULT] = "default",
272 [POLICY_PERFORMANCE] = "performance",
273 [POLICY_POWERSAVE] = "powersave",
274 [POLICY_POWER_SUPERSAVE] = "powersupersave"
275 };
276
277 /*
278 * The L1 PM substate capability is only implemented in function 0 in a
279 * multi function device.
280 */
static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
282 {
283 struct pci_dev *child;
284
285 list_for_each_entry(child, &linkbus->devices, bus_list)
286 if (PCI_FUNC(child->devfn) == 0)
287 return child;
288 return NULL;
289 }
290
static int policy_to_aspm_state(struct pcie_link_state *link)
292 {
293 switch (aspm_policy) {
294 case POLICY_PERFORMANCE:
295 /* Disable ASPM and Clock PM */
296 return 0;
297 case POLICY_POWERSAVE:
298 /* Enable ASPM L0s/L1 */
299 return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
300 case POLICY_POWER_SUPERSAVE:
301 /* Enable Everything */
302 return PCIE_LINK_STATE_ASPM_ALL;
303 case POLICY_DEFAULT:
304 return link->aspm_default;
305 }
306 return 0;
307 }
308
static int policy_to_clkpm_state(struct pcie_link_state *link)
310 {
311 switch (aspm_policy) {
312 case POLICY_PERFORMANCE:
313 /* Disable ASPM and Clock PM */
314 return 0;
315 case POLICY_POWERSAVE:
316 case POLICY_POWER_SUPERSAVE:
317 /* Enable Clock PM */
318 return 1;
319 case POLICY_DEFAULT:
320 return link->clkpm_default;
321 }
322 return 0;
323 }
324
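/*
 * Keep the LNKCTL value captured by pci_save_pcie_state() in sync with the
 * ASPM Control and Clock PM bits we just changed, so a later restore does
 * not undo the new configuration.
 */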
static void pci_update_aspm_saved_state(struct pci_dev *dev)
326 {
327 struct pci_cap_saved_state *save_state;
328 u16 *cap, lnkctl, aspm_ctl;
329
330 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
331 if (!save_state)
332 return;
333
334 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnkctl);
335
336 /*
337 * Update ASPM and CLKREQ bits of LNKCTL in save_state. We only
338 * write PCI_EXP_LNKCTL_CCC during enumeration, so it shouldn't
339 * change after being captured in save_state.
340 */
341 aspm_ctl = lnkctl & (PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
342 lnkctl &= ~(PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
343
344 /* Depends on pci_save_pcie_state(): cap[1] is LNKCTL */
345 cap = (u16 *)&save_state->cap.data[0];
346 cap[1] = lnkctl | aspm_ctl;
347 }
348
static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
350 {
351 struct pci_dev *child;
352 struct pci_bus *linkbus = link->pdev->subordinate;
353 u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
354
355 list_for_each_entry(child, &linkbus->devices, bus_list) {
356 pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
357 PCI_EXP_LNKCTL_CLKREQ_EN,
358 val);
359 pci_update_aspm_saved_state(child);
360 }
361 link->clkpm_enabled = !!enable;
362 }
363
static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
365 {
366 /*
367 * Don't enable Clock PM if the link is not Clock PM capable
368 * or Clock PM is disabled
369 */
370 if (!link->clkpm_capable || link->clkpm_disable)
371 enable = 0;
/* Nothing to do if the requested state equals the current state */
373 if (link->clkpm_enabled == enable)
374 return;
375 pcie_set_clkpm_nocheck(link, enable);
376 }
377
static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
379 {
380 int capable = 1, enabled = 1;
381 u32 reg32;
382 u16 reg16;
383 struct pci_dev *child;
384 struct pci_bus *linkbus = link->pdev->subordinate;
385
386 /* All functions should have the same cap and state, take the worst */
387 list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
389 if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
390 capable = 0;
391 enabled = 0;
392 break;
393 }
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
395 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
396 enabled = 0;
397 }
398 link->clkpm_enabled = enabled;
399 link->clkpm_default = enabled;
400 link->clkpm_capable = capable;
401 link->clkpm_disable = blacklist ? 1 : 0;
402 }
403
404 /*
* pcie_aspm_configure_common_clock: check whether the two ends of a Link
* can use a common clock. If they can, configure them to do so, which
* reduces the ASPM state exit latency.
408 */
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
410 {
411 int same_clock = 1;
412 u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
413 struct pci_dev *child, *parent = link->pdev;
414 struct pci_bus *linkbus = parent->subordinate;
415 /*
416 * All functions of a slot should have the same Slot Clock
417 * Configuration, so just check one function
418 */
419 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
420 BUG_ON(!pci_is_pcie(child));
421
422 /* Check downstream component if bit Slot Clock Configuration is 1 */
pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
424 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
425 same_clock = 0;
426
427 /* Check upstream component if bit Slot Clock Configuration is 1 */
pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
429 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
430 same_clock = 0;
431
432 /* Port might be already in common clock mode */
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
434 parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
435 if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
436 bool consistent = true;
437
438 list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
441 if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
442 consistent = false;
443 break;
444 }
445 }
446 if (consistent)
447 return;
448 pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
449 }
450
451 ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
452 /* Configure downstream component, all functions */
453 list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
455 child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
456 pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
457 PCI_EXP_LNKCTL_CCC, ccc);
458 }
459
460 /* Configure upstream component */
461 pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
462 PCI_EXP_LNKCTL_CCC, ccc);
463
464 if (pcie_retrain_link(link->pdev, true)) {
465
466 /* Training failed. Restore common clock configurations */
467 pci_err(parent, "ASPM: Could not configure common clock\n");
468 list_for_each_entry(child, &linkbus->devices, bus_list)
469 pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
470 PCI_EXP_LNKCTL_CCC,
471 child_old_ccc[PCI_FUNC(child->devfn)]);
472 pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
473 PCI_EXP_LNKCTL_CCC, parent_old_ccc);
474 }
475 }
476
477 /* Convert L0s latency encoding to ns */
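/* e.g., encoding 3 means 64 << 3 = 512 ns; encoding 0x7 means "more than 4 us" */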
static u32 calc_l0s_latency(u32 lnkcap)
479 {
480 u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L0SEL, lnkcap);
481
482 if (encoding == 0x7)
483 return 5 * NSEC_PER_USEC; /* > 4us */
484 return (64 << encoding);
485 }
486
487 /* Convert L0s acceptable latency encoding to ns */
static u32 calc_l0s_acceptable(u32 encoding)
489 {
490 if (encoding == 0x7)
491 return U32_MAX;
492 return (64 << encoding);
493 }
494
495 /* Convert L1 latency encoding to ns */
static u32 calc_l1_latency(u32 lnkcap)
497 {
498 u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L1EL, lnkcap);
499
500 if (encoding == 0x7)
501 return 65 * NSEC_PER_USEC; /* > 64us */
502 return NSEC_PER_USEC << encoding;
503 }
504
505 /* Convert L1 acceptable latency encoding to ns */
static u32 calc_l1_acceptable(u32 encoding)
507 {
508 if (encoding == 0x7)
509 return U32_MAX;
510 return NSEC_PER_USEC << encoding;
511 }
512
513 /* Convert L1SS T_pwr encoding to usec */
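/* e.g., scale 1 is units of 10 us, so scale=1, val=12 encodes T_PwrOn = 120 us */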
static u32 calc_l12_pwron(struct pci_dev *pdev, u32 scale, u32 val)
515 {
516 switch (scale) {
517 case 0:
518 return val * 2;
519 case 1:
520 return val * 10;
521 case 2:
522 return val * 100;
523 }
524 pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
525 return 0;
526 }
527
528 /*
529 * Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
530 * register. Ports enter L1.2 when the most recent LTR value is greater
531 * than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
532 * don't enter L1.2 too aggressively.
533 *
534 * See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
535 */
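/*
 * For example, threshold_us = 55 gives threshold_ns = 55000, which does
 * not fit scale 0 (1 ns units) or scale 1 (32 ns units), so we use
 * scale 2 (1024 ns units) and value = roundup(55000, 1024) / 1024 = 54.
 */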
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
537 {
538 u64 threshold_ns = (u64)threshold_us * NSEC_PER_USEC;
539
540 /*
541 * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
542 * value of 0x3ff.
543 */
544 if (threshold_ns <= 1 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
545 *scale = 0; /* Value times 1ns */
546 *value = threshold_ns;
547 } else if (threshold_ns <= 32 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
548 *scale = 1; /* Value times 32ns */
549 *value = roundup(threshold_ns, 32) / 32;
550 } else if (threshold_ns <= 1024 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
551 *scale = 2; /* Value times 1024ns */
552 *value = roundup(threshold_ns, 1024) / 1024;
553 } else if (threshold_ns <= 32768 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
554 *scale = 3; /* Value times 32768ns */
555 *value = roundup(threshold_ns, 32768) / 32768;
556 } else if (threshold_ns <= 1048576 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
557 *scale = 4; /* Value times 1048576ns */
558 *value = roundup(threshold_ns, 1048576) / 1048576;
559 } else if (threshold_ns <= (u64)33554432 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
560 *scale = 5; /* Value times 33554432ns */
561 *value = roundup(threshold_ns, 33554432) / 33554432;
562 } else {
563 *scale = 5;
564 *value = FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE);
565 }
566 }
567
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
569 {
570 u32 latency, encoding, lnkcap_up, lnkcap_dw;
571 u32 l1_switch_latency = 0, latency_up_l0s;
572 u32 latency_up_l1, latency_dw_l0s, latency_dw_l1;
573 u32 acceptable_l0s, acceptable_l1;
574 struct pcie_link_state *link;
575
576 /* Device not in D0 doesn't need latency check */
577 if ((endpoint->current_state != PCI_D0) &&
578 (endpoint->current_state != PCI_UNKNOWN))
579 return;
580
581 link = endpoint->bus->self->link_state;
582
583 /* Calculate endpoint L0s acceptable latency */
584 encoding = FIELD_GET(PCI_EXP_DEVCAP_L0S, endpoint->devcap);
585 acceptable_l0s = calc_l0s_acceptable(encoding);
586
587 /* Calculate endpoint L1 acceptable latency */
588 encoding = FIELD_GET(PCI_EXP_DEVCAP_L1, endpoint->devcap);
589 acceptable_l1 = calc_l1_acceptable(encoding);
590
591 while (link) {
592 struct pci_dev *dev = pci_function_0(link->pdev->subordinate);
593
594 /* Read direction exit latencies */
595 pcie_capability_read_dword(link->pdev, PCI_EXP_LNKCAP,
596 &lnkcap_up);
597 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP,
598 &lnkcap_dw);
599 latency_up_l0s = calc_l0s_latency(lnkcap_up);
600 latency_up_l1 = calc_l1_latency(lnkcap_up);
601 latency_dw_l0s = calc_l0s_latency(lnkcap_dw);
602 latency_dw_l1 = calc_l1_latency(lnkcap_dw);
603
604 /* Check upstream direction L0s latency */
605 if ((link->aspm_capable & PCIE_LINK_STATE_L0S_UP) &&
606 (latency_up_l0s > acceptable_l0s))
607 link->aspm_capable &= ~PCIE_LINK_STATE_L0S_UP;
608
609 /* Check downstream direction L0s latency */
610 if ((link->aspm_capable & PCIE_LINK_STATE_L0S_DW) &&
611 (latency_dw_l0s > acceptable_l0s))
612 link->aspm_capable &= ~PCIE_LINK_STATE_L0S_DW;
613 /*
614 * Check L1 latency.
* Every switch on the path to the Root Complex needs 1
* more microsecond for L1. The spec doesn't mention L0s.
617 *
618 * The exit latencies for L1 substates are not advertised
619 * by a device. Since the spec also doesn't mention a way
620 * to determine max latencies introduced by enabling L1
621 * substates on the components, it is not clear how to do
622 * a L1 substate exit latency check. We assume that the
623 * L1 exit latencies advertised by a device include L1
624 * substate latencies (and hence do not do any check).
625 */
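/*
 * E.g., for an endpoint below one switch, the second loop iteration
 * (the switch's upstream Link) checks its L1 exit latency plus 1 us of
 * switch latency against the endpoint's acceptable L1 latency.
 */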
626 latency = max_t(u32, latency_up_l1, latency_dw_l1);
627 if ((link->aspm_capable & PCIE_LINK_STATE_L1) &&
628 (latency + l1_switch_latency > acceptable_l1))
629 link->aspm_capable &= ~PCIE_LINK_STATE_L1;
630 l1_switch_latency += NSEC_PER_USEC;
631
632 link = link->parent;
633 }
634 }
635
636 /* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l12_info(struct pcie_link_state *link,
			       u32 parent_l1ss_cap, u32 child_l1ss_cap)
639 {
640 struct pci_dev *child = link->downstream, *parent = link->pdev;
641 u32 val1, val2, scale1, scale2;
642 u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
643 u32 ctl1 = 0, ctl2 = 0;
644 u32 pctl1, pctl2, cctl1, cctl2;
645 u32 pl1_2_enables, cl1_2_enables;
646
647 /* Choose the greater of the two Port Common_Mode_Restore_Times */
648 val1 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, parent_l1ss_cap);
649 val2 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, child_l1ss_cap);
650 t_common_mode = max(val1, val2);
651
652 /* Choose the greater of the two Port T_POWER_ON times */
653 val1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, parent_l1ss_cap);
654 scale1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, parent_l1ss_cap);
655 val2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, child_l1ss_cap);
656 scale2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, child_l1ss_cap);
657
658 if (calc_l12_pwron(parent, scale1, val1) >
659 calc_l12_pwron(child, scale2, val2)) {
660 ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale1) |
661 FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val1);
662 t_power_on = calc_l12_pwron(parent, scale1, val1);
663 } else {
664 ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale2) |
665 FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val2);
666 t_power_on = calc_l12_pwron(child, scale2, val2);
667 }
668
669 /*
670 * Set LTR_L1.2_THRESHOLD to the time required to transition the
671 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
672 * downstream devices report (via LTR) that they can tolerate at
673 * least that much latency.
674 *
675 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
676 * Table 5-11. T(POWER_OFF) is at most 2us and T(L1.2) is at
677 * least 4us.
678 */
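/*
 * For example, with T(COMMON_MODE) = 50 us and T(POWER_ON) = 10 us
 * (illustrative values), the threshold works out to 2 + 4 + 50 + 10 = 66 us.
 */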
679 l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
680 encode_l12_threshold(l1_2_threshold, &scale, &value);
681 ctl1 |= FIELD_PREP(PCI_L1SS_CTL1_CM_RESTORE_TIME, t_common_mode) |
682 FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_VALUE, value) |
683 FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_SCALE, scale);
684
685 /* Some broken devices only support dword access to L1 SS */
686 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
687 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
688 pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
689 pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);
690
691 if (ctl1 == pctl1 && ctl1 == cctl1 &&
692 ctl2 == pctl2 && ctl2 == cctl2)
693 return;
694
695 /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
696 pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
697 cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
698
699 if (pl1_2_enables || cl1_2_enables) {
700 pci_clear_and_set_config_dword(child,
701 child->l1ss + PCI_L1SS_CTL1,
702 PCI_L1SS_CTL1_L1_2_MASK, 0);
703 pci_clear_and_set_config_dword(parent,
704 parent->l1ss + PCI_L1SS_CTL1,
705 PCI_L1SS_CTL1_L1_2_MASK, 0);
706 }
707
708 /* Program T_POWER_ON times in both ports */
709 pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
710 pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
711
712 /* Program Common_Mode_Restore_Time in upstream device */
713 pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
714 PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
715
716 /* Program LTR_L1.2_THRESHOLD time in both ports */
717 pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
718 PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
719 PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
720 ctl1);
721 pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
722 PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
723 PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
724 ctl1);
725
726 if (pl1_2_enables || cl1_2_enables) {
727 pci_clear_and_set_config_dword(parent,
728 parent->l1ss + PCI_L1SS_CTL1, 0,
729 pl1_2_enables);
730 pci_clear_and_set_config_dword(child,
731 child->l1ss + PCI_L1SS_CTL1, 0,
732 cl1_2_enables);
733 }
734 }
735
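/*
 * Read the L1 PM Substates capabilities of both ends of the Link, record
 * which substates are supported and already enabled, and program the L1.2
 * timing parameters if L1.2 is supported.
 */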
static void aspm_l1ss_init(struct pcie_link_state *link)
737 {
738 struct pci_dev *child = link->downstream, *parent = link->pdev;
739 u32 parent_l1ss_cap, child_l1ss_cap;
740 u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
741
742 if (!parent->l1ss || !child->l1ss)
743 return;
744
745 /* Setup L1 substate */
746 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
747 &parent_l1ss_cap);
748 pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
749 &child_l1ss_cap);
750
751 if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
752 parent_l1ss_cap = 0;
753 if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
754 child_l1ss_cap = 0;
755
756 /*
757 * If we don't have LTR for the entire path from the Root Complex
758 * to this device, we can't use ASPM L1.2 because it relies on the
759 * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
760 */
761 if (!child->ltr_path)
762 child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
763
764 if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
765 link->aspm_support |= PCIE_LINK_STATE_L1_1;
766 if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
767 link->aspm_support |= PCIE_LINK_STATE_L1_2;
768 if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
769 link->aspm_support |= PCIE_LINK_STATE_L1_1_PCIPM;
770 if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
771 link->aspm_support |= PCIE_LINK_STATE_L1_2_PCIPM;
772
773 if (parent_l1ss_cap)
774 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
775 &parent_l1ss_ctl1);
776 if (child_l1ss_cap)
777 pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
778 &child_l1ss_ctl1);
779
780 if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
781 link->aspm_enabled |= PCIE_LINK_STATE_L1_1;
782 if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
783 link->aspm_enabled |= PCIE_LINK_STATE_L1_2;
784 if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
785 link->aspm_enabled |= PCIE_LINK_STATE_L1_1_PCIPM;
786 if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
787 link->aspm_enabled |= PCIE_LINK_STATE_L1_2_PCIPM;
788
789 if (link->aspm_support & PCIE_LINK_STATE_L1_2_MASK)
790 aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
791 }
792
793 #define FLAG(x, y, d) (((x) & (PCIE_LINK_STATE_##y)) ? d : "")
794
static void pcie_aspm_override_default_link_state(struct pcie_link_state *link)
796 {
797 struct pci_dev *pdev = link->downstream;
798 u32 override;
799
800 /* For devicetree platforms, enable L0s and L1 by default */
801 if (of_have_populated_dt()) {
802 if (link->aspm_support & PCIE_LINK_STATE_L0S)
803 link->aspm_default |= PCIE_LINK_STATE_L0S;
804 if (link->aspm_support & PCIE_LINK_STATE_L1)
805 link->aspm_default |= PCIE_LINK_STATE_L1;
806 override = link->aspm_default & ~link->aspm_enabled;
807 if (override)
808 pci_info(pdev, "ASPM: default states%s%s\n",
809 FLAG(override, L0S, " L0s"),
810 FLAG(override, L1, " L1"));
811 }
812 }
813
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
815 {
816 struct pci_dev *child = link->downstream, *parent = link->pdev;
817 u16 parent_lnkctl, child_lnkctl;
818 struct pci_bus *linkbus = parent->subordinate;
819
820 if (blacklist) {
/* Set aspm_enabled and aspm_disable so that ASPM gets disabled later */
822 link->aspm_enabled = PCIE_LINK_STATE_ASPM_ALL;
823 link->aspm_disable = PCIE_LINK_STATE_ASPM_ALL;
824 return;
825 }
826
827 /*
828 * If ASPM not supported, don't mess with the clocks and link,
829 * bail out now.
830 */
831 if (!(parent->aspm_l0s_support && child->aspm_l0s_support) &&
832 !(parent->aspm_l1_support && child->aspm_l1_support))
833 return;
834
835 /* Configure common clock before checking latencies */
836 pcie_aspm_configure_common_clock(link);
837
838 /*
839 * Re-read upstream/downstream components' register state after
840 * clock configuration. L0s & L1 exit latencies in the otherwise
841 * read-only Link Capabilities may change depending on common clock
842 * configuration (PCIe r5.0, sec 7.5.3.6).
843 */
844 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
845 pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
846
847 /* Disable L0s/L1 before updating L1SS config */
848 if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
849 FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
850 pcie_capability_write_word(child, PCI_EXP_LNKCTL,
851 child_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
852 pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
853 parent_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
854 }
855
856 /*
857 * Setup L0s state
858 *
859 * Note that we must not enable L0s in either direction on a
860 * given link unless components on both sides of the link each
861 * support L0s.
862 */
863 if (parent->aspm_l0s_support && child->aspm_l0s_support)
864 link->aspm_support |= PCIE_LINK_STATE_L0S;
865
866 if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
867 link->aspm_enabled |= PCIE_LINK_STATE_L0S_UP;
868 if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
869 link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW;
870
871 /* Setup L1 state */
872 if (parent->aspm_l1_support && child->aspm_l1_support)
873 link->aspm_support |= PCIE_LINK_STATE_L1;
874
875 if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
876 link->aspm_enabled |= PCIE_LINK_STATE_L1;
877
878 aspm_l1ss_init(link);
879
880 /* Restore L0s/L1 if they were enabled */
881 if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
882 FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
883 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_lnkctl);
884 pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_lnkctl);
885 }
886
887 /* Save default state */
888 link->aspm_default = link->aspm_enabled;
889
890 pcie_aspm_override_default_link_state(link);
891
892 /* Setup initial capable state. Will be updated later */
893 link->aspm_capable = link->aspm_support;
894
895 /* Get and check endpoint acceptable latencies */
896 list_for_each_entry(child, &linkbus->devices, bus_list) {
897 if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
898 pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
899 continue;
900
901 pcie_aspm_check_latency(child);
902 }
903 }
904
905 /* Configure the ASPM L1 substates. Caller must disable L1 first. */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
907 {
908 u32 val = 0;
909 struct pci_dev *child = link->downstream, *parent = link->pdev;
910
911 if (state & PCIE_LINK_STATE_L1_1)
912 val |= PCI_L1SS_CTL1_ASPM_L1_1;
913 if (state & PCIE_LINK_STATE_L1_2)
914 val |= PCI_L1SS_CTL1_ASPM_L1_2;
915 if (state & PCIE_LINK_STATE_L1_1_PCIPM)
916 val |= PCI_L1SS_CTL1_PCIPM_L1_1;
917 if (state & PCIE_LINK_STATE_L1_2_PCIPM)
918 val |= PCI_L1SS_CTL1_PCIPM_L1_2;
919
920 /*
921 * PCIe r6.2, sec 5.5.4, rules for enabling L1 PM Substates:
922 * - Clear L1.x enable bits at child first, then at parent
923 * - Set L1.x enable bits at parent first, then at child
924 * - ASPM/PCIPM L1.2 must be disabled while programming timing
925 * parameters
926 */
927
928 /* Disable all L1 substates */
929 pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
930 PCI_L1SS_CTL1_L1SS_MASK, 0);
931 pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
932 PCI_L1SS_CTL1_L1SS_MASK, 0);
933
934 /* Enable what we need to enable */
935 pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
936 PCI_L1SS_CTL1_L1SS_MASK, val);
937 pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
938 PCI_L1SS_CTL1_L1SS_MASK, val);
939 }
940
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
942 {
943 pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
944 PCI_EXP_LNKCTL_ASPMC, val);
945 }
946
static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
948 {
949 u32 upstream = 0, dwstream = 0;
950 struct pci_dev *child = link->downstream, *parent = link->pdev;
951 struct pci_bus *linkbus = parent->subordinate;
952
953 /* Enable only the states that were not explicitly disabled */
954 state &= (link->aspm_capable & ~link->aspm_disable);
955
956 /* Can't enable any substates if L1 is not enabled */
957 if (!(state & PCIE_LINK_STATE_L1))
958 state &= ~PCIE_LINK_STATE_L1SS;
959
/* Spec says both ports must be in D0 before enabling PCI PM substates */
961 if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
962 state &= ~PCIE_LINK_STATE_L1_SS_PCIPM;
963 state |= (link->aspm_enabled & PCIE_LINK_STATE_L1_SS_PCIPM);
964 }
965
966 /* Nothing to do if the link is already in the requested state */
967 if (link->aspm_enabled == state)
968 return;
969 /* Convert ASPM state to upstream/downstream ASPM register state */
970 if (state & PCIE_LINK_STATE_L0S_UP)
971 dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
972 if (state & PCIE_LINK_STATE_L0S_DW)
973 upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
974 if (state & PCIE_LINK_STATE_L1) {
975 upstream |= PCI_EXP_LNKCTL_ASPM_L1;
976 dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
977 }
978
979 /*
980 * Per PCIe r6.2, sec 5.5.4, setting either or both of the enable
981 * bits for ASPM L1 PM Substates must be done while ASPM L1 is
982 * disabled. Disable L1 here and apply new configuration after L1SS
983 * configuration has been completed.
984 *
985 * Per sec 7.5.3.7, when disabling ASPM L1, software must disable
986 * it in the Downstream component prior to disabling it in the
987 * Upstream component, and ASPM L1 must be enabled in the Upstream
988 * component prior to enabling it in the Downstream component.
989 *
990 * Sec 7.5.3.7 also recommends programming the same ASPM Control
991 * value for all functions of a multi-function device.
992 */
993 list_for_each_entry(child, &linkbus->devices, bus_list)
994 pcie_config_aspm_dev(child, 0);
995 pcie_config_aspm_dev(parent, 0);
996
997 if (link->aspm_capable & PCIE_LINK_STATE_L1SS)
998 pcie_config_aspm_l1ss(link, state);
999
1000 pcie_config_aspm_dev(parent, upstream);
1001 list_for_each_entry(child, &linkbus->devices, bus_list)
1002 pcie_config_aspm_dev(child, dwstream);
1003
1004 link->aspm_enabled = state;
1005
1006 /* Update latest ASPM configuration in saved context */
1007 pci_save_aspm_l1ss_state(link->downstream);
1008 pci_update_aspm_saved_state(link->downstream);
1009 pci_save_aspm_l1ss_state(parent);
1010 pci_update_aspm_saved_state(parent);
1011 }
1012
static void pcie_config_aspm_path(struct pcie_link_state *link)
1014 {
1015 while (link) {
1016 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1017 link = link->parent;
1018 }
1019 }
1020
static void free_link_state(struct pcie_link_state *link)
1022 {
1023 link->pdev->link_state = NULL;
1024 kfree(link);
1025 }
1026
static int pcie_aspm_sanity_check(struct pci_dev *pdev)
1028 {
1029 struct pci_dev *child;
1030 u32 reg32;
1031
1032 /*
* Some functions in a slot might not be PCIe functions at all, which
* is very strange. Disable ASPM for the whole slot in that case.
1035 */
1036 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
1037 if (!pci_is_pcie(child))
1038 return -EINVAL;
1039
1040 /*
1041 * If ASPM is disabled then we're not going to change
1042 * the BIOS state. It's safe to continue even if it's a
1043 * pre-1.1 device
1044 */
1045
1046 if (aspm_disabled)
1047 continue;
1048
1049 /*
* Disable ASPM for pre-1.1 PCIe devices. Like Windows, we use the
* RBER bit to determine whether a function is a 1.1 device.
1052 */
pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
1054 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
1055 pci_info(child, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
1056 return -EINVAL;
1057 }
1058 }
1059 return 0;
1060 }
1061
static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
1063 {
1064 struct pcie_link_state *link;
1065
1066 link = kzalloc(sizeof(*link), GFP_KERNEL);
1067 if (!link)
1068 return NULL;
1069
1070 INIT_LIST_HEAD(&link->sibling);
1071 link->pdev = pdev;
1072 link->downstream = pci_function_0(pdev->subordinate);
1073
1074 /*
1075 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
1076 * hierarchies. Note that some PCIe host implementations omit
1077 * the root ports entirely, in which case a downstream port on
1078 * a switch may become the root of the link state chain for all
1079 * its subordinate endpoints.
1080 */
1081 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
1082 pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
1083 !pdev->bus->parent->self) {
1084 link->root = link;
1085 } else {
1086 struct pcie_link_state *parent;
1087
1088 parent = pdev->bus->parent->self->link_state;
1089 if (!parent) {
1090 kfree(link);
1091 return NULL;
1092 }
1093
1094 link->parent = parent;
1095 link->root = link->parent->root;
1096 }
1097
1098 list_add(&link->sibling, &link_list);
1099 pdev->link_state = link;
1100 return link;
1101 }
1102
static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
1104 {
1105 struct pci_dev *child;
1106
1107 list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
1108 sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
1109 }
1110
1111 /*
* pcie_aspm_init_link_state: Initialize the PCIe link state.
* Called after a PCIe port and its child devices have been scanned.
1114 * @pdev: the root port or switch downstream port
1115 */
void pcie_aspm_init_link_state(struct pci_dev *pdev)
1117 {
1118 struct pcie_link_state *link;
1119 int blacklist = !!pcie_aspm_sanity_check(pdev);
1120
1121 if (!aspm_support_enabled)
1122 return;
1123
1124 if (pdev->link_state)
1125 return;
1126
1127 /*
1128 * We allocate pcie_link_state for the component on the upstream
1129 * end of a Link, so there's nothing to do unless this device is
* a downstream port.
1131 */
1132 if (!pcie_downstream_port(pdev))
1133 return;
1134
1135 /* VIA has a strange chipset, root port is under a bridge */
1136 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
1137 pdev->bus->self)
1138 return;
1139
1140 down_read(&pci_bus_sem);
1141 if (list_empty(&pdev->subordinate->devices))
1142 goto out;
1143
1144 mutex_lock(&aspm_lock);
1145 link = alloc_pcie_link_state(pdev);
1146 if (!link)
1147 goto unlock;
1148 /*
1149 * Setup initial ASPM state. Note that we need to configure
* upstream links also because their capable state can be
* updated through pcie_aspm_cap_init().
1152 */
1153 pcie_aspm_cap_init(link, blacklist);
1154
1155 /* Setup initial Clock PM state */
1156 pcie_clkpm_cap_init(link, blacklist);
1157
1158 /*
1159 * At this stage drivers haven't had an opportunity to change the
1160 * link policy setting. Enabling ASPM on broken hardware can cripple
1161 * it even before the driver has had a chance to disable ASPM, so
1162 * default to a safe level right now. If we're enabling ASPM beyond
1163 * the BIOS's expectation, we'll do so once pci_enable_device() is
1164 * called.
1165 */
1166 if (aspm_policy != POLICY_POWERSAVE &&
1167 aspm_policy != POLICY_POWER_SUPERSAVE) {
1168 pcie_config_aspm_path(link);
1169 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1170 }
1171
1172 pcie_aspm_update_sysfs_visibility(pdev);
1173
1174 unlock:
1175 mutex_unlock(&aspm_lock);
1176 out:
1177 up_read(&pci_bus_sem);
1178 }
1179
void pci_bridge_reconfigure_ltr(struct pci_dev *pdev)
1181 {
1182 struct pci_dev *bridge;
1183 u32 ctl;
1184
1185 bridge = pci_upstream_bridge(pdev);
1186 if (bridge && bridge->ltr_path) {
1187 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
1188 if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
1189 pci_dbg(bridge, "re-enabling LTR\n");
1190 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
1191 PCI_EXP_DEVCTL2_LTR_EN);
1192 }
1193 }
1194 }
1195
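/*
 * Enable LTR in @pdev if the Root Complex and every intermediate Switch on
 * the path already have it enabled (tracked via ltr_path); otherwise just
 * record whether LTR is already enabled, e.g. by the BIOS.
 */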
void pci_configure_ltr(struct pci_dev *pdev)
1197 {
1198 struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
1199 struct pci_dev *bridge;
1200 u32 cap, ctl;
1201
1202 if (!pci_is_pcie(pdev))
1203 return;
1204
1205 pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &cap);
1206 if (!(cap & PCI_EXP_DEVCAP2_LTR))
1207 return;
1208
1209 pcie_capability_read_dword(pdev, PCI_EXP_DEVCTL2, &ctl);
1210 if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
1211 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
1212 pdev->ltr_path = 1;
1213 return;
1214 }
1215
1216 bridge = pci_upstream_bridge(pdev);
1217 if (bridge && bridge->ltr_path)
1218 pdev->ltr_path = 1;
1219
1220 return;
1221 }
1222
1223 if (!host->native_ltr)
1224 return;
1225
1226 /*
1227 * Software must not enable LTR in an Endpoint unless the Root
1228 * Complex and all intermediate Switches indicate support for LTR.
1229 * PCIe r4.0, sec 6.18.
1230 */
1231 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
1232 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
1233 PCI_EXP_DEVCTL2_LTR_EN);
1234 pdev->ltr_path = 1;
1235 return;
1236 }
1237
1238 /*
1239 * If we're configuring a hot-added device, LTR was likely
1240 * disabled in the upstream bridge, so re-enable it before enabling
1241 * it in the new device.
1242 */
1243 bridge = pci_upstream_bridge(pdev);
1244 if (bridge && bridge->ltr_path) {
1245 pci_bridge_reconfigure_ltr(pdev);
1246 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
1247 PCI_EXP_DEVCTL2_LTR_EN);
1248 pdev->ltr_path = 1;
1249 }
1250 }
1251
1252 /* Recheck latencies and update aspm_capable for links under the root */
static void pcie_update_aspm_capable(struct pcie_link_state *root)
1254 {
1255 struct pcie_link_state *link;
1256 BUG_ON(root->parent);
1257 list_for_each_entry(link, &link_list, sibling) {
1258 if (link->root != root)
1259 continue;
1260 link->aspm_capable = link->aspm_support;
1261 }
1262 list_for_each_entry(link, &link_list, sibling) {
1263 struct pci_dev *child;
1264 struct pci_bus *linkbus = link->pdev->subordinate;
1265 if (link->root != root)
1266 continue;
1267 list_for_each_entry(child, &linkbus->devices, bus_list) {
1268 if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
1269 (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
1270 continue;
1271 pcie_aspm_check_latency(child);
1272 }
1273 }
1274 }
1275
1276 /* @pdev: the endpoint device */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
1278 {
1279 struct pci_dev *parent = pdev->bus->self;
1280 struct pcie_link_state *link, *root, *parent_link;
1281
1282 if (!parent || !parent->link_state)
1283 return;
1284
1285 down_read(&pci_bus_sem);
1286 mutex_lock(&aspm_lock);
1287
1288 link = parent->link_state;
1289 root = link->root;
1290 parent_link = link->parent;
1291
1292 /*
1293 * Free the parent link state, no later than function 0 (i.e.
1294 * link->downstream) being removed.
1295 *
1296 * Do not free the link state any earlier. If function 0 is a
1297 * switch upstream port, this link state is parent_link to all
1298 * subordinate ones.
1299 */
1300 if (pdev != link->downstream)
1301 goto out;
1302
1303 pcie_config_aspm_link(link, 0);
1304 list_del(&link->sibling);
1305 free_link_state(link);
1306
1307 /* Recheck latencies and configure upstream links */
1308 if (parent_link) {
1309 pcie_update_aspm_capable(root);
1310 pcie_config_aspm_path(parent_link);
1311 }
1312
1313 out:
1314 mutex_unlock(&aspm_lock);
1315 up_read(&pci_bus_sem);
1316 }
1317
1318 /*
1319 * @pdev: the root port or switch downstream port
1320 * @locked: whether pci_bus_sem is held
1321 */
void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
1323 {
1324 struct pcie_link_state *link = pdev->link_state;
1325
1326 if (aspm_disabled || !link)
1327 return;
1328 /*
* A device changed its PM state, so recheck whether the latency
* still meets all functions' requirements.
1331 */
1332 if (!locked)
1333 down_read(&pci_bus_sem);
1334 mutex_lock(&aspm_lock);
1335 pcie_update_aspm_capable(link->root);
1336 pcie_config_aspm_path(link);
1337 mutex_unlock(&aspm_lock);
1338 if (!locked)
1339 up_read(&pci_bus_sem);
1340 }
1341
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
1343 {
1344 struct pcie_link_state *link = pdev->link_state;
1345
1346 if (aspm_disabled || !link)
1347 return;
1348
1349 if (aspm_policy != POLICY_POWERSAVE &&
1350 aspm_policy != POLICY_POWER_SUPERSAVE)
1351 return;
1352
1353 down_read(&pci_bus_sem);
1354 mutex_lock(&aspm_lock);
1355 pcie_config_aspm_path(link);
1356 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1357 mutex_unlock(&aspm_lock);
1358 up_read(&pci_bus_sem);
1359 }
1360
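/*
 * Return the struct pcie_link_state of the Link leading to @pdev (owned by
 * its upstream bridge), or NULL if @pdev or its upstream bridge is not PCIe.
 */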
static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
1362 {
1363 struct pci_dev *bridge;
1364
1365 if (!pci_is_pcie(pdev))
1366 return NULL;
1367
1368 bridge = pci_upstream_bridge(pdev);
1369 if (!bridge || !pci_is_pcie(bridge))
1370 return NULL;
1371
1372 return bridge->link_state;
1373 }
1374
static u8 pci_calc_aspm_disable_mask(int state)
1376 {
1377 state &= ~PCIE_LINK_STATE_CLKPM;
1378
1379 /* L1 PM substates require L1 */
1380 if (state & PCIE_LINK_STATE_L1)
1381 state |= PCIE_LINK_STATE_L1SS;
1382
1383 return state;
1384 }
1385
static u8 pci_calc_aspm_enable_mask(int state)
1387 {
1388 state &= ~PCIE_LINK_STATE_CLKPM;
1389
1390 /* L1 PM substates require L1 */
1391 if (state & PCIE_LINK_STATE_L1SS)
1392 state |= PCIE_LINK_STATE_L1;
1393
1394 return state;
1395 }
1396
static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
1398 {
1399 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1400
1401 if (!link)
1402 return -EINVAL;
1403 /*
1404 * A driver requested that ASPM be disabled on this device, but
1405 * if we don't have permission to manage ASPM (e.g., on ACPI
1406 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
1407 * the _OSC method), we can't honor that request. Windows has
1408 * a similar mechanism using "PciASPMOptOut", which is also
1409 * ignored in this situation.
1410 */
1411 if (aspm_disabled) {
1412 pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
1413 return -EPERM;
1414 }
1415
1416 if (!locked)
1417 down_read(&pci_bus_sem);
1418 mutex_lock(&aspm_lock);
1419 link->aspm_disable |= pci_calc_aspm_disable_mask(state);
1420 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1421
1422 if (state & PCIE_LINK_STATE_CLKPM)
1423 link->clkpm_disable = 1;
1424 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1425 mutex_unlock(&aspm_lock);
1426 if (!locked)
1427 up_read(&pci_bus_sem);
1428
1429 return 0;
1430 }
1431
int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1433 {
1434 lockdep_assert_held_read(&pci_bus_sem);
1435
1436 return __pci_disable_link_state(pdev, state, true);
1437 }
1438 EXPORT_SYMBOL(pci_disable_link_state_locked);
1439
1440 /**
1441 * pci_disable_link_state - Disable device's link state, so the link will
1442 * never enter specific states. Note that if the BIOS didn't grant ASPM
1443 * control to the OS, this does nothing because we can't touch the LNKCTL
1444 * register. Returns 0 or a negative errno.
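 *
 * For example, a driver working around an erratum that breaks ASPM L1.2 on
 * its device could call pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2).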
1445 *
1446 * @pdev: PCI device
1447 * @state: ASPM link state to disable
1448 */
int pci_disable_link_state(struct pci_dev *pdev, int state)
1450 {
1451 return __pci_disable_link_state(pdev, state, false);
1452 }
1453 EXPORT_SYMBOL(pci_disable_link_state);
1454
static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
1456 {
1457 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1458
1459 if (!link)
1460 return -EINVAL;
1461 /*
1462 * A driver requested that ASPM be enabled on this device, but
1463 * if we don't have permission to manage ASPM (e.g., on ACPI
1464 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
1465 * the _OSC method), we can't honor that request.
1466 */
1467 if (aspm_disabled) {
1468 pci_warn(pdev, "can't override BIOS ASPM; OS doesn't have ASPM control\n");
1469 return -EPERM;
1470 }
1471
1472 if (!locked)
1473 down_read(&pci_bus_sem);
1474 mutex_lock(&aspm_lock);
1475 link->aspm_default = pci_calc_aspm_enable_mask(state);
1476 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1477
1478 link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
1479 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1480 mutex_unlock(&aspm_lock);
1481 if (!locked)
1482 up_read(&pci_bus_sem);
1483
1484 return 0;
1485 }
1486
1487 /**
1488 * pci_enable_link_state - Clear and set the default device link state so that
1489 * the link may be allowed to enter the specified states. Note that if the
1490 * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
1491 * touch the LNKCTL register. Also note that this does not enable states
1492 * disabled by pci_disable_link_state(). Return 0 or a negative errno.
1493 *
1494 * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
1495 * PCIe r6.0, sec 5.5.4.
1496 *
1497 * @pdev: PCI device
1498 * @state: Mask of ASPM link states to enable
1499 */
int pci_enable_link_state(struct pci_dev *pdev, int state)
1501 {
1502 return __pci_enable_link_state(pdev, state, false);
1503 }
1504 EXPORT_SYMBOL(pci_enable_link_state);
1505
1506 /**
1507 * pci_enable_link_state_locked - Clear and set the default device link state
1508 * so that the link may be allowed to enter the specified states. Note that if
1509 * the BIOS didn't grant ASPM control to the OS, this does nothing because we
1510 * can't touch the LNKCTL register. Also note that this does not enable states
1511 * disabled by pci_disable_link_state(). Return 0 or a negative errno.
1512 *
1513 * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
1514 * PCIe r6.0, sec 5.5.4.
1515 *
1516 * @pdev: PCI device
1517 * @state: Mask of ASPM link states to enable
1518 *
1519 * Context: Caller holds pci_bus_sem read lock.
1520 */
int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1522 {
1523 lockdep_assert_held_read(&pci_bus_sem);
1524
1525 return __pci_enable_link_state(pdev, state, true);
1526 }
1527 EXPORT_SYMBOL(pci_enable_link_state_locked);
1528
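/*
 * Treat the ASPM states advertised in @lnkcap as unsupported on @pdev,
 * typically because of a device defect, so they are never enabled on its
 * Link.
 */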
void pcie_aspm_remove_cap(struct pci_dev *pdev, u32 lnkcap)
1530 {
1531 if (lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
1532 pdev->aspm_l0s_support = 0;
1533 if (lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
1534 pdev->aspm_l1_support = 0;
1535
1536 pci_info(pdev, "ASPM: Link Capabilities%s%s treated as unsupported to avoid device defect\n",
1537 lnkcap & PCI_EXP_LNKCAP_ASPM_L0S ? " L0s" : "",
1538 lnkcap & PCI_EXP_LNKCAP_ASPM_L1 ? " L1" : "");
1539
1540 }
1541
static int pcie_aspm_set_policy(const char *val,
				const struct kernel_param *kp)
1544 {
1545 int i;
1546 struct pcie_link_state *link;
1547
1548 if (aspm_disabled)
1549 return -EPERM;
1550 i = sysfs_match_string(policy_str, val);
1551 if (i < 0)
1552 return i;
1553 if (i == aspm_policy)
1554 return 0;
1555
1556 down_read(&pci_bus_sem);
1557 mutex_lock(&aspm_lock);
1558 aspm_policy = i;
1559 list_for_each_entry(link, &link_list, sibling) {
1560 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1561 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1562 }
1563 mutex_unlock(&aspm_lock);
1564 up_read(&pci_bus_sem);
1565 return 0;
1566 }
1567
static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
1569 {
1570 int i, cnt = 0;
1571 for (i = 0; i < ARRAY_SIZE(policy_str); i++)
1572 if (i == aspm_policy)
1573 cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
1574 else
1575 cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
1576 cnt += sprintf(buffer + cnt, "\n");
1577 return cnt;
1578 }
1579
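/*
 * The policy is exposed as "pcie_aspm.policy" on the kernel command line
 * and as /sys/module/pcie_aspm/parameters/policy at run time, e.g.
 * "echo powersave > /sys/module/pcie_aspm/parameters/policy".
 */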
1580 module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
1581 NULL, 0644);
1582
1583 /**
1584 * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
1585 * @pdev: Target device.
1586 *
1587 * Relies on the upstream bridge's link_state being valid. The link_state
1588 * is deallocated only when the last child of the bridge (i.e., @pdev or a
1589 * sibling) is removed, and the caller should be holding a reference to
1590 * @pdev, so this should be safe.
1591 */
bool pcie_aspm_enabled(struct pci_dev *pdev)
1593 {
1594 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1595
1596 if (!link)
1597 return false;
1598
1599 return link->aspm_enabled;
1600 }
1601 EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
1602
static ssize_t aspm_attr_show_common(struct device *dev,
				     struct device_attribute *attr,
				     char *buf, u8 state)
1606 {
1607 struct pci_dev *pdev = to_pci_dev(dev);
1608 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1609
1610 return sysfs_emit(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
1611 }
1612
static ssize_t aspm_attr_store_common(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t len, u8 state)
1616 {
1617 struct pci_dev *pdev = to_pci_dev(dev);
1618 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1619 bool state_enable;
1620
1621 if (kstrtobool(buf, &state_enable) < 0)
1622 return -EINVAL;
1623
1624 down_read(&pci_bus_sem);
1625 mutex_lock(&aspm_lock);
1626
1627 if (state_enable) {
1628 link->aspm_disable &= ~state;
1629 /* need to enable L1 for substates */
1630 if (state & PCIE_LINK_STATE_L1SS)
1631 link->aspm_disable &= ~PCIE_LINK_STATE_L1;
1632 } else {
1633 link->aspm_disable |= state;
1634 if (state & PCIE_LINK_STATE_L1)
1635 link->aspm_disable |= PCIE_LINK_STATE_L1SS;
1636 }
1637
1638 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1639
1640 mutex_unlock(&aspm_lock);
1641 up_read(&pci_bus_sem);
1642
1643 return len;
1644 }
1645
1646 #define ASPM_ATTR(_f, _s) \
1647 static ssize_t _f##_show(struct device *dev, \
1648 struct device_attribute *attr, char *buf) \
1649 { return aspm_attr_show_common(dev, attr, buf, PCIE_LINK_STATE_##_s); } \
1650 \
1651 static ssize_t _f##_store(struct device *dev, \
1652 struct device_attribute *attr, \
1653 const char *buf, size_t len) \
1654 { return aspm_attr_store_common(dev, attr, buf, len, PCIE_LINK_STATE_##_s); }
1655
ASPM_ATTR(l0s_aspm, L0S)
1657 ASPM_ATTR(l1_aspm, L1)
1658 ASPM_ATTR(l1_1_aspm, L1_1)
1659 ASPM_ATTR(l1_2_aspm, L1_2)
1660 ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
1661 ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)
1662
1663 static ssize_t clkpm_show(struct device *dev,
1664 struct device_attribute *attr, char *buf)
1665 {
1666 struct pci_dev *pdev = to_pci_dev(dev);
1667 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1668
1669 return sysfs_emit(buf, "%d\n", link->clkpm_enabled);
1670 }
1671
static ssize_t clkpm_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
1675 {
1676 struct pci_dev *pdev = to_pci_dev(dev);
1677 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1678 bool state_enable;
1679
1680 if (kstrtobool(buf, &state_enable) < 0)
1681 return -EINVAL;
1682
1683 down_read(&pci_bus_sem);
1684 mutex_lock(&aspm_lock);
1685
1686 link->clkpm_disable = !state_enable;
1687 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1688
1689 mutex_unlock(&aspm_lock);
1690 up_read(&pci_bus_sem);
1691
1692 return len;
1693 }
1694
1695 static DEVICE_ATTR_RW(clkpm);
1696 static DEVICE_ATTR_RW(l0s_aspm);
1697 static DEVICE_ATTR_RW(l1_aspm);
1698 static DEVICE_ATTR_RW(l1_1_aspm);
1699 static DEVICE_ATTR_RW(l1_2_aspm);
1700 static DEVICE_ATTR_RW(l1_1_pcipm);
1701 static DEVICE_ATTR_RW(l1_2_pcipm);
1702
1703 static struct attribute *aspm_ctrl_attrs[] = {
1704 &dev_attr_clkpm.attr,
1705 &dev_attr_l0s_aspm.attr,
1706 &dev_attr_l1_aspm.attr,
1707 &dev_attr_l1_1_aspm.attr,
1708 &dev_attr_l1_2_aspm.attr,
1709 &dev_attr_l1_1_pcipm.attr,
1710 &dev_attr_l1_2_pcipm.attr,
1711 NULL
1712 };
1713
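/*
 * Entry 0 of aspm_ctrl_attrs is the clkpm attribute; the remaining entries
 * map 1:1 onto aspm_state_map below, so attribute n is visible only when
 * the corresponding state is in aspm_capable.
 */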
static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
					   struct attribute *a, int n)
1716 {
1717 struct device *dev = kobj_to_dev(kobj);
1718 struct pci_dev *pdev = to_pci_dev(dev);
1719 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1720 static const u8 aspm_state_map[] = {
1721 PCIE_LINK_STATE_L0S,
1722 PCIE_LINK_STATE_L1,
1723 PCIE_LINK_STATE_L1_1,
1724 PCIE_LINK_STATE_L1_2,
1725 PCIE_LINK_STATE_L1_1_PCIPM,
1726 PCIE_LINK_STATE_L1_2_PCIPM,
1727 };
1728
1729 if (aspm_disabled || !link)
1730 return 0;
1731
1732 if (n == 0)
1733 return link->clkpm_capable ? a->mode : 0;
1734
1735 return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
1736 }
1737
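/*
 * The group is named "link", so the controls above show up under the
 * device's sysfs directory, e.g. /sys/bus/pci/devices/<bdf>/link/l1_aspm.
 */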
1738 const struct attribute_group aspm_ctrl_attr_group = {
1739 .name = "link",
1740 .attrs = aspm_ctrl_attrs,
1741 .is_visible = aspm_ctrl_attrs_are_visible,
1742 };
1743
static int __init pcie_aspm_disable(char *str)
1745 {
1746 if (!strcmp(str, "off")) {
1747 aspm_policy = POLICY_DEFAULT;
1748 aspm_disabled = true;
1749 aspm_support_enabled = false;
1750 pr_info("PCIe ASPM is disabled\n");
1751 } else if (!strcmp(str, "force")) {
1752 aspm_force = true;
1753 pr_info("PCIe ASPM is forcibly enabled\n");
1754 }
1755 return 1;
1756 }
1757
1758 __setup("pcie_aspm=", pcie_aspm_disable);
1759
void pcie_no_aspm(void)
1761 {
1762 /*
1763 * Disabling ASPM is intended to prevent the kernel from modifying
1764 * existing hardware state, not to clear existing state. To that end:
1765 * (a) set policy to POLICY_DEFAULT in order to avoid changing state
1766 * (b) prevent userspace from changing policy
1767 */
1768 if (!aspm_force) {
1769 aspm_policy = POLICY_DEFAULT;
1770 aspm_disabled = true;
1771 }
1772 }
1773
bool pcie_aspm_support_enabled(void)
1775 {
1776 return aspm_support_enabled;
1777 }
1778
1779 #endif /* CONFIG_PCIEASPM */
1780