1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Enable PCIe link L0s/L1 state and Clock Power Management
4 *
5 * Copyright (C) 2007 Intel
6 * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
7 * Copyright (C) Shaohua Li (shaohua.li@intel.com)
8 */
9
10 #include <linux/bitfield.h>
11 #include <linux/bits.h>
12 #include <linux/build_bug.h>
13 #include <linux/kernel.h>
14 #include <linux/limits.h>
15 #include <linux/math.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/pci.h>
19 #include <linux/pci_regs.h>
20 #include <linux/errno.h>
21 #include <linux/pm.h>
22 #include <linux/init.h>
23 #include <linux/printk.h>
24 #include <linux/slab.h>
25 #include <linux/time.h>
26
27 #include "../pci.h"
28
29 void pci_save_ltr_state(struct pci_dev *dev)
30 {
31 int ltr;
32 struct pci_cap_saved_state *save_state;
33 u32 *cap;
34
35 if (!pci_is_pcie(dev))
36 return;
37
38 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
39 if (!ltr)
40 return;
41
42 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
43 if (!save_state) {
44 pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
45 return;
46 }
47
48 /* Some broken devices only support dword access to LTR */
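/*
 * Note: the dword read at PCI_LTR_MAX_SNOOP_LAT below also captures the
 * adjacent 16-bit PCI_LTR_MAX_NOSNOOP_LAT register.
 */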
49 cap = &save_state->cap.data[0];
50 pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
51 }
52
53 void pci_restore_ltr_state(struct pci_dev *dev)
54 {
55 struct pci_cap_saved_state *save_state;
56 int ltr;
57 u32 *cap;
58
59 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
60 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
61 if (!save_state || !ltr)
62 return;
63
64 /* Some broken devices only support dword access to LTR */
65 cap = &save_state->cap.data[0];
66 pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
67 }
68
69 void pci_configure_aspm_l1ss(struct pci_dev *pdev)
70 {
71 int rc;
72
73 pdev->l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
74
75 rc = pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_L1SS,
76 2 * sizeof(u32));
77 if (rc)
78 pci_err(pdev, "unable to allocate ASPM L1SS save buffer (%pe)\n",
79 ERR_PTR(rc));
80 }
81
82 void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
83 {
84 struct pci_dev *parent = pdev->bus->self;
85 struct pci_cap_saved_state *save_state;
86 u32 *cap;
87
88 /*
89 * If this is a Downstream Port, we never restore the L1SS state
90 * directly; we only restore it when we restore the state of the
91 * Upstream Port below it.
92 */
93 if (pcie_downstream_port(pdev) || !parent)
94 return;
95
96 if (!pdev->l1ss || !parent->l1ss)
97 return;
98
99 /*
100 * Save L1 substate configuration. The ASPM L0s/L1 configuration
101 * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
102 */
103 save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
104 if (!save_state)
105 return;
106
107 cap = &save_state->cap.data[0];
108 pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
109 pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
110
111 /*
112 * Save parent's L1 substate configuration so we have it for
113 * pci_restore_aspm_l1ss_state(pdev) to restore.
114 */
115 save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
116 if (!save_state)
117 return;
118
119 cap = &save_state->cap.data[0];
120 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
121 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
122 }
123
124 void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
125 {
126 struct pci_cap_saved_state *pl_save_state, *cl_save_state;
127 struct pci_dev *parent = pdev->bus->self;
128 u32 *cap, pl_ctl1, pl_ctl2, pl_l1_2_enable;
129 u32 cl_ctl1, cl_ctl2, cl_l1_2_enable;
130 u16 clnkctl, plnkctl;
131
132 /*
133 * In case BIOS enabled L1.2 when resuming, we need to disable it first
134 * on the downstream component before the upstream. So, don't attempt to
135 * restore either until we are at the downstream component.
136 */
137 if (pcie_downstream_port(pdev) || !parent)
138 return;
139
140 if (!pdev->l1ss || !parent->l1ss)
141 return;
142
143 cl_save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
144 pl_save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
145 if (!cl_save_state || !pl_save_state)
146 return;
147
148 cap = &cl_save_state->cap.data[0];
149 cl_ctl2 = *cap++;
150 cl_ctl1 = *cap;
151 cap = &pl_save_state->cap.data[0];
152 pl_ctl2 = *cap++;
153 pl_ctl1 = *cap;
154
155 /* Make sure L0s/L1 are disabled before updating L1SS config */
156 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &clnkctl);
157 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &plnkctl);
158 if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
159 FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
160 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
161 clnkctl & ~PCI_EXP_LNKCTL_ASPMC);
162 pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
163 plnkctl & ~PCI_EXP_LNKCTL_ASPMC);
164 }
165
166 /*
167 * Disable L1.2 on this downstream endpoint device first, followed
168 * by the upstream
169 */
170 pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
171 PCI_L1SS_CTL1_L1_2_MASK, 0);
172 pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
173 PCI_L1SS_CTL1_L1_2_MASK, 0);
174
175 /*
176 * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD
177 * in PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
178 * enable bits, even though they're all in PCI_L1SS_CTL1.
179 */
180 pl_l1_2_enable = pl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
181 pl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
182 cl_l1_2_enable = cl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
183 cl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
184
185 /* Write back without enables first (above we cleared them in ctl1) */
186 pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, pl_ctl2);
187 pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cl_ctl2);
188 pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, pl_ctl1);
189 pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cl_ctl1);
190
191 /* Then write back the enables */
192 if (pl_l1_2_enable || cl_l1_2_enable) {
193 pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
194 pl_ctl1 | pl_l1_2_enable);
195 pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
196 cl_ctl1 | cl_l1_2_enable);
197 }
198
199 /* Restore L0s/L1 if they were enabled */
200 if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
201 FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
202 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl);
203 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl);
204 }
205 }
206
207 #ifdef CONFIG_PCIEASPM
208
209 #ifdef MODULE_PARAM_PREFIX
210 #undef MODULE_PARAM_PREFIX
211 #endif
212 #define MODULE_PARAM_PREFIX "pcie_aspm."
213
214 /* Note: these are not register definitions */
215 #define PCIE_LINK_STATE_L0S_UP BIT(0) /* Upstream direction L0s state */
216 #define PCIE_LINK_STATE_L0S_DW BIT(1) /* Downstream direction L0s state */
217 static_assert(PCIE_LINK_STATE_L0S == (PCIE_LINK_STATE_L0S_UP | PCIE_LINK_STATE_L0S_DW));
218
219 #define PCIE_LINK_STATE_L1_SS_PCIPM (PCIE_LINK_STATE_L1_1_PCIPM |\
220 PCIE_LINK_STATE_L1_2_PCIPM)
221 #define PCIE_LINK_STATE_L1_2_MASK (PCIE_LINK_STATE_L1_2 |\
222 PCIE_LINK_STATE_L1_2_PCIPM)
223 #define PCIE_LINK_STATE_L1SS (PCIE_LINK_STATE_L1_1 |\
224 PCIE_LINK_STATE_L1_1_PCIPM |\
225 PCIE_LINK_STATE_L1_2_MASK)
226
227 struct pcie_link_state {
228 struct pci_dev *pdev; /* Upstream component of the Link */
229 struct pci_dev *downstream; /* Downstream component, function 0 */
230 struct pcie_link_state *root; /* pointer to the root port link */
231 struct pcie_link_state *parent; /* pointer to the parent Link state */
232 struct list_head sibling; /* node in link_list */
233
234 /* ASPM state */
235 u32 aspm_support:7; /* Supported ASPM state */
236 u32 aspm_enabled:7; /* Enabled ASPM state */
237 u32 aspm_capable:7; /* Capable ASPM state with latency */
238 u32 aspm_default:7; /* Default ASPM state by BIOS */
239 u32 aspm_disable:7; /* Disabled ASPM state */
240
241 /* Clock PM state */
242 u32 clkpm_capable:1; /* Clock PM capable? */
243 u32 clkpm_enabled:1; /* Current Clock PM state */
244 u32 clkpm_default:1; /* Default Clock PM state by BIOS */
245 u32 clkpm_disable:1; /* Clock PM disabled */
246 };
247
248 static int aspm_disabled, aspm_force;
249 static bool aspm_support_enabled = true;
250 static DEFINE_MUTEX(aspm_lock);
251 static LIST_HEAD(link_list);
252
253 #define POLICY_DEFAULT 0 /* BIOS default setting */
254 #define POLICY_PERFORMANCE 1 /* high performance */
255 #define POLICY_POWERSAVE 2 /* high power saving */
256 #define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */
257
258 #ifdef CONFIG_PCIEASPM_PERFORMANCE
259 static int aspm_policy = POLICY_PERFORMANCE;
260 #elif defined CONFIG_PCIEASPM_POWERSAVE
261 static int aspm_policy = POLICY_POWERSAVE;
262 #elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
263 static int aspm_policy = POLICY_POWER_SUPERSAVE;
264 #else
265 static int aspm_policy;
266 #endif
267
268 static const char *policy_str[] = {
269 [POLICY_DEFAULT] = "default",
270 [POLICY_PERFORMANCE] = "performance",
271 [POLICY_POWERSAVE] = "powersave",
272 [POLICY_POWER_SUPERSAVE] = "powersupersave"
273 };
274
275 /*
276 * The L1 PM substate capability is only implemented in function 0 in a
277 * multi function device.
278 */
279 static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
280 {
281 struct pci_dev *child;
282
283 list_for_each_entry(child, &linkbus->devices, bus_list)
284 if (PCI_FUNC(child->devfn) == 0)
285 return child;
286 return NULL;
287 }
288
289 static int policy_to_aspm_state(struct pcie_link_state *link)
290 {
291 switch (aspm_policy) {
292 case POLICY_PERFORMANCE:
293 /* Disable ASPM and Clock PM */
294 return 0;
295 case POLICY_POWERSAVE:
296 /* Enable ASPM L0s/L1 */
297 return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
298 case POLICY_POWER_SUPERSAVE:
299 /* Enable Everything */
300 return PCIE_LINK_STATE_ASPM_ALL;
301 case POLICY_DEFAULT:
302 return link->aspm_default;
303 }
304 return 0;
305 }
306
307 static int policy_to_clkpm_state(struct pcie_link_state *link)
308 {
309 switch (aspm_policy) {
310 case POLICY_PERFORMANCE:
311 /* Disable ASPM and Clock PM */
312 return 0;
313 case POLICY_POWERSAVE:
314 case POLICY_POWER_SUPERSAVE:
315 /* Enable Clock PM */
316 return 1;
317 case POLICY_DEFAULT:
318 return link->clkpm_default;
319 }
320 return 0;
321 }
322
323 static void pci_update_aspm_saved_state(struct pci_dev *dev)
324 {
325 struct pci_cap_saved_state *save_state;
326 u16 *cap, lnkctl, aspm_ctl;
327
328 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
329 if (!save_state)
330 return;
331
332 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnkctl);
333
334 /*
335 * Update ASPM and CLKREQ bits of LNKCTL in save_state. We only
336 * write PCI_EXP_LNKCTL_CCC during enumeration, so it shouldn't
337 * change after being captured in save_state.
338 */
339 aspm_ctl = lnkctl & (PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
340 lnkctl &= ~(PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
341
342 /* Depends on pci_save_pcie_state(): cap[1] is LNKCTL */
343 cap = (u16 *)&save_state->cap.data[0];
344 cap[1] = lnkctl | aspm_ctl;
345 }
346
347 static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
348 {
349 struct pci_dev *child;
350 struct pci_bus *linkbus = link->pdev->subordinate;
351 u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
352
353 list_for_each_entry(child, &linkbus->devices, bus_list) {
354 pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
355 PCI_EXP_LNKCTL_CLKREQ_EN,
356 val);
357 pci_update_aspm_saved_state(child);
358 }
359 link->clkpm_enabled = !!enable;
360 }
361
362 static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
363 {
364 /*
365 * Don't enable Clock PM if the link is not Clock PM capable
366 * or Clock PM is disabled
367 */
368 if (!link->clkpm_capable || link->clkpm_disable)
369 enable = 0;
370 /* Nothing to do if the requested state equals the current state */
371 if (link->clkpm_enabled == enable)
372 return;
373 pcie_set_clkpm_nocheck(link, enable);
374 }
375
376 static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
377 {
378 int capable = 1, enabled = 1;
379 u32 reg32;
380 u16 reg16;
381 struct pci_dev *child;
382 struct pci_bus *linkbus = link->pdev->subordinate;
383
384 /* All functions should have the same cap and state, take the worst */
385 list_for_each_entry(child, &linkbus->devices, bus_list) {
386 pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
387 if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
388 capable = 0;
389 enabled = 0;
390 break;
391 }
392 pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
393 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
394 enabled = 0;
395 }
396 link->clkpm_enabled = enabled;
397 link->clkpm_default = enabled;
398 link->clkpm_capable = capable;
399 link->clkpm_disable = blacklist ? 1 : 0;
400 }
401
402 /*
403 * pcie_aspm_configure_common_clock: check whether the two ends of a link
404 * could use a common clock. If they can, configure them to use the
405 * common clock, which reduces the ASPM state exit latency.
406 */
407 static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
408 {
409 int same_clock = 1;
410 u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
411 struct pci_dev *child, *parent = link->pdev;
412 struct pci_bus *linkbus = parent->subordinate;
413 /*
414 * All functions of a slot should have the same Slot Clock
415 * Configuration, so just check one function
416 */
417 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
418 BUG_ON(!pci_is_pcie(child));
419
420 /* Check whether the downstream component's Slot Clock Configuration bit is set */
421 pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
422 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
423 same_clock = 0;
424
425 /* Check whether the upstream component's Slot Clock Configuration bit is set */
426 pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
427 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
428 same_clock = 0;
429
430 /* Port might be already in common clock mode */
431 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
432 parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
433 if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
434 bool consistent = true;
435
436 list_for_each_entry(child, &linkbus->devices, bus_list) {
437 pcie_capability_read_word(child, PCI_EXP_LNKCTL,
438 &reg16);
439 if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
440 consistent = false;
441 break;
442 }
443 }
444 if (consistent)
445 return;
446 pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
447 }
448
449 ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
450 /* Configure downstream component, all functions */
451 list_for_each_entry(child, &linkbus->devices, bus_list) {
452 pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
453 child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
454 pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
455 PCI_EXP_LNKCTL_CCC, ccc);
456 }
457
458 /* Configure upstream component */
459 pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
460 PCI_EXP_LNKCTL_CCC, ccc);
461
462 if (pcie_retrain_link(link->pdev, true)) {
463
464 /* Training failed. Restore common clock configurations */
465 pci_err(parent, "ASPM: Could not configure common clock\n");
466 list_for_each_entry(child, &linkbus->devices, bus_list)
467 pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
468 PCI_EXP_LNKCTL_CCC,
469 child_old_ccc[PCI_FUNC(child->devfn)]);
470 pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
471 PCI_EXP_LNKCTL_CCC, parent_old_ccc);
472 }
473 }
474
475 /* Convert L0s latency encoding to ns */
476 static u32 calc_l0s_latency(u32 lnkcap)
477 {
478 u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L0SEL, lnkcap);
479
480 if (encoding == 0x7)
481 return 5 * NSEC_PER_USEC; /* > 4us */
482 return (64 << encoding);
483 }
484
485 /* Convert L0s acceptable latency encoding to ns */
486 static u32 calc_l0s_acceptable(u32 encoding)
487 {
488 if (encoding == 0x7)
489 return U32_MAX;
490 return (64 << encoding);
491 }
492
493 /* Convert L1 latency encoding to ns */
494 static u32 calc_l1_latency(u32 lnkcap)
495 {
496 u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L1EL, lnkcap);
497
498 if (encoding == 0x7)
499 return 65 * NSEC_PER_USEC; /* > 64us */
500 return NSEC_PER_USEC << encoding;
501 }
502
503 /* Convert L1 acceptable latency encoding to ns */
504 static u32 calc_l1_acceptable(u32 encoding)
505 {
506 if (encoding == 0x7)
507 return U32_MAX;
508 return NSEC_PER_USEC << encoding;
509 }
510
511 /* Convert L1SS T_pwr encoding to usec */
512 static u32 calc_l12_pwron(struct pci_dev *pdev, u32 scale, u32 val)
513 {
514 switch (scale) {
515 case 0:
516 return val * 2;
517 case 1:
518 return val * 10;
519 case 2:
520 return val * 100;
521 }
522 pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
523 return 0;
524 }
525
526 /*
527 * Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
528 * register. Ports enter L1.2 when the most recent LTR value is greater
529 * than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
530 * don't enter L1.2 too aggressively.
531 *
532 * See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
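*
* Worked example (illustrative only): threshold_us = 55 gives
* threshold_ns = 55000, which is larger than 32 * 0x3ff but fits in
* 1024 * 0x3ff, so *scale = 2 (1024ns units) and
* *value = roundup(55000, 1024) / 1024 = 54.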
533 */
534 static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
535 {
536 u64 threshold_ns = (u64)threshold_us * NSEC_PER_USEC;
537
538 /*
539 * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
540 * value of 0x3ff.
541 */
542 if (threshold_ns <= 1 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
543 *scale = 0; /* Value times 1ns */
544 *value = threshold_ns;
545 } else if (threshold_ns <= 32 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
546 *scale = 1; /* Value times 32ns */
547 *value = roundup(threshold_ns, 32) / 32;
548 } else if (threshold_ns <= 1024 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
549 *scale = 2; /* Value times 1024ns */
550 *value = roundup(threshold_ns, 1024) / 1024;
551 } else if (threshold_ns <= 32768 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
552 *scale = 3; /* Value times 32768ns */
553 *value = roundup(threshold_ns, 32768) / 32768;
554 } else if (threshold_ns <= 1048576 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
555 *scale = 4; /* Value times 1048576ns */
556 *value = roundup(threshold_ns, 1048576) / 1048576;
557 } else if (threshold_ns <= (u64)33554432 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
558 *scale = 5; /* Value times 33554432ns */
559 *value = roundup(threshold_ns, 33554432) / 33554432;
560 } else {
561 *scale = 5;
562 *value = FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE);
563 }
564 }
565
566 static void pcie_aspm_check_latency(struct pci_dev *endpoint)
567 {
568 u32 latency, encoding, lnkcap_up, lnkcap_dw;
569 u32 l1_switch_latency = 0, latency_up_l0s;
570 u32 latency_up_l1, latency_dw_l0s, latency_dw_l1;
571 u32 acceptable_l0s, acceptable_l1;
572 struct pcie_link_state *link;
573
574 /* Device not in D0 doesn't need latency check */
575 if ((endpoint->current_state != PCI_D0) &&
576 (endpoint->current_state != PCI_UNKNOWN))
577 return;
578
579 link = endpoint->bus->self->link_state;
580
581 /* Calculate endpoint L0s acceptable latency */
582 encoding = FIELD_GET(PCI_EXP_DEVCAP_L0S, endpoint->devcap);
583 acceptable_l0s = calc_l0s_acceptable(encoding);
584
585 /* Calculate endpoint L1 acceptable latency */
586 encoding = FIELD_GET(PCI_EXP_DEVCAP_L1, endpoint->devcap);
587 acceptable_l1 = calc_l1_acceptable(encoding);
588
589 while (link) {
590 struct pci_dev *dev = pci_function_0(link->pdev->subordinate);
591
592 /* Read direction exit latencies */
593 pcie_capability_read_dword(link->pdev, PCI_EXP_LNKCAP,
594 &lnkcap_up);
595 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP,
596 &lnkcap_dw);
597 latency_up_l0s = calc_l0s_latency(lnkcap_up);
598 latency_up_l1 = calc_l1_latency(lnkcap_up);
599 latency_dw_l0s = calc_l0s_latency(lnkcap_dw);
600 latency_dw_l1 = calc_l1_latency(lnkcap_dw);
601
602 /* Check upstream direction L0s latency */
603 if ((link->aspm_capable & PCIE_LINK_STATE_L0S_UP) &&
604 (latency_up_l0s > acceptable_l0s))
605 link->aspm_capable &= ~PCIE_LINK_STATE_L0S_UP;
606
607 /* Check downstream direction L0s latency */
608 if ((link->aspm_capable & PCIE_LINK_STATE_L0S_DW) &&
609 (latency_dw_l0s > acceptable_l0s))
610 link->aspm_capable &= ~PCIE_LINK_STATE_L0S_DW;
611 /*
612 * Check L1 latency.
613 * Every switch on the path to the root complex needs 1
614 * more microsecond for L1. The spec doesn't mention L0s.
615 *
616 * The exit latencies for L1 substates are not advertised
617 * by a device. Since the spec also doesn't mention a way
618 * to determine max latencies introduced by enabling L1
619 * substates on the components, it is not clear how to do
620 * an L1 substate exit latency check. We assume that the
621 * L1 exit latencies advertised by a device include L1
622 * substate latencies (and hence do not do any check).
623 */
624 latency = max_t(u32, latency_up_l1, latency_dw_l1);
625 if ((link->aspm_capable & PCIE_LINK_STATE_L1) &&
626 (latency + l1_switch_latency > acceptable_l1))
627 link->aspm_capable &= ~PCIE_LINK_STATE_L1;
628 l1_switch_latency += NSEC_PER_USEC;
629
630 link = link->parent;
631 }
632 }
633
634 /* Calculate L1.2 PM substate timing parameters */
635 static void aspm_calc_l12_info(struct pcie_link_state *link,
636 u32 parent_l1ss_cap, u32 child_l1ss_cap)
637 {
638 struct pci_dev *child = link->downstream, *parent = link->pdev;
639 u32 val1, val2, scale1, scale2;
640 u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
641 u32 ctl1 = 0, ctl2 = 0;
642 u32 pctl1, pctl2, cctl1, cctl2;
643 u32 pl1_2_enables, cl1_2_enables;
644
645 /* Choose the greater of the two Port Common_Mode_Restore_Times */
646 val1 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, parent_l1ss_cap);
647 val2 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, child_l1ss_cap);
648 t_common_mode = max(val1, val2);
649
650 /* Choose the greater of the two Port T_POWER_ON times */
651 val1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, parent_l1ss_cap);
652 scale1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, parent_l1ss_cap);
653 val2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, child_l1ss_cap);
654 scale2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, child_l1ss_cap);
655
656 if (calc_l12_pwron(parent, scale1, val1) >
657 calc_l12_pwron(child, scale2, val2)) {
658 ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale1) |
659 FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val1);
660 t_power_on = calc_l12_pwron(parent, scale1, val1);
661 } else {
662 ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale2) |
663 FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val2);
664 t_power_on = calc_l12_pwron(child, scale2, val2);
665 }
666
667 /*
668 * Set LTR_L1.2_THRESHOLD to the time required to transition the
669 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
670 * downstream devices report (via LTR) that they can tolerate at
671 * least that much latency.
672 *
673 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
674 * Table 5-11. T(POWER_OFF) is at most 2us and T(L1.2) is at
675 * least 4us.
676 */
677 l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
678 encode_l12_threshold(l1_2_threshold, &scale, &value);
679 ctl1 |= FIELD_PREP(PCI_L1SS_CTL1_CM_RESTORE_TIME, t_common_mode) |
680 FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_VALUE, value) |
681 FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_SCALE, scale);
682
683 /* Some broken devices only support dword access to L1 SS */
684 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
685 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
686 pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
687 pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);
688
689 if (ctl1 == pctl1 && ctl1 == cctl1 &&
690 ctl2 == pctl2 && ctl2 == cctl2)
691 return;
692
693 /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
694 pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
695 cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
696
697 if (pl1_2_enables || cl1_2_enables) {
698 pci_clear_and_set_config_dword(child,
699 child->l1ss + PCI_L1SS_CTL1,
700 PCI_L1SS_CTL1_L1_2_MASK, 0);
701 pci_clear_and_set_config_dword(parent,
702 parent->l1ss + PCI_L1SS_CTL1,
703 PCI_L1SS_CTL1_L1_2_MASK, 0);
704 }
705
706 /* Program T_POWER_ON times in both ports */
707 pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
708 pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
709
710 /* Program Common_Mode_Restore_Time in upstream device */
711 pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
712 PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
713
714 /* Program LTR_L1.2_THRESHOLD time in both ports */
715 pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
716 PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
717 PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
718 ctl1);
719 pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
720 PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
721 PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
722 ctl1);
723
724 if (pl1_2_enables || cl1_2_enables) {
725 pci_clear_and_set_config_dword(parent,
726 parent->l1ss + PCI_L1SS_CTL1, 0,
727 pl1_2_enables);
728 pci_clear_and_set_config_dword(child,
729 child->l1ss + PCI_L1SS_CTL1, 0,
730 cl1_2_enables);
731 }
732 }
733
734 static void aspm_l1ss_init(struct pcie_link_state *link)
735 {
736 struct pci_dev *child = link->downstream, *parent = link->pdev;
737 u32 parent_l1ss_cap, child_l1ss_cap;
738 u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
739
740 if (!parent->l1ss || !child->l1ss)
741 return;
742
743 /* Setup L1 substate */
744 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
745 &parent_l1ss_cap);
746 pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
747 &child_l1ss_cap);
748
749 if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
750 parent_l1ss_cap = 0;
751 if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
752 child_l1ss_cap = 0;
753
754 /*
755 * If we don't have LTR for the entire path from the Root Complex
756 * to this device, we can't use ASPM L1.2 because it relies on the
757 * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
758 */
759 if (!child->ltr_path)
760 child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
761
762 if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
763 link->aspm_support |= PCIE_LINK_STATE_L1_1;
764 if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
765 link->aspm_support |= PCIE_LINK_STATE_L1_2;
766 if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
767 link->aspm_support |= PCIE_LINK_STATE_L1_1_PCIPM;
768 if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
769 link->aspm_support |= PCIE_LINK_STATE_L1_2_PCIPM;
770
771 if (parent_l1ss_cap)
772 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
773 &parent_l1ss_ctl1);
774 if (child_l1ss_cap)
775 pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
776 &child_l1ss_ctl1);
777
778 if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
779 link->aspm_enabled |= PCIE_LINK_STATE_L1_1;
780 if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
781 link->aspm_enabled |= PCIE_LINK_STATE_L1_2;
782 if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
783 link->aspm_enabled |= PCIE_LINK_STATE_L1_1_PCIPM;
784 if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
785 link->aspm_enabled |= PCIE_LINK_STATE_L1_2_PCIPM;
786
787 if (link->aspm_support & PCIE_LINK_STATE_L1_2_MASK)
788 aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
789 }
790
791 static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
792 {
793 struct pci_dev *child = link->downstream, *parent = link->pdev;
794 u32 parent_lnkcap, child_lnkcap;
795 u16 parent_lnkctl, child_lnkctl;
796 struct pci_bus *linkbus = parent->subordinate;
797
798 if (blacklist) {
799 /* Set enabled/disable so that we will disable ASPM later */
800 link->aspm_enabled = PCIE_LINK_STATE_ASPM_ALL;
801 link->aspm_disable = PCIE_LINK_STATE_ASPM_ALL;
802 return;
803 }
804
805 /*
806 * If ASPM not supported, don't mess with the clocks and link,
807 * bail out now.
808 */
809 pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
810 pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
811 if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
812 return;
813
814 /* Configure common clock before checking latencies */
815 pcie_aspm_configure_common_clock(link);
816
817 /*
818 * Re-read upstream/downstream components' register state after
819 * clock configuration. L0s & L1 exit latencies in the otherwise
820 * read-only Link Capabilities may change depending on common clock
821 * configuration (PCIe r5.0, sec 7.5.3.6).
822 */
823 pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
824 pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
825 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
826 pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
827
828 /* Disable L0s/L1 before updating L1SS config */
829 if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
830 FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
831 pcie_capability_write_word(child, PCI_EXP_LNKCTL,
832 child_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
833 pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
834 parent_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
835 }
836
837 /*
838 * Setup L0s state
839 *
840 * Note that we must not enable L0s in either direction on a
841 * given link unless components on both sides of the link each
842 * support L0s.
843 */
844 if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
845 link->aspm_support |= PCIE_LINK_STATE_L0S;
846
847 if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
848 link->aspm_enabled |= PCIE_LINK_STATE_L0S_UP;
849 if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
850 link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW;
851
852 /* Setup L1 state */
853 if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
854 link->aspm_support |= PCIE_LINK_STATE_L1;
855
856 if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
857 link->aspm_enabled |= PCIE_LINK_STATE_L1;
858
859 aspm_l1ss_init(link);
860
861 /* Restore L0s/L1 if they were enabled */
862 if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
863 FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
864 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_lnkctl);
865 pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_lnkctl);
866 }
867
868 /* Save default state */
869 link->aspm_default = link->aspm_enabled;
870
871 /* Setup initial capable state. Will be updated later */
872 link->aspm_capable = link->aspm_support;
873
874 /* Get and check endpoint acceptable latencies */
875 list_for_each_entry(child, &linkbus->devices, bus_list) {
876 if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
877 pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
878 continue;
879
880 pcie_aspm_check_latency(child);
881 }
882 }
883
884 /* Configure the ASPM L1 substates. Caller must disable L1 first. */
885 static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
886 {
887 u32 val;
888 struct pci_dev *child = link->downstream, *parent = link->pdev;
889
890 val = 0;
891 if (state & PCIE_LINK_STATE_L1_1)
892 val |= PCI_L1SS_CTL1_ASPM_L1_1;
893 if (state & PCIE_LINK_STATE_L1_2)
894 val |= PCI_L1SS_CTL1_ASPM_L1_2;
895 if (state & PCIE_LINK_STATE_L1_1_PCIPM)
896 val |= PCI_L1SS_CTL1_PCIPM_L1_1;
897 if (state & PCIE_LINK_STATE_L1_2_PCIPM)
898 val |= PCI_L1SS_CTL1_PCIPM_L1_2;
899
900 /*
901 * PCIe r6.2, sec 5.5.4, rules for enabling L1 PM Substates:
902 * - Clear L1.x enable bits at child first, then at parent
903 * - Set L1.x enable bits at parent first, then at child
904 * - ASPM/PCIPM L1.2 must be disabled while programming timing
905 * parameters
906 */
907
908 /* Disable all L1 substates */
909 pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
910 PCI_L1SS_CTL1_L1SS_MASK, 0);
911 pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
912 PCI_L1SS_CTL1_L1SS_MASK, 0);
913
914 /* Enable what we need to enable */
915 pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
916 PCI_L1SS_CTL1_L1SS_MASK, val);
917 pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
918 PCI_L1SS_CTL1_L1SS_MASK, val);
919 }
920
921 static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
922 {
923 pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
924 PCI_EXP_LNKCTL_ASPMC, val);
925 }
926
927 static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
928 {
929 u32 upstream = 0, dwstream = 0;
930 struct pci_dev *child = link->downstream, *parent = link->pdev;
931 struct pci_bus *linkbus = parent->subordinate;
932
933 /* Enable only the states that were not explicitly disabled */
934 state &= (link->aspm_capable & ~link->aspm_disable);
935
936 /* Can't enable any substates if L1 is not enabled */
937 if (!(state & PCIE_LINK_STATE_L1))
938 state &= ~PCIE_LINK_STATE_L1SS;
939
940 /* Spec says both ports must be in D0 before enabling PCI PM substates */
941 if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
942 state &= ~PCIE_LINK_STATE_L1_SS_PCIPM;
943 state |= (link->aspm_enabled & PCIE_LINK_STATE_L1_SS_PCIPM);
944 }
945
946 /* Nothing to do if the link is already in the requested state */
947 if (link->aspm_enabled == state)
948 return;
949 /* Convert ASPM state to upstream/downstream ASPM register state */
950 if (state & PCIE_LINK_STATE_L0S_UP)
951 dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
952 if (state & PCIE_LINK_STATE_L0S_DW)
953 upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
954 if (state & PCIE_LINK_STATE_L1) {
955 upstream |= PCI_EXP_LNKCTL_ASPM_L1;
956 dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
957 }
958
959 /*
960 * Per PCIe r6.2, sec 5.5.4, setting either or both of the enable
961 * bits for ASPM L1 PM Substates must be done while ASPM L1 is
962 * disabled. Disable L1 here and apply new configuration after L1SS
963 * configuration has been completed.
964 *
965 * Per sec 7.5.3.7, when disabling ASPM L1, software must disable
966 * it in the Downstream component prior to disabling it in the
967 * Upstream component, and ASPM L1 must be enabled in the Upstream
968 * component prior to enabling it in the Downstream component.
969 *
970 * Sec 7.5.3.7 also recommends programming the same ASPM Control
971 * value for all functions of a multi-function device.
972 */
973 list_for_each_entry(child, &linkbus->devices, bus_list)
974 pcie_config_aspm_dev(child, 0);
975 pcie_config_aspm_dev(parent, 0);
976
977 if (link->aspm_capable & PCIE_LINK_STATE_L1SS)
978 pcie_config_aspm_l1ss(link, state);
979
980 pcie_config_aspm_dev(parent, upstream);
981 list_for_each_entry(child, &linkbus->devices, bus_list)
982 pcie_config_aspm_dev(child, dwstream);
983
984 link->aspm_enabled = state;
985
986 /* Update latest ASPM configuration in saved context */
987 pci_save_aspm_l1ss_state(link->downstream);
988 pci_update_aspm_saved_state(link->downstream);
989 pci_save_aspm_l1ss_state(parent);
990 pci_update_aspm_saved_state(parent);
991 }
992
993 static void pcie_config_aspm_path(struct pcie_link_state *link)
994 {
995 while (link) {
996 pcie_config_aspm_link(link, policy_to_aspm_state(link));
997 link = link->parent;
998 }
999 }
1000
1001 static void free_link_state(struct pcie_link_state *link)
1002 {
1003 link->pdev->link_state = NULL;
1004 kfree(link);
1005 }
1006
1007 static int pcie_aspm_sanity_check(struct pci_dev *pdev)
1008 {
1009 struct pci_dev *child;
1010 u32 reg32;
1011
1012 /*
1013 * Some functions in a slot might not be PCIe functions at all,
1014 * which is very strange. Disable ASPM for the whole slot.
1015 */
1016 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
1017 if (!pci_is_pcie(child))
1018 return -EINVAL;
1019
1020 /*
1021 * If ASPM is disabled then we're not going to change
1022 * the BIOS state. It's safe to continue even if it's a
1023 * pre-1.1 device
1024 */
1025
1026 if (aspm_disabled)
1027 continue;
1028
1029 /*
1030 * Disable ASPM for pre-1.1 PCIe devices. Like Microsoft, we use the
1031 * RBER bit to determine whether a function is a 1.1 device.
1032 */
1033 pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
1034 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
1035 pci_info(child, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
1036 return -EINVAL;
1037 }
1038 }
1039 return 0;
1040 }
1041
1042 static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
1043 {
1044 struct pcie_link_state *link;
1045
1046 link = kzalloc(sizeof(*link), GFP_KERNEL);
1047 if (!link)
1048 return NULL;
1049
1050 INIT_LIST_HEAD(&link->sibling);
1051 link->pdev = pdev;
1052 link->downstream = pci_function_0(pdev->subordinate);
1053
1054 /*
1055 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
1056 * hierarchies. Note that some PCIe host implementations omit
1057 * the root ports entirely, in which case a downstream port on
1058 * a switch may become the root of the link state chain for all
1059 * its subordinate endpoints.
1060 */
1061 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
1062 pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
1063 !pdev->bus->parent->self) {
1064 link->root = link;
1065 } else {
1066 struct pcie_link_state *parent;
1067
1068 parent = pdev->bus->parent->self->link_state;
1069 if (!parent) {
1070 kfree(link);
1071 return NULL;
1072 }
1073
1074 link->parent = parent;
1075 link->root = link->parent->root;
1076 }
1077
1078 list_add(&link->sibling, &link_list);
1079 pdev->link_state = link;
1080 return link;
1081 }
1082
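/* Re-evaluate which ASPM control sysfs attributes are visible for devices below @pdev */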
1083 static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
1084 {
1085 struct pci_dev *child;
1086
1087 list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
1088 sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
1089 }
1090
1091 /*
1092 * pcie_aspm_init_link_state: Initialize PCIe link state.
1093 * Called after a PCIe port and its child devices have been scanned.
1094 * @pdev: the root port or switch downstream port
1095 */
1096 void pcie_aspm_init_link_state(struct pci_dev *pdev)
1097 {
1098 struct pcie_link_state *link;
1099 int blacklist = !!pcie_aspm_sanity_check(pdev);
1100
1101 if (!aspm_support_enabled)
1102 return;
1103
1104 if (pdev->link_state)
1105 return;
1106
1107 /*
1108 * We allocate pcie_link_state for the component on the upstream
1109 * end of a Link, so there's nothing to do unless this device is
1110 * a downstream port.
1111 */
1112 if (!pcie_downstream_port(pdev))
1113 return;
1114
1115 /* VIA has a strange chipset, root port is under a bridge */
1116 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
1117 pdev->bus->self)
1118 return;
1119
1120 down_read(&pci_bus_sem);
1121 if (list_empty(&pdev->subordinate->devices))
1122 goto out;
1123
1124 mutex_lock(&aspm_lock);
1125 link = alloc_pcie_link_state(pdev);
1126 if (!link)
1127 goto unlock;
1128 /*
1129 * Set up the initial ASPM state. Note that we need to configure
1130 * upstream links as well because their capable state can be
1131 * updated through pcie_aspm_cap_init().
1132 */
1133 pcie_aspm_cap_init(link, blacklist);
1134
1135 /* Setup initial Clock PM state */
1136 pcie_clkpm_cap_init(link, blacklist);
1137
1138 /*
1139 * At this stage drivers haven't had an opportunity to change the
1140 * link policy setting. Enabling ASPM on broken hardware can cripple
1141 * it even before the driver has had a chance to disable ASPM, so
1142 * default to a safe level right now. If we're enabling ASPM beyond
1143 * the BIOS's expectation, we'll do so once pci_enable_device() is
1144 * called.
1145 */
1146 if (aspm_policy != POLICY_POWERSAVE &&
1147 aspm_policy != POLICY_POWER_SUPERSAVE) {
1148 pcie_config_aspm_path(link);
1149 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1150 }
1151
1152 pcie_aspm_update_sysfs_visibility(pdev);
1153
1154 unlock:
1155 mutex_unlock(&aspm_lock);
1156 out:
1157 up_read(&pci_bus_sem);
1158 }
1159
1160 void pci_bridge_reconfigure_ltr(struct pci_dev *pdev)
1161 {
1162 struct pci_dev *bridge;
1163 u32 ctl;
1164
1165 bridge = pci_upstream_bridge(pdev);
1166 if (bridge && bridge->ltr_path) {
1167 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
1168 if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
1169 pci_dbg(bridge, "re-enabling LTR\n");
1170 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
1171 PCI_EXP_DEVCTL2_LTR_EN);
1172 }
1173 }
1174 }
1175
1176 void pci_configure_ltr(struct pci_dev *pdev)
1177 {
1178 struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
1179 struct pci_dev *bridge;
1180 u32 cap, ctl;
1181
1182 if (!pci_is_pcie(pdev))
1183 return;
1184
1185 pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &cap);
1186 if (!(cap & PCI_EXP_DEVCAP2_LTR))
1187 return;
1188
1189 pcie_capability_read_dword(pdev, PCI_EXP_DEVCTL2, &ctl);
1190 if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
1191 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
1192 pdev->ltr_path = 1;
1193 return;
1194 }
1195
1196 bridge = pci_upstream_bridge(pdev);
1197 if (bridge && bridge->ltr_path)
1198 pdev->ltr_path = 1;
1199
1200 return;
1201 }
1202
1203 if (!host->native_ltr)
1204 return;
1205
1206 /*
1207 * Software must not enable LTR in an Endpoint unless the Root
1208 * Complex and all intermediate Switches indicate support for LTR.
1209 * PCIe r4.0, sec 6.18.
1210 */
1211 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
1212 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
1213 PCI_EXP_DEVCTL2_LTR_EN);
1214 pdev->ltr_path = 1;
1215 return;
1216 }
1217
1218 /*
1219 * If we're configuring a hot-added device, LTR was likely
1220 * disabled in the upstream bridge, so re-enable it before enabling
1221 * it in the new device.
1222 */
1223 bridge = pci_upstream_bridge(pdev);
1224 if (bridge && bridge->ltr_path) {
1225 pci_bridge_reconfigure_ltr(pdev);
1226 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
1227 PCI_EXP_DEVCTL2_LTR_EN);
1228 pdev->ltr_path = 1;
1229 }
1230 }
1231
1232 /* Recheck latencies and update aspm_capable for links under the root */
1233 static void pcie_update_aspm_capable(struct pcie_link_state *root)
1234 {
1235 struct pcie_link_state *link;
1236 BUG_ON(root->parent);
1237 list_for_each_entry(link, &link_list, sibling) {
1238 if (link->root != root)
1239 continue;
1240 link->aspm_capable = link->aspm_support;
1241 }
1242 list_for_each_entry(link, &link_list, sibling) {
1243 struct pci_dev *child;
1244 struct pci_bus *linkbus = link->pdev->subordinate;
1245 if (link->root != root)
1246 continue;
1247 list_for_each_entry(child, &linkbus->devices, bus_list) {
1248 if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
1249 (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
1250 continue;
1251 pcie_aspm_check_latency(child);
1252 }
1253 }
1254 }
1255
1256 /* @pdev: the endpoint device */
1257 void pcie_aspm_exit_link_state(struct pci_dev *pdev)
1258 {
1259 struct pci_dev *parent = pdev->bus->self;
1260 struct pcie_link_state *link, *root, *parent_link;
1261
1262 if (!parent || !parent->link_state)
1263 return;
1264
1265 down_read(&pci_bus_sem);
1266 mutex_lock(&aspm_lock);
1267
1268 link = parent->link_state;
1269 root = link->root;
1270 parent_link = link->parent;
1271
1272 /*
1273 * link->downstream is a pointer to the pci_dev of function 0. If
1274 * we remove that function, the pci_dev is about to be deallocated,
1275 * so we can't use link->downstream again. Free the link state to
1276 * avoid this.
1277 *
1278 * If we're removing a non-0 function, it's possible we could
1279 * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends
1280 * programming the same ASPM Control value for all functions of
1281 * multi-function devices, so disable ASPM for all of them.
1282 */
1283 pcie_config_aspm_link(link, 0);
1284 list_del(&link->sibling);
1285 free_link_state(link);
1286
1287 /* Recheck latencies and configure upstream links */
1288 if (parent_link) {
1289 pcie_update_aspm_capable(root);
1290 pcie_config_aspm_path(parent_link);
1291 }
1292
1293 mutex_unlock(&aspm_lock);
1294 up_read(&pci_bus_sem);
1295 }
1296
1297 /*
1298 * @pdev: the root port or switch downstream port
1299 * @locked: whether pci_bus_sem is held
1300 */
1301 void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
1302 {
1303 struct pcie_link_state *link = pdev->link_state;
1304
1305 if (aspm_disabled || !link)
1306 return;
1307 /*
1308 * Devices changed PM state; recheck whether the latency
1309 * meets all functions' requirements.
1310 */
1311 if (!locked)
1312 down_read(&pci_bus_sem);
1313 mutex_lock(&aspm_lock);
1314 pcie_update_aspm_capable(link->root);
1315 pcie_config_aspm_path(link);
1316 mutex_unlock(&aspm_lock);
1317 if (!locked)
1318 up_read(&pci_bus_sem);
1319 }
1320
1321 void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
1322 {
1323 struct pcie_link_state *link = pdev->link_state;
1324
1325 if (aspm_disabled || !link)
1326 return;
1327
1328 if (aspm_policy != POLICY_POWERSAVE &&
1329 aspm_policy != POLICY_POWER_SUPERSAVE)
1330 return;
1331
1332 down_read(&pci_bus_sem);
1333 mutex_lock(&aspm_lock);
1334 pcie_config_aspm_path(link);
1335 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1336 mutex_unlock(&aspm_lock);
1337 up_read(&pci_bus_sem);
1338 }
1339
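/* Return the pcie_link_state of the Link leading to @pdev (owned by its upstream bridge), if any */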
1340 static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
1341 {
1342 struct pci_dev *bridge;
1343
1344 if (!pci_is_pcie(pdev))
1345 return NULL;
1346
1347 bridge = pci_upstream_bridge(pdev);
1348 if (!bridge || !pci_is_pcie(bridge))
1349 return NULL;
1350
1351 return bridge->link_state;
1352 }
1353
1354 static u8 pci_calc_aspm_disable_mask(int state)
1355 {
1356 state &= ~PCIE_LINK_STATE_CLKPM;
1357
1358 /* L1 PM substates require L1 */
1359 if (state & PCIE_LINK_STATE_L1)
1360 state |= PCIE_LINK_STATE_L1SS;
1361
1362 return state;
1363 }
1364
1365 static u8 pci_calc_aspm_enable_mask(int state)
1366 {
1367 state &= ~PCIE_LINK_STATE_CLKPM;
1368
1369 /* L1 PM substates require L1 */
1370 if (state & PCIE_LINK_STATE_L1SS)
1371 state |= PCIE_LINK_STATE_L1;
1372
1373 return state;
1374 }
1375
1376 static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
1377 {
1378 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1379
1380 if (!link)
1381 return -EINVAL;
1382 /*
1383 * A driver requested that ASPM be disabled on this device, but
1384 * if we don't have permission to manage ASPM (e.g., on ACPI
1385 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
1386 * the _OSC method), we can't honor that request. Windows has
1387 * a similar mechanism using "PciASPMOptOut", which is also
1388 * ignored in this situation.
1389 */
1390 if (aspm_disabled) {
1391 pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
1392 return -EPERM;
1393 }
1394
1395 if (!locked)
1396 down_read(&pci_bus_sem);
1397 mutex_lock(&aspm_lock);
1398 link->aspm_disable |= pci_calc_aspm_disable_mask(state);
1399 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1400
1401 if (state & PCIE_LINK_STATE_CLKPM)
1402 link->clkpm_disable = 1;
1403 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1404 mutex_unlock(&aspm_lock);
1405 if (!locked)
1406 up_read(&pci_bus_sem);
1407
1408 return 0;
1409 }
1410
1411 int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1412 {
1413 lockdep_assert_held_read(&pci_bus_sem);
1414
1415 return __pci_disable_link_state(pdev, state, true);
1416 }
1417 EXPORT_SYMBOL(pci_disable_link_state_locked);
1418
1419 /**
1420 * pci_disable_link_state - Disable device's link state, so the link will
1421 * never enter specific states. Note that if the BIOS didn't grant ASPM
1422 * control to the OS, this does nothing because we can't touch the LNKCTL
1423 * register. Returns 0 or a negative errno.
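*
* For example, a driver that cannot tolerate L1 exit latency could call
* pci_disable_link_state(pdev, PCIE_LINK_STATE_L1) from its probe path.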
1424 *
1425 * @pdev: PCI device
1426 * @state: ASPM link state to disable
1427 */
1428 int pci_disable_link_state(struct pci_dev *pdev, int state)
1429 {
1430 return __pci_disable_link_state(pdev, state, false);
1431 }
1432 EXPORT_SYMBOL(pci_disable_link_state);
1433
1434 static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
1435 {
1436 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1437
1438 if (!link)
1439 return -EINVAL;
1440 /*
1441 * A driver requested that ASPM be enabled on this device, but
1442 * if we don't have permission to manage ASPM (e.g., on ACPI
1443 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
1444 * the _OSC method), we can't honor that request.
1445 */
1446 if (aspm_disabled) {
1447 pci_warn(pdev, "can't override BIOS ASPM; OS doesn't have ASPM control\n");
1448 return -EPERM;
1449 }
1450
1451 if (!locked)
1452 down_read(&pci_bus_sem);
1453 mutex_lock(&aspm_lock);
1454 link->aspm_default = pci_calc_aspm_enable_mask(state);
1455 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1456
1457 link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
1458 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1459 mutex_unlock(&aspm_lock);
1460 if (!locked)
1461 up_read(&pci_bus_sem);
1462
1463 return 0;
1464 }
1465
1466 /**
1467 * pci_enable_link_state - Clear and set the default device link state so that
1468 * the link may be allowed to enter the specified states. Note that if the
1469 * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
1470 * touch the LNKCTL register. Also note that this does not enable states
1471 * disabled by pci_disable_link_state(). Return 0 or a negative errno.
1472 *
1473 * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
1474 * PCIe r6.0, sec 5.5.4.
1475 *
1476 * @pdev: PCI device
1477 * @state: Mask of ASPM link states to enable
1478 */
1479 int pci_enable_link_state(struct pci_dev *pdev, int state)
1480 {
1481 return __pci_enable_link_state(pdev, state, false);
1482 }
1483 EXPORT_SYMBOL(pci_enable_link_state);
1484
1485 /**
1486 * pci_enable_link_state_locked - Clear and set the default device link state
1487 * so that the link may be allowed to enter the specified states. Note that if
1488 * the BIOS didn't grant ASPM control to the OS, this does nothing because we
1489 * can't touch the LNKCTL register. Also note that this does not enable states
1490 * disabled by pci_disable_link_state(). Return 0 or a negative errno.
1491 *
1492 * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
1493 * PCIe r6.0, sec 5.5.4.
1494 *
1495 * @pdev: PCI device
1496 * @state: Mask of ASPM link states to enable
1497 *
1498 * Context: Caller holds pci_bus_sem read lock.
1499 */
1500 int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1501 {
1502 lockdep_assert_held_read(&pci_bus_sem);
1503
1504 return __pci_enable_link_state(pdev, state, true);
1505 }
1506 EXPORT_SYMBOL(pci_enable_link_state_locked);
1507
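/* Handler for the "pcie_aspm.policy" module parameter; reconfigures every link */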
1508 static int pcie_aspm_set_policy(const char *val,
1509 const struct kernel_param *kp)
1510 {
1511 int i;
1512 struct pcie_link_state *link;
1513
1514 if (aspm_disabled)
1515 return -EPERM;
1516 i = sysfs_match_string(policy_str, val);
1517 if (i < 0)
1518 return i;
1519 if (i == aspm_policy)
1520 return 0;
1521
1522 down_read(&pci_bus_sem);
1523 mutex_lock(&aspm_lock);
1524 aspm_policy = i;
1525 list_for_each_entry(link, &link_list, sibling) {
1526 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1527 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1528 }
1529 mutex_unlock(&aspm_lock);
1530 up_read(&pci_bus_sem);
1531 return 0;
1532 }
1533
1534 static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
1535 {
1536 int i, cnt = 0;
1537 for (i = 0; i < ARRAY_SIZE(policy_str); i++)
1538 if (i == aspm_policy)
1539 cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
1540 else
1541 cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
1542 cnt += sprintf(buffer + cnt, "\n");
1543 return cnt;
1544 }
1545
1546 module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
1547 NULL, 0644);
1548
1549 /**
1550 * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
1551 * @pdev: Target device.
1552 *
1553 * Relies on the upstream bridge's link_state being valid. The link_state
1554 * is deallocated only when the last child of the bridge (i.e., @pdev or a
1555 * sibling) is removed, and the caller should be holding a reference to
1556 * @pdev, so this should be safe.
1557 */
1558 bool pcie_aspm_enabled(struct pci_dev *pdev)
1559 {
1560 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1561
1562 if (!link)
1563 return false;
1564
1565 return link->aspm_enabled;
1566 }
1567 EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
1568
1569 static ssize_t aspm_attr_show_common(struct device *dev,
1570 struct device_attribute *attr,
1571 char *buf, u8 state)
1572 {
1573 struct pci_dev *pdev = to_pci_dev(dev);
1574 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1575
1576 return sysfs_emit(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
1577 }
1578
1579 static ssize_t aspm_attr_store_common(struct device *dev,
1580 struct device_attribute *attr,
1581 const char *buf, size_t len, u8 state)
1582 {
1583 struct pci_dev *pdev = to_pci_dev(dev);
1584 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1585 bool state_enable;
1586
1587 if (kstrtobool(buf, &state_enable) < 0)
1588 return -EINVAL;
1589
1590 down_read(&pci_bus_sem);
1591 mutex_lock(&aspm_lock);
1592
1593 if (state_enable) {
1594 link->aspm_disable &= ~state;
1595 /* need to enable L1 for substates */
1596 if (state & PCIE_LINK_STATE_L1SS)
1597 link->aspm_disable &= ~PCIE_LINK_STATE_L1;
1598 } else {
1599 link->aspm_disable |= state;
1600 if (state & PCIE_LINK_STATE_L1)
1601 link->aspm_disable |= PCIE_LINK_STATE_L1SS;
1602 }
1603
1604 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1605
1606 mutex_unlock(&aspm_lock);
1607 up_read(&pci_bus_sem);
1608
1609 return len;
1610 }
1611
1612 #define ASPM_ATTR(_f, _s) \
1613 static ssize_t _f##_show(struct device *dev, \
1614 struct device_attribute *attr, char *buf) \
1615 { return aspm_attr_show_common(dev, attr, buf, PCIE_LINK_STATE_##_s); } \
1616 \
1617 static ssize_t _f##_store(struct device *dev, \
1618 struct device_attribute *attr, \
1619 const char *buf, size_t len) \
1620 { return aspm_attr_store_common(dev, attr, buf, len, PCIE_LINK_STATE_##_s); }
1621
1622 ASPM_ATTR(l0s_aspm, L0S)
1623 ASPM_ATTR(l1_aspm, L1)
1624 ASPM_ATTR(l1_1_aspm, L1_1)
1625 ASPM_ATTR(l1_2_aspm, L1_2)
1626 ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
1627 ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)
1628
1629 static ssize_t clkpm_show(struct device *dev,
1630 struct device_attribute *attr, char *buf)
1631 {
1632 struct pci_dev *pdev = to_pci_dev(dev);
1633 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1634
1635 return sysfs_emit(buf, "%d\n", link->clkpm_enabled);
1636 }
1637
1638 static ssize_t clkpm_store(struct device *dev,
1639 struct device_attribute *attr,
1640 const char *buf, size_t len)
1641 {
1642 struct pci_dev *pdev = to_pci_dev(dev);
1643 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1644 bool state_enable;
1645
1646 if (kstrtobool(buf, &state_enable) < 0)
1647 return -EINVAL;
1648
1649 down_read(&pci_bus_sem);
1650 mutex_lock(&aspm_lock);
1651
1652 link->clkpm_disable = !state_enable;
1653 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1654
1655 mutex_unlock(&aspm_lock);
1656 up_read(&pci_bus_sem);
1657
1658 return len;
1659 }
1660
1661 static DEVICE_ATTR_RW(clkpm);
1662 static DEVICE_ATTR_RW(l0s_aspm);
1663 static DEVICE_ATTR_RW(l1_aspm);
1664 static DEVICE_ATTR_RW(l1_1_aspm);
1665 static DEVICE_ATTR_RW(l1_2_aspm);
1666 static DEVICE_ATTR_RW(l1_1_pcipm);
1667 static DEVICE_ATTR_RW(l1_2_pcipm);
1668
1669 static struct attribute *aspm_ctrl_attrs[] = {
1670 &dev_attr_clkpm.attr,
1671 &dev_attr_l0s_aspm.attr,
1672 &dev_attr_l1_aspm.attr,
1673 &dev_attr_l1_1_aspm.attr,
1674 &dev_attr_l1_2_aspm.attr,
1675 &dev_attr_l1_1_pcipm.attr,
1676 &dev_attr_l1_2_pcipm.attr,
1677 NULL
1678 };
1679
1680 static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
1681 struct attribute *a, int n)
1682 {
1683 struct device *dev = kobj_to_dev(kobj);
1684 struct pci_dev *pdev = to_pci_dev(dev);
1685 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1686 static const u8 aspm_state_map[] = {
1687 PCIE_LINK_STATE_L0S,
1688 PCIE_LINK_STATE_L1,
1689 PCIE_LINK_STATE_L1_1,
1690 PCIE_LINK_STATE_L1_2,
1691 PCIE_LINK_STATE_L1_1_PCIPM,
1692 PCIE_LINK_STATE_L1_2_PCIPM,
1693 };
1694
1695 if (aspm_disabled || !link)
1696 return 0;
1697
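/* Attribute 0 is clkpm; the others map 1:1 onto aspm_state_map[] */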
1698 if (n == 0)
1699 return link->clkpm_capable ? a->mode : 0;
1700
1701 return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
1702 }
1703
1704 const struct attribute_group aspm_ctrl_attr_group = {
1705 .name = "link",
1706 .attrs = aspm_ctrl_attrs,
1707 .is_visible = aspm_ctrl_attrs_are_visible,
1708 };
1709
1710 static int __init pcie_aspm_disable(char *str)
1711 {
1712 if (!strcmp(str, "off")) {
1713 aspm_policy = POLICY_DEFAULT;
1714 aspm_disabled = 1;
1715 aspm_support_enabled = false;
1716 pr_info("PCIe ASPM is disabled\n");
1717 } else if (!strcmp(str, "force")) {
1718 aspm_force = 1;
1719 pr_info("PCIe ASPM is forcibly enabled\n");
1720 }
1721 return 1;
1722 }
1723
1724 __setup("pcie_aspm=", pcie_aspm_disable);
1725
1726 void pcie_no_aspm(void)
1727 {
1728 /*
1729 * Disabling ASPM is intended to prevent the kernel from modifying
1730 * existing hardware state, not to clear existing state. To that end:
1731 * (a) set policy to POLICY_DEFAULT in order to avoid changing state
1732 * (b) prevent userspace from changing policy
1733 */
1734 if (!aspm_force) {
1735 aspm_policy = POLICY_DEFAULT;
1736 aspm_disabled = 1;
1737 }
1738 }
1739
1740 bool pcie_aspm_support_enabled(void)
1741 {
1742 return aspm_support_enabled;
1743 }
1744
1745 #endif /* CONFIG_PCIEASPM */
1746