// SPDX-License-Identifier: GPL-2.0
/*
 * Enable PCIe link L0s/L1 state and Clock Power Management
 *
 * Copyright (C) 2007 Intel
 * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
 * Copyright (C) Shaohua Li (shaohua.li@intel.com)
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/time.h>

#include "../pci.h"

void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
}

void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u32 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
}

void pci_configure_aspm_l1ss(struct pci_dev *pdev)
{
	int rc;

	pdev->l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);

	rc = pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_L1SS,
					 2 * sizeof(u32));
	if (rc)
		pci_err(pdev, "unable to allocate ASPM L1SS save buffer (%pe)\n",
			ERR_PTR(rc));
}

void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	/*
	 * If this is a Downstream Port, we never restore the L1SS state
	 * directly; we only restore it when we restore the state of the
	 * Upstream Port below it.
	 */
	if (pcie_downstream_port(pdev) || !parent)
		return;

	if (!pdev->l1ss || !parent->l1ss)
		return;

	/*
	 * Save L1 substate configuration. The ASPM L0s/L1 configuration
	 * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
	 */
	save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!save_state)
		return;

	cap = &save_state->cap.data[0];
	pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
	pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);

	/*
	 * Save parent's L1 substate configuration so we have it for
	 * pci_restore_aspm_l1ss_state(pdev) to restore.
	 */
	save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
	if (!save_state)
		return;

	cap = &save_state->cap.data[0];
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
}

void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
{
	struct pci_cap_saved_state *pl_save_state, *cl_save_state;
	struct pci_dev *parent = pdev->bus->self;
	u32 *cap, pl_ctl1, pl_ctl2, pl_l1_2_enable;
	u32 cl_ctl1, cl_ctl2, cl_l1_2_enable;
	u16 clnkctl, plnkctl;

	/*
	 * In case BIOS enabled L1.2 when resuming, we need to disable it first
	 * on the downstream component before the upstream. So, don't attempt to
	 * restore either until we are at the downstream component.
	 */
	if (pcie_downstream_port(pdev) || !parent)
		return;

	if (!pdev->l1ss || !parent->l1ss)
		return;

	cl_save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
	pl_save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
	if (!cl_save_state || !pl_save_state)
		return;

	cap = &cl_save_state->cap.data[0];
	cl_ctl2 = *cap++;
	cl_ctl1 = *cap;
	cap = &pl_save_state->cap.data[0];
	pl_ctl2 = *cap++;
	pl_ctl1 = *cap;

	/* Make sure L0s/L1 are disabled before updating L1SS config */
	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &clnkctl);
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &plnkctl);
	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
					   clnkctl & ~PCI_EXP_LNKCTL_ASPMC);
		pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
					   plnkctl & ~PCI_EXP_LNKCTL_ASPMC);
	}

	/*
	 * Disable L1.2 on this downstream endpoint device first, followed
	 * by the upstream
	 */
	pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1_2_MASK, 0);
	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1_2_MASK, 0);

	/*
	 * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD
	 * in PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
	 * enable bits, even though they're all in PCI_L1SS_CTL1.
	 */
	pl_l1_2_enable = pl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
	pl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
	cl_l1_2_enable = cl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
	cl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;

	/* Write back without enables first (above we cleared them in ctl1) */
	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, pl_ctl2);
	pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cl_ctl2);
	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, pl_ctl1);
	pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cl_ctl1);

	/* Then write back the enables */
	if (pl_l1_2_enable || cl_l1_2_enable) {
		pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       pl_ctl1 | pl_l1_2_enable);
		pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
				       cl_ctl1 | cl_l1_2_enable);
	}

	/* Restore L0s/L1 if they were enabled */
	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl);
		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl);
	}
}
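/*
 * Note: pci_save_aspm_l1ss_state() and pci_restore_aspm_l1ss_state() above
 * rely on a shared layout for the two-dword L1SS save buffer allocated in
 * pci_configure_aspm_l1ss(): cap.data[0] holds PCI_L1SS_CTL2 and
 * cap.data[1] holds PCI_L1SS_CTL1, so CTL2 is always written back before
 * the CTL1 enable bits.
 */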
#ifdef CONFIG_PCIEASPM

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."

/* Note: these are not register definitions */
#define PCIE_LINK_STATE_L0S_UP	BIT(0)	/* Upstream direction L0s state */
#define PCIE_LINK_STATE_L0S_DW	BIT(1)	/* Downstream direction L0s state */
static_assert(PCIE_LINK_STATE_L0S == (PCIE_LINK_STATE_L0S_UP | PCIE_LINK_STATE_L0S_DW));

#define PCIE_LINK_STATE_L1_SS_PCIPM	(PCIE_LINK_STATE_L1_1_PCIPM |\
					 PCIE_LINK_STATE_L1_2_PCIPM)
#define PCIE_LINK_STATE_L1_2_MASK	(PCIE_LINK_STATE_L1_2 |\
					 PCIE_LINK_STATE_L1_2_PCIPM)
#define PCIE_LINK_STATE_L1SS		(PCIE_LINK_STATE_L1_1 |\
					 PCIE_LINK_STATE_L1_1_PCIPM |\
					 PCIE_LINK_STATE_L1_2_MASK)

struct pcie_link_state {
	struct pci_dev *pdev;		/* Upstream component of the Link */
	struct pci_dev *downstream;	/* Downstream component, function 0 */
	struct pcie_link_state *root;	/* pointer to the root port link */
	struct pcie_link_state *parent;	/* pointer to the parent Link state */
	struct list_head sibling;	/* node in link_list */

	/* ASPM state */
	u32 aspm_support:7;		/* Supported ASPM state */
	u32 aspm_enabled:7;		/* Enabled ASPM state */
	u32 aspm_capable:7;		/* Capable ASPM state with latency */
	u32 aspm_default:7;		/* Default ASPM state by BIOS */
	u32 aspm_disable:7;		/* Disabled ASPM state */

	/* Clock PM state */
	u32 clkpm_capable:1;		/* Clock PM capable? */
	u32 clkpm_enabled:1;		/* Current Clock PM state */
	u32 clkpm_default:1;		/* Default Clock PM state by BIOS */
	u32 clkpm_disable:1;		/* Clock PM disabled */
};

static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);

#define POLICY_DEFAULT 0	/* BIOS default setting */
#define POLICY_PERFORMANCE 1	/* high performance */
#define POLICY_POWERSAVE 2	/* high power saving */
#define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */

#ifdef CONFIG_PCIEASPM_PERFORMANCE
static int aspm_policy = POLICY_PERFORMANCE;
#elif defined CONFIG_PCIEASPM_POWERSAVE
static int aspm_policy = POLICY_POWERSAVE;
#elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
static int aspm_policy = POLICY_POWER_SUPERSAVE;
#else
static int aspm_policy;
#endif

static const char *policy_str[] = {
	[POLICY_DEFAULT] = "default",
	[POLICY_PERFORMANCE] = "performance",
	[POLICY_POWERSAVE] = "powersave",
	[POLICY_POWER_SUPERSAVE] = "powersupersave"
};

/*
 * The L1 PM substate capability is only implemented in function 0 in a
 * multi function device.
 */
static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
{
	struct pci_dev *child;

	list_for_each_entry(child, &linkbus->devices, bus_list)
		if (PCI_FUNC(child->devfn) == 0)
			return child;
	return NULL;
}

static int policy_to_aspm_state(struct pcie_link_state *link)
{
	switch (aspm_policy) {
	case POLICY_PERFORMANCE:
		/* Disable ASPM and Clock PM */
		return 0;
	case POLICY_POWERSAVE:
		/* Enable ASPM L0s/L1 */
		return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
	case POLICY_POWER_SUPERSAVE:
		/* Enable Everything */
		return PCIE_LINK_STATE_ASPM_ALL;
	case POLICY_DEFAULT:
		return link->aspm_default;
	}
	return 0;
}

static int policy_to_clkpm_state(struct pcie_link_state *link)
{
	switch (aspm_policy) {
	case POLICY_PERFORMANCE:
		/* Disable ASPM and Clock PM */
		return 0;
	case POLICY_POWERSAVE:
	case POLICY_POWER_SUPERSAVE:
		/* Enable Clock PM */
		return 1;
	case POLICY_DEFAULT:
		return link->clkpm_default;
	}
	return 0;
}

static void pci_update_aspm_saved_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	u16 *cap, lnkctl, aspm_ctl;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnkctl);

	/*
	 * Update ASPM and CLKREQ bits of LNKCTL in save_state. We only
	 * write PCI_EXP_LNKCTL_CCC during enumeration, so it shouldn't
	 * change after being captured in save_state.
	 */
	aspm_ctl = lnkctl & (PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
	lnkctl &= ~(PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);

	/* Depends on pci_save_pcie_state(): cap[1] is LNKCTL */
	cap = (u16 *)&save_state->cap.data[0];
	cap[1] = lnkctl | aspm_ctl;
}

static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;
	u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;

	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN,
						   val);
		pci_update_aspm_saved_state(child);
	}
	link->clkpm_enabled = !!enable;
}

static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
	/*
	 * Don't enable Clock PM if the link is not Clock PM capable
	 * or Clock PM is disabled
	 */
	if (!link->clkpm_capable || link->clkpm_disable)
		enable = 0;
	/* Nothing to do if the requested state equals the current state */
	if (link->clkpm_enabled == enable)
		return;
	pcie_set_clkpm_nocheck(link, enable);
}

static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
	int capable = 1, enabled = 1;
	u32 reg32;
	u16 reg16;
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;

	/* All functions should have the same cap and state, take the worst */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
		if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
			capable = 0;
			enabled = 0;
			break;
		}
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
			enabled = 0;
	}
	link->clkpm_enabled = enabled;
	link->clkpm_default = enabled;
	link->clkpm_capable = capable;
	link->clkpm_disable = blacklist ? 1 : 0;
}

/*
 * pcie_aspm_configure_common_clock: check if the 2 ends of a link
 * could use common clock. If they are, configure them to use the
 * common clock. That will reduce the ASPM state exit latency.
 */
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
	int same_clock = 1;
	u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
	struct pci_dev *child, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;
	/*
	 * All functions of a slot should have the same Slot Clock
	 * Configuration, so just check one function
	 */
	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
	BUG_ON(!pci_is_pcie(child));

	/* Check downstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Check upstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Port might be already in common clock mode */
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
	if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
		bool consistent = true;

		list_for_each_entry(child, &linkbus->devices, bus_list) {
			pcie_capability_read_word(child, PCI_EXP_LNKCTL,
						  &reg16);
			if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
				consistent = false;
				break;
			}
		}
		if (consistent)
			return;
		pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
	}

	ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
	/* Configure downstream component, all functions */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CCC, ccc);
	}

	/* Configure upstream component */
	pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_CCC, ccc);

	if (pcie_retrain_link(link->pdev, true)) {

		/* Training failed. Restore common clock configurations */
		pci_err(parent, "ASPM: Could not configure common clock\n");
		list_for_each_entry(child, &linkbus->devices, bus_list)
			pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
							   PCI_EXP_LNKCTL_CCC,
							   child_old_ccc[PCI_FUNC(child->devfn)]);
		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CCC, parent_old_ccc);
	}
}
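/*
 * A brief summary of the latency encodings handled by the calc_*()
 * helpers below (derived from the code itself, not quoted from the spec):
 * L0s exit latency is 64ns << encoding and L1 exit latency is
 * 1us << encoding, with encoding 0x7 treated as "greater than 4us" (L0s)
 * or "greater than 64us" (L1).  For the acceptable-latency variants, 0x7
 * means "no limit".  For example, an L1 exit latency encoding of 3
 * decodes to 8us.
 */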
/* Convert L0s latency encoding to ns */
static u32 calc_l0s_latency(u32 lnkcap)
{
	u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L0SEL, lnkcap);

	if (encoding == 0x7)
		return 5 * NSEC_PER_USEC;	/* > 4us */
	return (64 << encoding);
}

/* Convert L0s acceptable latency encoding to ns */
static u32 calc_l0s_acceptable(u32 encoding)
{
	if (encoding == 0x7)
		return U32_MAX;
	return (64 << encoding);
}

/* Convert L1 latency encoding to ns */
static u32 calc_l1_latency(u32 lnkcap)
{
	u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L1EL, lnkcap);

	if (encoding == 0x7)
		return 65 * NSEC_PER_USEC;	/* > 64us */
	return NSEC_PER_USEC << encoding;
}

/* Convert L1 acceptable latency encoding to ns */
static u32 calc_l1_acceptable(u32 encoding)
{
	if (encoding == 0x7)
		return U32_MAX;
	return NSEC_PER_USEC << encoding;
}

/* Convert L1SS T_pwr encoding to usec */
static u32 calc_l12_pwron(struct pci_dev *pdev, u32 scale, u32 val)
{
	switch (scale) {
	case 0:
		return val * 2;
	case 1:
		return val * 10;
	case 2:
		return val * 100;
	}
	pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
	return 0;
}

/*
 * Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
 * register.  Ports enter L1.2 when the most recent LTR value is greater
 * than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
 * don't enter L1.2 too aggressively.
 *
 * See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
 */
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
	u64 threshold_ns = (u64)threshold_us * NSEC_PER_USEC;

	/*
	 * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
	 * value of 0x3ff.
	 */
	if (threshold_ns <= 1 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
		*scale = 0;		/* Value times 1ns */
		*value = threshold_ns;
	} else if (threshold_ns <= 32 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
		*scale = 1;		/* Value times 32ns */
		*value = roundup(threshold_ns, 32) / 32;
	} else if (threshold_ns <= 1024 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
		*scale = 2;		/* Value times 1024ns */
		*value = roundup(threshold_ns, 1024) / 1024;
	} else if (threshold_ns <= 32768 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
		*scale = 3;		/* Value times 32768ns */
		*value = roundup(threshold_ns, 32768) / 32768;
	} else if (threshold_ns <= 1048576 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
		*scale = 4;		/* Value times 1048576ns */
		*value = roundup(threshold_ns, 1048576) / 1048576;
	} else if (threshold_ns <= (u64)33554432 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
		*scale = 5;		/* Value times 33554432ns */
		*value = roundup(threshold_ns, 33554432) / 33554432;
	} else {
		*scale = 5;
		*value = FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE);
	}
}
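/*
 * Illustrative example (not taken from the spec): encoding a threshold of
 * 56us = 56000ns.  56000 is greater than 32 * 0x3ff (32736ns) but fits
 * within 1024 * 0x3ff, so scale becomes 2 (1024ns units) and value becomes
 * roundup(56000, 1024) / 1024 = 55, i.e. an effective threshold of
 * 55 * 1024 = 56320ns, rounded up so L1.2 is not entered too eagerly.
 */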
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
	u32 latency, encoding, lnkcap_up, lnkcap_dw;
	u32 l1_switch_latency = 0, latency_up_l0s;
	u32 latency_up_l1, latency_dw_l0s, latency_dw_l1;
	u32 acceptable_l0s, acceptable_l1;
	struct pcie_link_state *link;

	/* Device not in D0 doesn't need latency check */
	if ((endpoint->current_state != PCI_D0) &&
	    (endpoint->current_state != PCI_UNKNOWN))
		return;

	link = endpoint->bus->self->link_state;

	/* Calculate endpoint L0s acceptable latency */
	encoding = FIELD_GET(PCI_EXP_DEVCAP_L0S, endpoint->devcap);
	acceptable_l0s = calc_l0s_acceptable(encoding);

	/* Calculate endpoint L1 acceptable latency */
	encoding = FIELD_GET(PCI_EXP_DEVCAP_L1, endpoint->devcap);
	acceptable_l1 = calc_l1_acceptable(encoding);

	while (link) {
		struct pci_dev *dev = pci_function_0(link->pdev->subordinate);

		/* Read direction exit latencies */
		pcie_capability_read_dword(link->pdev, PCI_EXP_LNKCAP,
					   &lnkcap_up);
		pcie_capability_read_dword(dev, PCI_EXP_LNKCAP,
					   &lnkcap_dw);
		latency_up_l0s = calc_l0s_latency(lnkcap_up);
		latency_up_l1 = calc_l1_latency(lnkcap_up);
		latency_dw_l0s = calc_l0s_latency(lnkcap_dw);
		latency_dw_l1 = calc_l1_latency(lnkcap_dw);

		/* Check upstream direction L0s latency */
		if ((link->aspm_capable & PCIE_LINK_STATE_L0S_UP) &&
		    (latency_up_l0s > acceptable_l0s))
			link->aspm_capable &= ~PCIE_LINK_STATE_L0S_UP;

		/* Check downstream direction L0s latency */
		if ((link->aspm_capable & PCIE_LINK_STATE_L0S_DW) &&
		    (latency_dw_l0s > acceptable_l0s))
			link->aspm_capable &= ~PCIE_LINK_STATE_L0S_DW;
		/*
		 * Check L1 latency.
		 * Every switch on the path to the root complex needs 1
		 * more microsecond for L1.  The spec doesn't mention L0s.
		 *
		 * The exit latencies for L1 substates are not advertised
		 * by a device.  Since the spec also doesn't mention a way
		 * to determine max latencies introduced by enabling L1
		 * substates on the components, it is not clear how to do
		 * a L1 substate exit latency check.  We assume that the
		 * L1 exit latencies advertised by a device include L1
		 * substate latencies (and hence do not do any check).
		 */
		latency = max_t(u32, latency_up_l1, latency_dw_l1);
		if ((link->aspm_capable & PCIE_LINK_STATE_L1) &&
		    (latency + l1_switch_latency > acceptable_l1))
			link->aspm_capable &= ~PCIE_LINK_STATE_L1;
		l1_switch_latency += NSEC_PER_USEC;

		link = link->parent;
	}
}
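/*
 * Illustrative walk-through (hypothetical numbers): an endpoint that
 * tolerates 8us of L1 exit latency behind two links, each advertising an
 * 8us L1 exit latency.  The link next to the endpoint passes the check
 * (8us + 0us accumulated switch latency), but the next link upstream
 * fails (8us + 1us > 8us), so PCIE_LINK_STATE_L1 is cleared from that
 * link's aspm_capable mask.
 */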
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l12_info(struct pcie_link_state *link,
			       u32 parent_l1ss_cap, u32 child_l1ss_cap)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 val1, val2, scale1, scale2;
	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
	u32 ctl1 = 0, ctl2 = 0;
	u32 pctl1, pctl2, cctl1, cctl2;
	u32 pl1_2_enables, cl1_2_enables;

	/* Choose the greater of the two Port Common_Mode_Restore_Times */
	val1 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, parent_l1ss_cap);
	val2 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, child_l1ss_cap);
	t_common_mode = max(val1, val2);

	/* Choose the greater of the two Port T_POWER_ON times */
	val1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, parent_l1ss_cap);
	scale1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, parent_l1ss_cap);
	val2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, child_l1ss_cap);
	scale2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, child_l1ss_cap);

	if (calc_l12_pwron(parent, scale1, val1) >
	    calc_l12_pwron(child, scale2, val2)) {
		ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale1) |
			FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val1);
		t_power_on = calc_l12_pwron(parent, scale1, val1);
	} else {
		ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale2) |
			FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val2);
		t_power_on = calc_l12_pwron(child, scale2, val2);
	}

	/*
	 * Set LTR_L1.2_THRESHOLD to the time required to transition the
	 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
	 * downstream devices report (via LTR) that they can tolerate at
	 * least that much latency.
	 *
	 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
	 * Table 5-11.  T(POWER_OFF) is at most 2us and T(L1.2) is at
	 * least 4us.
	 */
	l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
	encode_l12_threshold(l1_2_threshold, &scale, &value);
	ctl1 |= FIELD_PREP(PCI_L1SS_CTL1_CM_RESTORE_TIME, t_common_mode) |
		FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_VALUE, value) |
		FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_SCALE, scale);

	/* Some broken devices only support dword access to L1 SS */
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);

	if (ctl1 == pctl1 && ctl1 == cctl1 &&
	    ctl2 == pctl2 && ctl2 == cctl2)
		return;

	/* Disable L1.2 while updating.  See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
	pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
	cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;

	if (pl1_2_enables || cl1_2_enables) {
		pci_clear_and_set_config_dword(child,
					       child->l1ss + PCI_L1SS_CTL1,
					       PCI_L1SS_CTL1_L1_2_MASK, 0);
		pci_clear_and_set_config_dword(parent,
					       parent->l1ss + PCI_L1SS_CTL1,
					       PCI_L1SS_CTL1_L1_2_MASK, 0);
	}

	/* Program T_POWER_ON times in both ports */
	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
	pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);

	/* Program Common_Mode_Restore_Time in upstream device */
	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);

	/* Program LTR_L1.2_THRESHOLD time in both ports */
	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
				       ctl1);
	pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
				       ctl1);

	if (pl1_2_enables || cl1_2_enables) {
		pci_clear_and_set_config_dword(parent,
					       parent->l1ss + PCI_L1SS_CTL1, 0,
					       pl1_2_enables);
		pci_clear_and_set_config_dword(child,
					       child->l1ss + PCI_L1SS_CTL1, 0,
					       cl1_2_enables);
	}
}
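/*
 * Worked example with made-up capability values: if the larger
 * Common_Mode_Restore_Time is 10us and the larger T_POWER_ON is encoded
 * as scale 1, value 4 (i.e. 4 * 10us = 40us per calc_l12_pwron()), the
 * threshold becomes 2 + 4 + 10 + 40 = 56us, which encode_l12_threshold()
 * then packs into the CTL1 scale/value fields as in the example above.
 */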
static void aspm_l1ss_init(struct pcie_link_state *link)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 parent_l1ss_cap, child_l1ss_cap;
	u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;

	if (!parent->l1ss || !child->l1ss)
		return;

	/* Setup L1 substate */
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
			      &parent_l1ss_cap);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
			      &child_l1ss_cap);

	if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
		parent_l1ss_cap = 0;
	if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
		child_l1ss_cap = 0;

	/*
	 * If we don't have LTR for the entire path from the Root Complex
	 * to this device, we can't use ASPM L1.2 because it relies on the
	 * LTR_L1.2_THRESHOLD.  See PCIe r4.0, secs 5.5.4, 6.18.
	 */
	if (!child->ltr_path)
		child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;

	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
		link->aspm_support |= PCIE_LINK_STATE_L1_1;
	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
		link->aspm_support |= PCIE_LINK_STATE_L1_2;
	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
		link->aspm_support |= PCIE_LINK_STATE_L1_1_PCIPM;
	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
		link->aspm_support |= PCIE_LINK_STATE_L1_2_PCIPM;

	if (parent_l1ss_cap)
		pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				      &parent_l1ss_ctl1);
	if (child_l1ss_cap)
		pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
				      &child_l1ss_ctl1);

	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
		link->aspm_enabled |= PCIE_LINK_STATE_L1_1;
	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
		link->aspm_enabled |= PCIE_LINK_STATE_L1_2;
	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
		link->aspm_enabled |= PCIE_LINK_STATE_L1_1_PCIPM;
	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
		link->aspm_enabled |= PCIE_LINK_STATE_L1_2_PCIPM;

	if (link->aspm_support & PCIE_LINK_STATE_L1_2_MASK)
		aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
}

static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 parent_lnkcap, child_lnkcap;
	u16 parent_lnkctl, child_lnkctl;
	struct pci_bus *linkbus = parent->subordinate;

	if (blacklist) {
		/* Set enabled/disable so that we will disable ASPM later */
		link->aspm_enabled = PCIE_LINK_STATE_ASPM_ALL;
		link->aspm_disable = PCIE_LINK_STATE_ASPM_ALL;
		return;
	}

	/*
	 * If ASPM is not supported, don't touch the clocks or the link;
	 * bail out now.
	 */
	pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
	pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
	if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
		return;

	/* Configure common clock before checking latencies */
	pcie_aspm_configure_common_clock(link);

	/*
	 * Re-read upstream/downstream components' register state after
	 * clock configuration.  L0s & L1 exit latencies in the otherwise
	 * read-only Link Capabilities may change depending on common clock
	 * configuration (PCIe r5.0, sec 7.5.3.6).
	 */
	pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
	pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
	pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);

	/* Disable L0s/L1 before updating L1SS config */
	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
		pcie_capability_write_word(child, PCI_EXP_LNKCTL,
					   child_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
		pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
					   parent_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
	}

	/*
	 * Setup L0s state
	 *
	 * Note that we must not enable L0s in either direction on a
	 * given link unless components on both sides of the link each
	 * support L0s.
	 */
	if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
		link->aspm_support |= PCIE_LINK_STATE_L0S;

	if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
		link->aspm_enabled |= PCIE_LINK_STATE_L0S_UP;
	if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
		link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW;

	/* Setup L1 state */
	if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
		link->aspm_support |= PCIE_LINK_STATE_L1;

	if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
		link->aspm_enabled |= PCIE_LINK_STATE_L1;

	aspm_l1ss_init(link);

	/* Restore L0s/L1 if they were enabled */
	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_lnkctl);
		pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_lnkctl);
	}

	/* Save default state */
	link->aspm_default = link->aspm_enabled;

	/* Setup initial capable state. Will be updated later */
	link->aspm_capable = link->aspm_support;

	/* Get and check endpoint acceptable latencies */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
		    pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
			continue;

		pcie_aspm_check_latency(child);
	}
}
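/*
 * A note on the L0s direction bits used above and in
 * pcie_config_aspm_link() below: "upstream direction" L0s
 * (PCIE_LINK_STATE_L0S_UP) is controlled by the L0s enable bit in the
 * *downstream* component's Link Control register, and "downstream
 * direction" L0s (PCIE_LINK_STATE_L0S_DW) by the *upstream* component's,
 * since each transmitter manages its own L0s entry.
 */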
/* Configure the ASPM L1 substates.  Caller must disable L1 first. */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
	u32 val;
	struct pci_dev *child = link->downstream, *parent = link->pdev;

	val = 0;
	if (state & PCIE_LINK_STATE_L1_1)
		val |= PCI_L1SS_CTL1_ASPM_L1_1;
	if (state & PCIE_LINK_STATE_L1_2)
		val |= PCI_L1SS_CTL1_ASPM_L1_2;
	if (state & PCIE_LINK_STATE_L1_1_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
	if (state & PCIE_LINK_STATE_L1_2_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_2;

	/*
	 * PCIe r6.2, sec 5.5.4, rules for enabling L1 PM Substates:
	 * - Clear L1.x enable bits at child first, then at parent
	 * - Set L1.x enable bits at parent first, then at child
	 * - ASPM/PCIPM L1.2 must be disabled while programming timing
	 *   parameters
	 */

	/* Disable all L1 substates */
	pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1SS_MASK, 0);
	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1SS_MASK, 0);

	/* Enable what we need to enable */
	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1SS_MASK, val);
	pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1SS_MASK, val);
}

static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC, val);
}

static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
	u32 upstream = 0, dwstream = 0;
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;

	/* Enable only the states that were not explicitly disabled */
	state &= (link->aspm_capable & ~link->aspm_disable);

	/* Can't enable any substates if L1 is not enabled */
	if (!(state & PCIE_LINK_STATE_L1))
		state &= ~PCIE_LINK_STATE_L1SS;

	/* Spec says both ports must be in D0 before enabling PCI PM substates */
	if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
		state &= ~PCIE_LINK_STATE_L1_SS_PCIPM;
		state |= (link->aspm_enabled & PCIE_LINK_STATE_L1_SS_PCIPM);
	}

	/* Nothing to do if the link is already in the requested state */
	if (link->aspm_enabled == state)
		return;
	/* Convert ASPM state to upstream/downstream ASPM register state */
	if (state & PCIE_LINK_STATE_L0S_UP)
		dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & PCIE_LINK_STATE_L0S_DW)
		upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & PCIE_LINK_STATE_L1) {
		upstream |= PCI_EXP_LNKCTL_ASPM_L1;
		dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
	}

	/*
	 * Per PCIe r6.2, sec 5.5.4, setting either or both of the enable
	 * bits for ASPM L1 PM Substates must be done while ASPM L1 is
	 * disabled.  Disable L1 here and apply new configuration after L1SS
	 * configuration has been completed.
	 *
	 * Per sec 7.5.3.7, when disabling ASPM L1, software must disable
	 * it in the Downstream component prior to disabling it in the
	 * Upstream component, and ASPM L1 must be enabled in the Upstream
	 * component prior to enabling it in the Downstream component.
	 *
	 * Sec 7.5.3.7 also recommends programming the same ASPM Control
	 * value for all functions of a multi-function device.
	 */
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_config_aspm_dev(child, 0);
	pcie_config_aspm_dev(parent, 0);

	if (link->aspm_capable & PCIE_LINK_STATE_L1SS)
		pcie_config_aspm_l1ss(link, state);

	pcie_config_aspm_dev(parent, upstream);
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_config_aspm_dev(child, dwstream);

	link->aspm_enabled = state;

	/* Update latest ASPM configuration in saved context */
	pci_save_aspm_l1ss_state(link->downstream);
	pci_update_aspm_saved_state(link->downstream);
	pci_save_aspm_l1ss_state(parent);
	pci_update_aspm_saved_state(parent);
}

static void pcie_config_aspm_path(struct pcie_link_state *link)
{
	while (link) {
		pcie_config_aspm_link(link, policy_to_aspm_state(link));
		link = link->parent;
	}
}

static void free_link_state(struct pcie_link_state *link)
{
	link->pdev->link_state = NULL;
	kfree(link);
}

static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
	struct pci_dev *child;
	u32 reg32;

	/*
	 * Some functions in a slot might not be PCIe functions at all,
	 * which is very strange.  Disable ASPM for the whole slot.
	 */
	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
		if (!pci_is_pcie(child))
			return -EINVAL;

		/*
		 * If ASPM is disabled then we're not going to change
		 * the BIOS state.  It's safe to continue even if it's a
		 * pre-1.1 device
		 */

		if (aspm_disabled)
			continue;

		/*
		 * Disable ASPM for pre-1.1 PCIe devices; we follow MS in
		 * using the RBER bit to determine if a function is a 1.1
		 * version device.
		 */
		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
			pci_info(child, "disabling ASPM on pre-1.1 PCIe device.  You can enable it with 'pcie_aspm=force'\n");
			return -EINVAL;
		}
	}
	return 0;
}

static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	INIT_LIST_HEAD(&link->sibling);
	link->pdev = pdev;
	link->downstream = pci_function_0(pdev->subordinate);

	/*
	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
	 * hierarchies.  Note that some PCIe host implementations omit
	 * the root ports entirely, in which case a downstream port on
	 * a switch may become the root of the link state chain for all
	 * its subordinate endpoints.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
	    !pdev->bus->parent->self) {
		link->root = link;
	} else {
		struct pcie_link_state *parent;

		parent = pdev->bus->parent->self->link_state;
		if (!parent) {
			kfree(link);
			return NULL;
		}

		link->parent = parent;
		link->root = link->parent->root;
	}

	list_add(&link->sibling, &link_list);
	pdev->link_state = link;
	return link;
}

static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
{
	struct pci_dev *child;

	list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
		sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
}

/*
 * pcie_aspm_init_link_state: Initialize PCIe link state.
 * Called after a PCIe port and its child devices have been scanned.
 * @pdev: the root port or switch downstream port
 */
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;
	int blacklist = !!pcie_aspm_sanity_check(pdev);

	if (!aspm_support_enabled)
		return;

	if (pdev->link_state)
		return;

	/*
	 * We allocate pcie_link_state for the component on the upstream
	 * end of a Link, so there's nothing to do unless this device is
	 * a downstream port.
	 */
	if (!pcie_downstream_port(pdev))
		return;

	/* VIA has a strange chipset, root port is under a bridge */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
	    pdev->bus->self)
		return;

	down_read(&pci_bus_sem);
	if (list_empty(&pdev->subordinate->devices))
		goto out;

	mutex_lock(&aspm_lock);
	link = alloc_pcie_link_state(pdev);
	if (!link)
		goto unlock;
	/*
	 * Setup initial ASPM state.  Note that we need to configure
	 * upstream links as well, because their capable state can be
	 * updated through pcie_aspm_cap_init().
	 */
	pcie_aspm_cap_init(link, blacklist);

	/* Setup initial Clock PM state */
	pcie_clkpm_cap_init(link, blacklist);

	/*
	 * At this stage drivers haven't had an opportunity to change the
	 * link policy setting.  Enabling ASPM on broken hardware can cripple
	 * it even before the driver has had a chance to disable ASPM, so
	 * default to a safe level right now.  If we're enabling ASPM beyond
	 * the BIOS's expectation, we'll do so once pci_enable_device() is
	 * called.
	 */
	if (aspm_policy != POLICY_POWERSAVE &&
	    aspm_policy != POLICY_POWER_SUPERSAVE) {
		pcie_config_aspm_path(link);
		pcie_set_clkpm(link, policy_to_clkpm_state(link));
	}

	pcie_aspm_update_sysfs_visibility(pdev);

unlock:
	mutex_unlock(&aspm_lock);
out:
	up_read(&pci_bus_sem);
}
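/*
 * For the powersave policies, the ASPM configuration deferred above is
 * applied later via pcie_aspm_powersave_config_link() below, which is
 * typically invoked on the device's behalf from the pci_enable_device()
 * path.
 */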
void pci_bridge_reconfigure_ltr(struct pci_dev *pdev)
{
	struct pci_dev *bridge;
	u32 ctl;

	bridge = pci_upstream_bridge(pdev);
	if (bridge && bridge->ltr_path) {
		pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
		if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
			pci_dbg(bridge, "re-enabling LTR\n");
			pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
						 PCI_EXP_DEVCTL2_LTR_EN);
		}
	}
}

void pci_configure_ltr(struct pci_dev *pdev)
{
	struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
	struct pci_dev *bridge;
	u32 cap, ctl;

	if (!pci_is_pcie(pdev))
		return;

	pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_LTR))
		return;

	pcie_capability_read_dword(pdev, PCI_EXP_DEVCTL2, &ctl);
	if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
		if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
			pdev->ltr_path = 1;
			return;
		}

		bridge = pci_upstream_bridge(pdev);
		if (bridge && bridge->ltr_path)
			pdev->ltr_path = 1;

		return;
	}

	if (!host->native_ltr)
		return;

	/*
	 * Software must not enable LTR in an Endpoint unless the Root
	 * Complex and all intermediate Switches indicate support for LTR.
	 * PCIe r4.0, sec 6.18.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_LTR_EN);
		pdev->ltr_path = 1;
		return;
	}

	/*
	 * If we're configuring a hot-added device, LTR was likely
	 * disabled in the upstream bridge, so re-enable it before enabling
	 * it in the new device.
	 */
	bridge = pci_upstream_bridge(pdev);
	if (bridge && bridge->ltr_path) {
		pci_bridge_reconfigure_ltr(pdev);
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_LTR_EN);
		pdev->ltr_path = 1;
	}
}

/* Recheck latencies and update aspm_capable for links under the root */
static void pcie_update_aspm_capable(struct pcie_link_state *root)
{
	struct pcie_link_state *link;

	BUG_ON(root->parent);
	list_for_each_entry(link, &link_list, sibling) {
		if (link->root != root)
			continue;
		link->aspm_capable = link->aspm_support;
	}
	list_for_each_entry(link, &link_list, sibling) {
		struct pci_dev *child;
		struct pci_bus *linkbus = link->pdev->subordinate;

		if (link->root != root)
			continue;
		list_for_each_entry(child, &linkbus->devices, bus_list) {
			if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
			    (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
				continue;
			pcie_aspm_check_latency(child);
		}
	}
}

/* @pdev: the endpoint device */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pcie_link_state *link, *root, *parent_link;

	if (!parent || !parent->link_state)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);

	link = parent->link_state;
	root = link->root;
	parent_link = link->parent;

	/*
	 * Free the parent's link state no later than when function 0
	 * (i.e., link->downstream) is removed.
	 *
	 * Do not free the link state any earlier.  If function 0 is a
	 * switch upstream port, this link state is parent_link to all
	 * subordinate ones.
	 */
	if (pdev != link->downstream)
		goto out;

	pcie_config_aspm_link(link, 0);
	list_del(&link->sibling);
	free_link_state(link);

	/* Recheck latencies and configure upstream links */
	if (parent_link) {
		pcie_update_aspm_capable(root);
		pcie_config_aspm_path(parent_link);
	}

out:
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}

/*
 * @pdev: the root port or switch downstream port
 * @locked: whether pci_bus_sem is held
 */
void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
{
	struct pcie_link_state *link = pdev->link_state;

	if (aspm_disabled || !link)
		return;
	/*
	 * A device changed its PM state; recheck whether the latencies
	 * still meet all functions' requirements.
	 */
	if (!locked)
		down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_update_aspm_capable(link->root);
	pcie_config_aspm_path(link);
	mutex_unlock(&aspm_lock);
	if (!locked)
		up_read(&pci_bus_sem);
}

void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
	struct pcie_link_state *link = pdev->link_state;

	if (aspm_disabled || !link)
		return;

	if (aspm_policy != POLICY_POWERSAVE &&
	    aspm_policy != POLICY_POWER_SUPERSAVE)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_config_aspm_path(link);
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}

static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
{
	struct pci_dev *bridge;

	if (!pci_is_pcie(pdev))
		return NULL;

	bridge = pci_upstream_bridge(pdev);
	if (!bridge || !pci_is_pcie(bridge))
		return NULL;

	return bridge->link_state;
}

static u8 pci_calc_aspm_disable_mask(int state)
{
	state &= ~PCIE_LINK_STATE_CLKPM;

	/* L1 PM substates require L1 */
	if (state & PCIE_LINK_STATE_L1)
		state |= PCIE_LINK_STATE_L1SS;

	return state;
}

static u8 pci_calc_aspm_enable_mask(int state)
{
	state &= ~PCIE_LINK_STATE_CLKPM;

	/* L1 PM substates require L1 */
	if (state & PCIE_LINK_STATE_L1SS)
		state |= PCIE_LINK_STATE_L1;

	return state;
}

static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
{
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	if (!link)
		return -EINVAL;
	/*
	 * A driver requested that ASPM be disabled on this device, but
	 * if we don't have permission to manage ASPM (e.g., on ACPI
	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
	 * the _OSC method), we can't honor that request.  Windows has
	 * a similar mechanism using "PciASPMOptOut", which is also
	 * ignored in this situation.
	 */
	if (aspm_disabled) {
		pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
		return -EPERM;
	}

	if (!locked)
		down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	link->aspm_disable |= pci_calc_aspm_disable_mask(state);
	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	if (state & PCIE_LINK_STATE_CLKPM)
		link->clkpm_disable = 1;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	if (!locked)
		up_read(&pci_bus_sem);

	return 0;
}

int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
	lockdep_assert_held_read(&pci_bus_sem);

	return __pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);

/**
 * pci_disable_link_state - Disable device's link state, so the link will
 * never enter specific states.  Note that if the BIOS didn't grant ASPM
 * control to the OS, this does nothing because we can't touch the LNKCTL
 * register. Returns 0 or a negative errno.
 *
 * @pdev: PCI device
 * @state: ASPM link state to disable
 */
int pci_disable_link_state(struct pci_dev *pdev, int state)
{
	return __pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state);
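/*
 * Illustrative driver-side usage (not taken from a specific driver): a
 * device driver that cannot tolerate ASPM L1.2 exit latency could call
 * pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2) from its probe
 * routine; disabling L1 itself would implicitly disable all L1 substates
 * as well, per pci_calc_aspm_disable_mask() above.
 */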
static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
{
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	if (!link)
		return -EINVAL;
	/*
	 * A driver requested that ASPM be enabled on this device, but
	 * if we don't have permission to manage ASPM (e.g., on ACPI
	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
	 * the _OSC method), we can't honor that request.
	 */
	if (aspm_disabled) {
		pci_warn(pdev, "can't override BIOS ASPM; OS doesn't have ASPM control\n");
		return -EPERM;
	}

	if (!locked)
		down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	link->aspm_default = pci_calc_aspm_enable_mask(state);
	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	if (!locked)
		up_read(&pci_bus_sem);

	return 0;
}

/**
 * pci_enable_link_state - Clear and set the default device link state so that
 * the link may be allowed to enter the specified states. Note that if the
 * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
 * touch the LNKCTL register. Also note that this does not enable states
 * disabled by pci_disable_link_state(). Return 0 or a negative errno.
 *
 * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
 * PCIe r6.0, sec 5.5.4.
 *
 * @pdev: PCI device
 * @state: Mask of ASPM link states to enable
 */
int pci_enable_link_state(struct pci_dev *pdev, int state)
{
	return __pci_enable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_enable_link_state);

/**
 * pci_enable_link_state_locked - Clear and set the default device link state
 * so that the link may be allowed to enter the specified states. Note that if
 * the BIOS didn't grant ASPM control to the OS, this does nothing because we
 * can't touch the LNKCTL register. Also note that this does not enable states
 * disabled by pci_disable_link_state(). Return 0 or a negative errno.
 *
 * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
 * PCIe r6.0, sec 5.5.4.
 *
 * @pdev: PCI device
 * @state: Mask of ASPM link states to enable
 *
 * Context: Caller holds pci_bus_sem read lock.
 */
int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
{
	lockdep_assert_held_read(&pci_bus_sem);

	return __pci_enable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_enable_link_state_locked);

static int pcie_aspm_set_policy(const char *val,
				const struct kernel_param *kp)
{
	int i;
	struct pcie_link_state *link;

	if (aspm_disabled)
		return -EPERM;
	i = sysfs_match_string(policy_str, val);
	if (i < 0)
		return i;
	if (i == aspm_policy)
		return 0;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	aspm_policy = i;
	list_for_each_entry(link, &link_list, sibling) {
		pcie_config_aspm_link(link, policy_to_aspm_state(link));
		pcie_set_clkpm(link, policy_to_clkpm_state(link));
	}
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
	return 0;
}

static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
{
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(policy_str); i++)
		if (i == aspm_policy)
			cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
		else
			cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
	cnt += sprintf(buffer + cnt, "\n");
	return cnt;
}

module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
		  NULL, 0644);
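/*
 * Usage note (assuming the standard module parameter plumbing above): the
 * active policy can be selected at boot with "pcie_aspm.policy=powersave"
 * (or performance/powersupersave/default) and read or changed at runtime
 * through /sys/module/pcie_aspm/parameters/policy, where the bracketed
 * entry marks the current policy.
 */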
/**
 * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
 * @pdev: Target device.
 *
 * Relies on the upstream bridge's link_state being valid.  The link_state
 * is deallocated only when the last child of the bridge (i.e., @pdev or a
 * sibling) is removed, and the caller should be holding a reference to
 * @pdev, so this should be safe.
 */
bool pcie_aspm_enabled(struct pci_dev *pdev)
{
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	if (!link)
		return false;

	return link->aspm_enabled;
}
EXPORT_SYMBOL_GPL(pcie_aspm_enabled);

static ssize_t aspm_attr_show_common(struct device *dev,
				     struct device_attribute *attr,
				     char *buf, u8 state)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	return sysfs_emit(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
}

static ssize_t aspm_attr_store_common(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t len, u8 state)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
	bool state_enable;

	if (kstrtobool(buf, &state_enable) < 0)
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);

	if (state_enable) {
		link->aspm_disable &= ~state;
		/* need to enable L1 for substates */
		if (state & PCIE_LINK_STATE_L1SS)
			link->aspm_disable &= ~PCIE_LINK_STATE_L1;
	} else {
		link->aspm_disable |= state;
		if (state & PCIE_LINK_STATE_L1)
			link->aspm_disable |= PCIE_LINK_STATE_L1SS;
	}

	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);

	return len;
}

#define ASPM_ATTR(_f, _s)						\
static ssize_t _f##_show(struct device *dev,				\
			 struct device_attribute *attr, char *buf)	\
{ return aspm_attr_show_common(dev, attr, buf, PCIE_LINK_STATE_##_s); } \
									\
static ssize_t _f##_store(struct device *dev,				\
			  struct device_attribute *attr,		\
			  const char *buf, size_t len)			\
{ return aspm_attr_store_common(dev, attr, buf, len, PCIE_LINK_STATE_##_s); }

ASPM_ATTR(l0s_aspm, L0S)
ASPM_ATTR(l1_aspm, L1)
ASPM_ATTR(l1_1_aspm, L1_1)
ASPM_ATTR(l1_2_aspm, L1_2)
ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)

static ssize_t clkpm_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	return sysfs_emit(buf, "%d\n", link->clkpm_enabled);
}

static ssize_t clkpm_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
	bool state_enable;

	if (kstrtobool(buf, &state_enable) < 0)
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);

	link->clkpm_disable = !state_enable;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));

	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);

	return len;
}

static DEVICE_ATTR_RW(clkpm);
static DEVICE_ATTR_RW(l0s_aspm);
static DEVICE_ATTR_RW(l1_aspm);
static DEVICE_ATTR_RW(l1_1_aspm);
static DEVICE_ATTR_RW(l1_2_aspm);
static DEVICE_ATTR_RW(l1_1_pcipm);
static DEVICE_ATTR_RW(l1_2_pcipm);

static struct attribute *aspm_ctrl_attrs[] = {
	&dev_attr_clkpm.attr,
	&dev_attr_l0s_aspm.attr,
	&dev_attr_l1_aspm.attr,
	&dev_attr_l1_1_aspm.attr,
	&dev_attr_l1_2_aspm.attr,
	&dev_attr_l1_1_pcipm.attr,
	&dev_attr_l1_2_pcipm.attr,
	NULL
};

static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
					   struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
	static const u8 aspm_state_map[] = {
		PCIE_LINK_STATE_L0S,
		PCIE_LINK_STATE_L1,
		PCIE_LINK_STATE_L1_1,
		PCIE_LINK_STATE_L1_2,
		PCIE_LINK_STATE_L1_1_PCIPM,
		PCIE_LINK_STATE_L1_2_PCIPM,
	};

	if (aspm_disabled || !link)
		return 0;

	if (n == 0)
		return link->clkpm_capable ? a->mode : 0;

	return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
}

const struct attribute_group aspm_ctrl_attr_group = {
	.name = "link",
	.attrs = aspm_ctrl_attrs,
	.is_visible = aspm_ctrl_attrs_are_visible,
};
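/*
 * These attributes surface under the device's "link" sysfs group, e.g.
 * /sys/bus/pci/devices/<domain:bus:dev.fn>/link/l1_2_aspm (path shown for
 * illustration); aspm_ctrl_attrs_are_visible() hides the files for states
 * the link does not support.
 */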
static int __init pcie_aspm_disable(char *str)
{
	if (!strcmp(str, "off")) {
		aspm_policy = POLICY_DEFAULT;
		aspm_disabled = 1;
		aspm_support_enabled = false;
		pr_info("PCIe ASPM is disabled\n");
	} else if (!strcmp(str, "force")) {
		aspm_force = 1;
		pr_info("PCIe ASPM is forcibly enabled\n");
	}
	return 1;
}

__setup("pcie_aspm=", pcie_aspm_disable);

void pcie_no_aspm(void)
{
	/*
	 * Disabling ASPM is intended to prevent the kernel from modifying
	 * existing hardware state, not to clear existing state. To that end:
	 * (a) set policy to POLICY_DEFAULT in order to avoid changing state
	 * (b) prevent userspace from changing policy
	 */
	if (!aspm_force) {
		aspm_policy = POLICY_DEFAULT;
		aspm_disabled = 1;
	}
}

bool pcie_aspm_support_enabled(void)
{
	return aspm_support_enabled;
}

#endif /* CONFIG_PCIEASPM */