// SPDX-License-Identifier: GPL-2.0
/*
 * Enable PCIe link L0s/L1 state and Clock Power Management
 *
 * Copyright (C) 2007 Intel
 * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
 * Copyright (C) Shaohua Li (shaohua.li@intel.com)
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/time.h>

#include "../pci.h"

void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
}

void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u32 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
}

void pci_configure_aspm_l1ss(struct pci_dev *pdev)
{
	int rc;

	pdev->l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);

	rc = pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_L1SS,
					 2 * sizeof(u32));
	if (rc)
		pci_err(pdev, "unable to allocate ASPM L1SS save buffer (%pe)\n",
			ERR_PTR(rc));
}
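
/*
 * Save the L1 PM Substates configuration of an Upstream Port (Endpoint or
 * Switch Upstream Port) and, if it has not been captured yet, of the
 * Downstream Port above it, so that pci_restore_aspm_l1ss_state() can later
 * restore both ends of the Link.
 */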
void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	/*
	 * If this is a Downstream Port, we never restore the L1SS state
	 * directly; we only restore it when we restore the state of the
	 * Upstream Port below it.
	 */
	if (pcie_downstream_port(pdev) || !parent)
		return;

	if (!pdev->l1ss || !parent->l1ss)
		return;

	/*
	 * Save L1 substate configuration. The ASPM L0s/L1 configuration
	 * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
	 */
	save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!save_state)
		return;

	cap = &save_state->cap.data[0];
	pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
	pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);

	if (parent->state_saved)
		return;

	/*
	 * Save parent's L1 substate configuration so we have it for
	 * pci_restore_aspm_l1ss_state(pdev) to restore.
	 */
	save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
	if (!save_state)
		return;

	cap = &save_state->cap.data[0];
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
}
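
/*
 * Restore the saved L1 PM Substates configuration of an Upstream Port and of
 * the Downstream Port above it.  L1.2 is disabled on both ends (child first)
 * before the timing parameters are rewritten, and only then are the saved
 * L1.2 enable bits applied again.
 */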
void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
{
	struct pci_cap_saved_state *pl_save_state, *cl_save_state;
	struct pci_dev *parent = pdev->bus->self;
	u32 *cap, pl_ctl1, pl_ctl2, pl_l1_2_enable;
	u32 cl_ctl1, cl_ctl2, cl_l1_2_enable;
	u16 clnkctl, plnkctl;

	/*
	 * In case BIOS enabled L1.2 when resuming, we need to disable it first
	 * on the downstream component before the upstream. So, don't attempt to
	 * restore either until we are at the downstream component.
	 */
	if (pcie_downstream_port(pdev) || !parent)
		return;

	if (!pdev->l1ss || !parent->l1ss)
		return;

	cl_save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
	pl_save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
	if (!cl_save_state || !pl_save_state)
		return;

	cap = &cl_save_state->cap.data[0];
	cl_ctl2 = *cap++;
	cl_ctl1 = *cap;
	cap = &pl_save_state->cap.data[0];
	pl_ctl2 = *cap++;
	pl_ctl1 = *cap;

	/* Make sure L0s/L1 are disabled before updating L1SS config */
	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &clnkctl);
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &plnkctl);
	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
					   clnkctl & ~PCI_EXP_LNKCTL_ASPMC);
		pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
					   plnkctl & ~PCI_EXP_LNKCTL_ASPMC);
	}

	/*
	 * Disable L1.2 on this downstream endpoint device first, followed
	 * by the upstream
	 */
	pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1_2_MASK, 0);
	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1_2_MASK, 0);

	/*
	 * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD
	 * in PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
	 * enable bits, even though they're all in PCI_L1SS_CTL1.
	 */
	pl_l1_2_enable = pl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
	pl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
	cl_l1_2_enable = cl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
	cl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;

	/* Write back without enables first (above we cleared them in ctl1) */
	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, pl_ctl2);
	pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cl_ctl2);
	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, pl_ctl1);
	pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cl_ctl1);

	/* Then write back the enables */
	if (pl_l1_2_enable || cl_l1_2_enable) {
		pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       pl_ctl1 | pl_l1_2_enable);
		pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
				       cl_ctl1 | cl_l1_2_enable);
	}

	/* Restore L0s/L1 if they were enabled */
	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl);
		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl);
	}
}

#ifdef CONFIG_PCIEASPM

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."

/* Note: these are not register definitions */
#define PCIE_LINK_STATE_L0S_UP	BIT(0)	/* Upstream direction L0s state */
#define PCIE_LINK_STATE_L0S_DW	BIT(1)	/* Downstream direction L0s state */
static_assert(PCIE_LINK_STATE_L0S == (PCIE_LINK_STATE_L0S_UP | PCIE_LINK_STATE_L0S_DW));

#define PCIE_LINK_STATE_L1_SS_PCIPM	(PCIE_LINK_STATE_L1_1_PCIPM |\
					 PCIE_LINK_STATE_L1_2_PCIPM)
#define PCIE_LINK_STATE_L1_2_MASK	(PCIE_LINK_STATE_L1_2 |\
					 PCIE_LINK_STATE_L1_2_PCIPM)
#define PCIE_LINK_STATE_L1SS		(PCIE_LINK_STATE_L1_1 |\
					 PCIE_LINK_STATE_L1_1_PCIPM |\
					 PCIE_LINK_STATE_L1_2_MASK)

struct pcie_link_state {
	struct pci_dev *pdev;		/* Upstream component of the Link */
	struct pci_dev *downstream;	/* Downstream component, function 0 */
	struct pcie_link_state *root;	/* pointer to the root port link */
	struct pcie_link_state *parent;	/* pointer to the parent Link state */
	struct list_head sibling;	/* node in link_list */

	/* ASPM state */
	u32 aspm_support:7;		/* Supported ASPM state */
	u32 aspm_enabled:7;		/* Enabled ASPM state */
	u32 aspm_capable:7;		/* Capable ASPM state with latency */
	u32 aspm_default:7;		/* Default ASPM state by BIOS */
	u32 aspm_disable:7;		/* Disabled ASPM state */

	/* Clock PM state */
	u32 clkpm_capable:1;		/* Clock PM capable? */
	u32 clkpm_enabled:1;		/* Current Clock PM state */
	u32 clkpm_default:1;		/* Default Clock PM state by BIOS */
	u32 clkpm_disable:1;		/* Clock PM disabled */
};

static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);

#define POLICY_DEFAULT 0	/* BIOS default setting */
#define POLICY_PERFORMANCE 1	/* high performance */
#define POLICY_POWERSAVE 2	/* high power saving */
#define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */

#ifdef CONFIG_PCIEASPM_PERFORMANCE
static int aspm_policy = POLICY_PERFORMANCE;
#elif defined CONFIG_PCIEASPM_POWERSAVE
static int aspm_policy = POLICY_POWERSAVE;
#elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
static int aspm_policy = POLICY_POWER_SUPERSAVE;
#else
static int aspm_policy;
#endif

static const char *policy_str[] = {
	[POLICY_DEFAULT] = "default",
	[POLICY_PERFORMANCE] = "performance",
	[POLICY_POWERSAVE] = "powersave",
	[POLICY_POWER_SUPERSAVE] = "powersupersave"
};

/*
 * The L1 PM substate capability is only implemented in function 0 in a
 * multi-function device.
 */
static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
{
	struct pci_dev *child;

	list_for_each_entry(child, &linkbus->devices, bus_list)
		if (PCI_FUNC(child->devfn) == 0)
			return child;
	return NULL;
}

static int policy_to_aspm_state(struct pcie_link_state *link)
{
	switch (aspm_policy) {
	case POLICY_PERFORMANCE:
		/* Disable ASPM and Clock PM */
		return 0;
	case POLICY_POWERSAVE:
		/* Enable ASPM L0s/L1 */
		return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
	case POLICY_POWER_SUPERSAVE:
		/* Enable Everything */
		return PCIE_LINK_STATE_ASPM_ALL;
	case POLICY_DEFAULT:
		return link->aspm_default;
	}
	return 0;
}

static int policy_to_clkpm_state(struct pcie_link_state *link)
{
	switch (aspm_policy) {
	case POLICY_PERFORMANCE:
		/* Disable ASPM and Clock PM */
		return 0;
	case POLICY_POWERSAVE:
	case POLICY_POWER_SUPERSAVE:
		/* Enable Clock PM */
		return 1;
	case POLICY_DEFAULT:
		return link->clkpm_default;
	}
	return 0;
}

static void pci_update_aspm_saved_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	u16 *cap, lnkctl, aspm_ctl;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnkctl);

	/*
	 * Update ASPM and CLKREQ bits of LNKCTL in save_state. We only
	 * write PCI_EXP_LNKCTL_CCC during enumeration, so it shouldn't
	 * change after being captured in save_state.
	 */
	aspm_ctl = lnkctl & (PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
	lnkctl &= ~(PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);

	/* Depends on pci_save_pcie_state(): cap[1] is LNKCTL */
	cap = (u16 *)&save_state->cap.data[0];
	cap[1] = lnkctl | aspm_ctl;
}
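
/*
 * Write the Clock PM (CLKREQ#) enable bit into LNKCTL of every function on
 * the Link and refresh each function's saved PCIe capability state, without
 * checking whether the Link is Clock PM capable; pcie_set_clkpm() is the
 * checked wrapper.
 */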
static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;
	u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;

	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN,
						   val);
		pci_update_aspm_saved_state(child);
	}
	link->clkpm_enabled = !!enable;
}

static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
	/*
	 * Don't enable Clock PM if the link is not Clock PM capable
	 * or Clock PM is disabled
	 */
	if (!link->clkpm_capable || link->clkpm_disable)
		enable = 0;
	/* Nothing to do if the requested state equals the current state */
	if (link->clkpm_enabled == enable)
		return;
	pcie_set_clkpm_nocheck(link, enable);
}
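
/*
 * Determine the Link's Clock PM capability and current state: the Link is
 * Clock PM capable only if every function advertises PCI_EXP_LNKCAP_CLKPM,
 * and it is considered enabled only if every function has CLKREQ# enabled.
 * A blacklisted Link is marked clkpm_disable.
 */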
static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
	int capable = 1, enabled = 1;
	u32 reg32;
	u16 reg16;
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;

	/* All functions should have the same cap and state, take the worst */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
		if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
			capable = 0;
			enabled = 0;
			break;
		}
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
			enabled = 0;
	}
	link->clkpm_enabled = enabled;
	link->clkpm_default = enabled;
	link->clkpm_capable = capable;
	link->clkpm_disable = blacklist ? 1 : 0;
}

/*
 * pcie_aspm_configure_common_clock: check if the 2 ends of a link
 * could use common clock. If they can, configure them to use the
 * common clock. That will reduce the ASPM state exit latency.
 */
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
	int same_clock = 1;
	u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
	struct pci_dev *child, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;
	/*
	 * All functions of a slot should have the same Slot Clock
	 * Configuration, so just check one function
	 */
	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
	BUG_ON(!pci_is_pcie(child));

	/* Check downstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Check upstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Port might be already in common clock mode */
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
	if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
		bool consistent = true;

		list_for_each_entry(child, &linkbus->devices, bus_list) {
			pcie_capability_read_word(child, PCI_EXP_LNKCTL,
						  &reg16);
			if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
				consistent = false;
				break;
			}
		}
		if (consistent)
			return;
		pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
	}

	ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
	/* Configure downstream component, all functions */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CCC, ccc);
	}

	/* Configure upstream component */
	pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_CCC, ccc);

	if (pcie_retrain_link(link->pdev, true)) {

		/* Training failed. Restore common clock configurations */
		pci_err(parent, "ASPM: Could not configure common clock\n");
		list_for_each_entry(child, &linkbus->devices, bus_list)
			pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
							   PCI_EXP_LNKCTL_CCC,
							   child_old_ccc[PCI_FUNC(child->devfn)]);
		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CCC, parent_old_ccc);
	}
}

/* Convert L0s latency encoding to ns */
static u32 calc_l0s_latency(u32 lnkcap)
{
	u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L0SEL, lnkcap);

	if (encoding == 0x7)
		return 5 * NSEC_PER_USEC;	/* > 4us */
	return (64 << encoding);
}

/* Convert L0s acceptable latency encoding to ns */
static u32 calc_l0s_acceptable(u32 encoding)
{
	if (encoding == 0x7)
		return U32_MAX;
	return (64 << encoding);
}

/* Convert L1 latency encoding to ns */
static u32 calc_l1_latency(u32 lnkcap)
{
	u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L1EL, lnkcap);

	if (encoding == 0x7)
		return 65 * NSEC_PER_USEC;	/* > 64us */
	return NSEC_PER_USEC << encoding;
}

/* Convert L1 acceptable latency encoding to ns */
static u32 calc_l1_acceptable(u32 encoding)
{
	if (encoding == 0x7)
		return U32_MAX;
	return NSEC_PER_USEC << encoding;
}

/* Convert L1SS T_pwr encoding to usec */
static u32 calc_l12_pwron(struct pci_dev *pdev, u32 scale, u32 val)
{
	switch (scale) {
	case 0:
		return val * 2;
	case 1:
		return val * 10;
	case 2:
		return val * 100;
	}
	pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
	return 0;
}

/*
 * Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
 * register. Ports enter L1.2 when the most recent LTR value is greater
 * than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
 * don't enter L1.2 too aggressively.
 *
 * See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
 */
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
	u64 threshold_ns = (u64)threshold_us * NSEC_PER_USEC;

	/*
	 * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
	 * value of 0x3ff.
	 */
	if (threshold_ns <= 1 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
		*scale = 0;		/* Value times 1ns */
		*value = threshold_ns;
	} else if (threshold_ns <= 32 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
		*scale = 1;		/* Value times 32ns */
		*value = roundup(threshold_ns, 32) / 32;
	} else if (threshold_ns <= 1024 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
		*scale = 2;		/* Value times 1024ns */
		*value = roundup(threshold_ns, 1024) / 1024;
	} else if (threshold_ns <= 32768 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
		*scale = 3;		/* Value times 32768ns */
		*value = roundup(threshold_ns, 32768) / 32768;
	} else if (threshold_ns <= 1048576 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
		*scale = 4;		/* Value times 1048576ns */
		*value = roundup(threshold_ns, 1048576) / 1048576;
	} else if (threshold_ns <= (u64)33554432 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
		*scale = 5;		/* Value times 33554432ns */
		*value = roundup(threshold_ns, 33554432) / 33554432;
	} else {
		*scale = 5;
		*value = FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE);
	}
}
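
/*
 * Walk the Links from @endpoint toward the Root Port and clear L0s/L1 from
 * aspm_capable on any Link whose exit latency exceeds what the endpoint can
 * accept (per its Device Capabilities).  Each Switch traversed adds 1 us to
 * the accumulated L1 exit latency.
 */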
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
	u32 latency, encoding, lnkcap_up, lnkcap_dw;
	u32 l1_switch_latency = 0, latency_up_l0s;
	u32 latency_up_l1, latency_dw_l0s, latency_dw_l1;
	u32 acceptable_l0s, acceptable_l1;
	struct pcie_link_state *link;

	/* Device not in D0 doesn't need latency check */
	if ((endpoint->current_state != PCI_D0) &&
	    (endpoint->current_state != PCI_UNKNOWN))
		return;

	link = endpoint->bus->self->link_state;

	/* Calculate endpoint L0s acceptable latency */
	encoding = FIELD_GET(PCI_EXP_DEVCAP_L0S, endpoint->devcap);
	acceptable_l0s = calc_l0s_acceptable(encoding);

	/* Calculate endpoint L1 acceptable latency */
	encoding = FIELD_GET(PCI_EXP_DEVCAP_L1, endpoint->devcap);
	acceptable_l1 = calc_l1_acceptable(encoding);

	while (link) {
		struct pci_dev *dev = pci_function_0(link->pdev->subordinate);

		/* Read direction exit latencies */
		pcie_capability_read_dword(link->pdev, PCI_EXP_LNKCAP,
					   &lnkcap_up);
		pcie_capability_read_dword(dev, PCI_EXP_LNKCAP,
					   &lnkcap_dw);
		latency_up_l0s = calc_l0s_latency(lnkcap_up);
		latency_up_l1 = calc_l1_latency(lnkcap_up);
		latency_dw_l0s = calc_l0s_latency(lnkcap_dw);
		latency_dw_l1 = calc_l1_latency(lnkcap_dw);

		/* Check upstream direction L0s latency */
		if ((link->aspm_capable & PCIE_LINK_STATE_L0S_UP) &&
		    (latency_up_l0s > acceptable_l0s))
			link->aspm_capable &= ~PCIE_LINK_STATE_L0S_UP;

		/* Check downstream direction L0s latency */
		if ((link->aspm_capable & PCIE_LINK_STATE_L0S_DW) &&
		    (latency_dw_l0s > acceptable_l0s))
			link->aspm_capable &= ~PCIE_LINK_STATE_L0S_DW;
		/*
		 * Check L1 latency.
		 * Every switch on the path to the root complex needs 1
		 * more microsecond for L1. Spec doesn't mention L0s.
		 *
		 * The exit latencies for L1 substates are not advertised
		 * by a device. Since the spec also doesn't mention a way
		 * to determine max latencies introduced by enabling L1
		 * substates on the components, it is not clear how to do
		 * an L1 substate exit latency check. We assume that the
		 * L1 exit latencies advertised by a device include L1
		 * substate latencies (and hence do not do any check).
		 */
		latency = max_t(u32, latency_up_l1, latency_dw_l1);
		if ((link->aspm_capable & PCIE_LINK_STATE_L1) &&
		    (latency + l1_switch_latency > acceptable_l1))
			link->aspm_capable &= ~PCIE_LINK_STATE_L1;
		l1_switch_latency += NSEC_PER_USEC;

		link = link->parent;
	}
}

/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l12_info(struct pcie_link_state *link,
			       u32 parent_l1ss_cap, u32 child_l1ss_cap)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 val1, val2, scale1, scale2;
	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
	u32 ctl1 = 0, ctl2 = 0;
	u32 pctl1, pctl2, cctl1, cctl2;
	u32 pl1_2_enables, cl1_2_enables;

	/* Choose the greater of the two Port Common_Mode_Restore_Times */
	val1 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, parent_l1ss_cap);
	val2 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, child_l1ss_cap);
	t_common_mode = max(val1, val2);

	/* Choose the greater of the two Port T_POWER_ON times */
	val1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, parent_l1ss_cap);
	scale1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, parent_l1ss_cap);
	val2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, child_l1ss_cap);
	scale2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, child_l1ss_cap);

	if (calc_l12_pwron(parent, scale1, val1) >
	    calc_l12_pwron(child, scale2, val2)) {
		ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale1) |
			FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val1);
		t_power_on = calc_l12_pwron(parent, scale1, val1);
	} else {
		ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale2) |
			FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val2);
		t_power_on = calc_l12_pwron(child, scale2, val2);
	}

	/*
	 * Set LTR_L1.2_THRESHOLD to the time required to transition the
	 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
	 * downstream devices report (via LTR) that they can tolerate at
	 * least that much latency.
	 *
	 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
	 * Table 5-11.  T(POWER_OFF) is at most 2us and T(L1.2) is at
	 * least 4us.
	 */
	l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
	encode_l12_threshold(l1_2_threshold, &scale, &value);
	ctl1 |= FIELD_PREP(PCI_L1SS_CTL1_CM_RESTORE_TIME, t_common_mode) |
		FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_VALUE, value) |
		FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_SCALE, scale);

	/* Some broken devices only support dword access to L1 SS */
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);

	if (ctl1 == pctl1 && ctl1 == cctl1 &&
	    ctl2 == pctl2 && ctl2 == cctl2)
		return;

	/* Disable L1.2 while updating.  See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
	pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
	cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;

	if (pl1_2_enables || cl1_2_enables) {
		pci_clear_and_set_config_dword(child,
					       child->l1ss + PCI_L1SS_CTL1,
					       PCI_L1SS_CTL1_L1_2_MASK, 0);
		pci_clear_and_set_config_dword(parent,
					       parent->l1ss + PCI_L1SS_CTL1,
					       PCI_L1SS_CTL1_L1_2_MASK, 0);
	}

	/* Program T_POWER_ON times in both ports */
	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
	pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);

	/* Program Common_Mode_Restore_Time in upstream device */
	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);

	/* Program LTR_L1.2_THRESHOLD time in both ports */
	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
				       ctl1);
	pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
				       ctl1);

	if (pl1_2_enables || cl1_2_enables) {
		pci_clear_and_set_config_dword(parent,
					       parent->l1ss + PCI_L1SS_CTL1, 0,
					       pl1_2_enables);
		pci_clear_and_set_config_dword(child,
					       child->l1ss + PCI_L1SS_CTL1, 0,
					       cl1_2_enables);
	}
}
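
/*
 * Discover which L1 PM Substates both ends of the Link support and which
 * the firmware left enabled, and record them in link->aspm_support and
 * link->aspm_enabled.  ASPM L1.2 is only usable when the whole path has
 * LTR; if L1.2 (ASPM or PCI-PM) is supported, the timing parameters are
 * programmed via aspm_calc_l12_info().
 */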
static void aspm_l1ss_init(struct pcie_link_state *link)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 parent_l1ss_cap, child_l1ss_cap;
	u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;

	if (!parent->l1ss || !child->l1ss)
		return;

	/* Setup L1 substate */
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
			      &parent_l1ss_cap);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
			      &child_l1ss_cap);

	if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
		parent_l1ss_cap = 0;
	if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
		child_l1ss_cap = 0;

	/*
	 * If we don't have LTR for the entire path from the Root Complex
	 * to this device, we can't use ASPM L1.2 because it relies on the
	 * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
	 */
	if (!child->ltr_path)
		child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;

	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
		link->aspm_support |= PCIE_LINK_STATE_L1_1;
	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
		link->aspm_support |= PCIE_LINK_STATE_L1_2;
	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
		link->aspm_support |= PCIE_LINK_STATE_L1_1_PCIPM;
	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
		link->aspm_support |= PCIE_LINK_STATE_L1_2_PCIPM;

	if (parent_l1ss_cap)
		pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				      &parent_l1ss_ctl1);
	if (child_l1ss_cap)
		pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
				      &child_l1ss_ctl1);

	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
		link->aspm_enabled |= PCIE_LINK_STATE_L1_1;
	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
		link->aspm_enabled |= PCIE_LINK_STATE_L1_2;
	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
		link->aspm_enabled |= PCIE_LINK_STATE_L1_1_PCIPM;
	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
		link->aspm_enabled |= PCIE_LINK_STATE_L1_2_PCIPM;

	if (link->aspm_support & PCIE_LINK_STATE_L1_2_MASK)
		aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
}
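
/*
 * Initialize the ASPM state of a Link: configure common clocking, read the
 * Link Capabilities and Link Control registers of both Link partners to
 * determine which ASPM states are supported and which the firmware enabled,
 * then prune aspm_capable according to the acceptable latencies of the
 * endpoints below the Link.  A blacklisted Link is marked fully disabled.
 */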
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 parent_lnkcap, child_lnkcap;
	u16 parent_lnkctl, child_lnkctl;
	struct pci_bus *linkbus = parent->subordinate;

	if (blacklist) {
		/* Set enabled/disabled masks so that we disable ASPM later */
		link->aspm_enabled = PCIE_LINK_STATE_ASPM_ALL;
		link->aspm_disable = PCIE_LINK_STATE_ASPM_ALL;
		return;
	}

	/*
	 * If ASPM not supported, don't mess with the clocks and link,
	 * bail out now.
	 */
	pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
	pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
	if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
		return;

	/* Configure common clock before checking latencies */
	pcie_aspm_configure_common_clock(link);

	/*
	 * Re-read upstream/downstream components' register state after
	 * clock configuration. L0s & L1 exit latencies in the otherwise
	 * read-only Link Capabilities may change depending on common clock
	 * configuration (PCIe r5.0, sec 7.5.3.6).
	 */
	pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
	pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
	pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);

	/* Disable L0s/L1 before updating L1SS config */
	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
		pcie_capability_write_word(child, PCI_EXP_LNKCTL,
					   child_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
		pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
					   parent_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
	}

	/*
	 * Setup L0s state
	 *
	 * Note that we must not enable L0s in either direction on a
	 * given link unless components on both sides of the link each
	 * support L0s.
	 */
	if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
		link->aspm_support |= PCIE_LINK_STATE_L0S;

	if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
		link->aspm_enabled |= PCIE_LINK_STATE_L0S_UP;
	if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
		link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW;

	/* Setup L1 state */
	if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
		link->aspm_support |= PCIE_LINK_STATE_L1;

	if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
		link->aspm_enabled |= PCIE_LINK_STATE_L1;

	aspm_l1ss_init(link);

	/* Restore L0s/L1 if they were enabled */
	if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
	    FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_lnkctl);
		pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_lnkctl);
	}

	/* Save default state */
	link->aspm_default = link->aspm_enabled;

	/* Setup initial capable state. Will be updated later */
	link->aspm_capable = link->aspm_support;

	/* Get and check endpoint acceptable latencies */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
		    pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
			continue;

		pcie_aspm_check_latency(child);
	}
}

/* Configure the ASPM L1 substates. Caller must disable L1 first. */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
	u32 val;
	struct pci_dev *child = link->downstream, *parent = link->pdev;

	val = 0;
	if (state & PCIE_LINK_STATE_L1_1)
		val |= PCI_L1SS_CTL1_ASPM_L1_1;
	if (state & PCIE_LINK_STATE_L1_2)
		val |= PCI_L1SS_CTL1_ASPM_L1_2;
	if (state & PCIE_LINK_STATE_L1_1_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
	if (state & PCIE_LINK_STATE_L1_2_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_2;

	/*
	 * PCIe r6.2, sec 5.5.4, rules for enabling L1 PM Substates:
	 * - Clear L1.x enable bits at child first, then at parent
	 * - Set L1.x enable bits at parent first, then at child
	 * - ASPM/PCIPM L1.2 must be disabled while programming timing
	 *   parameters
	 */

	/* Disable all L1 substates */
	pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1SS_MASK, 0);
	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1SS_MASK, 0);

	/* Enable what we need to enable */
	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1SS_MASK, val);
	pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_L1SS_MASK, val);
}

static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC, val);
}
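
/*
 * Apply an ASPM configuration to a Link: reduce @state to what the Link is
 * capable of and not explicitly disabled, clear the ASPM Control fields of
 * all functions while the L1 substates are reprogrammed, re-enable ASPM in
 * the upstream component before the downstream functions, and finally
 * refresh the saved state used across suspend/resume.
 */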
static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
	u32 upstream = 0, dwstream = 0;
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;

	/* Enable only the states that were not explicitly disabled */
	state &= (link->aspm_capable & ~link->aspm_disable);

	/* Can't enable any substates if L1 is not enabled */
	if (!(state & PCIE_LINK_STATE_L1))
		state &= ~PCIE_LINK_STATE_L1SS;

	/* Spec says both ports must be in D0 before enabling PCI PM substates */
	if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
		state &= ~PCIE_LINK_STATE_L1_SS_PCIPM;
		state |= (link->aspm_enabled & PCIE_LINK_STATE_L1_SS_PCIPM);
	}

	/* Nothing to do if the link is already in the requested state */
	if (link->aspm_enabled == state)
		return;
	/* Convert ASPM state to upstream/downstream ASPM register state */
	if (state & PCIE_LINK_STATE_L0S_UP)
		dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & PCIE_LINK_STATE_L0S_DW)
		upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & PCIE_LINK_STATE_L1) {
		upstream |= PCI_EXP_LNKCTL_ASPM_L1;
		dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
	}

	/*
	 * Per PCIe r6.2, sec 5.5.4, setting either or both of the enable
	 * bits for ASPM L1 PM Substates must be done while ASPM L1 is
	 * disabled. Disable L1 here and apply new configuration after L1SS
	 * configuration has been completed.
	 *
	 * Per sec 7.5.3.7, when disabling ASPM L1, software must disable
	 * it in the Downstream component prior to disabling it in the
	 * Upstream component, and ASPM L1 must be enabled in the Upstream
	 * component prior to enabling it in the Downstream component.
	 *
	 * Sec 7.5.3.7 also recommends programming the same ASPM Control
	 * value for all functions of a multi-function device.
	 */
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_config_aspm_dev(child, 0);
	pcie_config_aspm_dev(parent, 0);

	if (link->aspm_capable & PCIE_LINK_STATE_L1SS)
		pcie_config_aspm_l1ss(link, state);

	pcie_config_aspm_dev(parent, upstream);
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_config_aspm_dev(child, dwstream);

	link->aspm_enabled = state;

	/* Update latest ASPM configuration in saved context */
	pci_save_aspm_l1ss_state(link->downstream);
	pci_update_aspm_saved_state(link->downstream);
	pci_save_aspm_l1ss_state(parent);
	pci_update_aspm_saved_state(parent);
}

static void pcie_config_aspm_path(struct pcie_link_state *link)
{
	while (link) {
		pcie_config_aspm_link(link, policy_to_aspm_state(link));
		link = link->parent;
	}
}

static void free_link_state(struct pcie_link_state *link)
{
	link->pdev->link_state = NULL;
	kfree(link);
}

static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
	struct pci_dev *child;
	u32 reg32;

	/*
	 * Some functions in a slot might not all be PCIe functions,
	 * which is very strange. Disable ASPM for the whole slot.
	 */
	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
		if (!pci_is_pcie(child))
			return -EINVAL;

		/*
		 * If ASPM is disabled then we're not going to change
		 * the BIOS state. It's safe to continue even if it's a
		 * pre-1.1 device
		 */

		if (aspm_disabled)
			continue;

		/*
		 * Disable ASPM for pre-1.1 PCIe devices. Like Microsoft, we
		 * use the RBER bit to determine whether a function is a
		 * 1.1-compliant device.
		 */
		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
			pci_info(child, "disabling ASPM on pre-1.1 PCIe device.  You can enable it with 'pcie_aspm=force'\n");
			return -EINVAL;
		}
	}
	return 0;
}
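
/*
 * Allocate a pcie_link_state for the Link below Downstream Port @pdev, wire
 * it into the link_state hierarchy (root and parent pointers), and add it to
 * link_list.  Returns NULL if allocation fails or the parent Link has no
 * link_state.
 */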
static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	INIT_LIST_HEAD(&link->sibling);
	link->pdev = pdev;
	link->downstream = pci_function_0(pdev->subordinate);

	/*
	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
	 * hierarchies. Note that some PCIe host implementations omit
	 * the root ports entirely, in which case a downstream port on
	 * a switch may become the root of the link state chain for all
	 * its subordinate endpoints.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
	    !pdev->bus->parent->self) {
		link->root = link;
	} else {
		struct pcie_link_state *parent;

		parent = pdev->bus->parent->self->link_state;
		if (!parent) {
			kfree(link);
			return NULL;
		}

		link->parent = parent;
		link->root = link->parent->root;
	}

	list_add(&link->sibling, &link_list);
	pdev->link_state = link;
	return link;
}

static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
{
	struct pci_dev *child;

	list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
		sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
}

/*
 * pcie_aspm_init_link_state: Initialize PCIe link state.
 * Called after a PCIe port and its child devices have been scanned.
 * @pdev: the root port or switch downstream port
 */
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;
	int blacklist = !!pcie_aspm_sanity_check(pdev);

	if (!aspm_support_enabled)
		return;

	if (pdev->link_state)
		return;

	/*
	 * We allocate pcie_link_state for the component on the upstream
	 * end of a Link, so there's nothing to do unless this device is
	 * a Downstream Port.
	 */
	if (!pcie_downstream_port(pdev))
		return;

	/* VIA has a strange chipset, root port is under a bridge */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
	    pdev->bus->self)
		return;

	down_read(&pci_bus_sem);
	if (list_empty(&pdev->subordinate->devices))
		goto out;

	mutex_lock(&aspm_lock);
	link = alloc_pcie_link_state(pdev);
	if (!link)
		goto unlock;
	/*
	 * Setup initial ASPM state. Note that we need to configure
	 * upstream links as well, because their capable state can be
	 * updated by pcie_aspm_cap_init().
	 */
	pcie_aspm_cap_init(link, blacklist);

	/* Setup initial Clock PM state */
	pcie_clkpm_cap_init(link, blacklist);

	/*
	 * At this stage drivers haven't had an opportunity to change the
	 * link policy setting. Enabling ASPM on broken hardware can cripple
	 * it even before the driver has had a chance to disable ASPM, so
	 * default to a safe level right now. If we're enabling ASPM beyond
	 * the BIOS's expectation, we'll do so once pci_enable_device() is
	 * called.
	 */
	if (aspm_policy != POLICY_POWERSAVE &&
	    aspm_policy != POLICY_POWER_SUPERSAVE) {
		pcie_config_aspm_path(link);
		pcie_set_clkpm(link, policy_to_clkpm_state(link));
	}

	pcie_aspm_update_sysfs_visibility(pdev);

unlock:
	mutex_unlock(&aspm_lock);
out:
	up_read(&pci_bus_sem);
}

void pci_bridge_reconfigure_ltr(struct pci_dev *pdev)
{
	struct pci_dev *bridge;
	u32 ctl;

	bridge = pci_upstream_bridge(pdev);
	if (bridge && bridge->ltr_path) {
		pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
		if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
			pci_dbg(bridge, "re-enabling LTR\n");
			pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
						 PCI_EXP_DEVCTL2_LTR_EN);
		}
	}
}

void pci_configure_ltr(struct pci_dev *pdev)
{
	struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
	struct pci_dev *bridge;
	u32 cap, ctl;

	if (!pci_is_pcie(pdev))
		return;

	pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_LTR))
		return;

	pcie_capability_read_dword(pdev, PCI_EXP_DEVCTL2, &ctl);
	if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
		if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
			pdev->ltr_path = 1;
			return;
		}

		bridge = pci_upstream_bridge(pdev);
		if (bridge && bridge->ltr_path)
			pdev->ltr_path = 1;

		return;
	}

	if (!host->native_ltr)
		return;

	/*
	 * Software must not enable LTR in an Endpoint unless the Root
	 * Complex and all intermediate Switches indicate support for LTR.
	 * PCIe r4.0, sec 6.18.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_LTR_EN);
		pdev->ltr_path = 1;
		return;
	}

	/*
	 * If we're configuring a hot-added device, LTR was likely
	 * disabled in the upstream bridge, so re-enable it before enabling
	 * it in the new device.
	 */
	bridge = pci_upstream_bridge(pdev);
	if (bridge && bridge->ltr_path) {
		pci_bridge_reconfigure_ltr(pdev);
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_LTR_EN);
		pdev->ltr_path = 1;
	}
}

/* Recheck latencies and update aspm_capable for links under the root */
static void pcie_update_aspm_capable(struct pcie_link_state *root)
{
	struct pcie_link_state *link;

	BUG_ON(root->parent);
	list_for_each_entry(link, &link_list, sibling) {
		if (link->root != root)
			continue;
		link->aspm_capable = link->aspm_support;
	}
	list_for_each_entry(link, &link_list, sibling) {
		struct pci_dev *child;
		struct pci_bus *linkbus = link->pdev->subordinate;

		if (link->root != root)
			continue;
		list_for_each_entry(child, &linkbus->devices, bus_list) {
			if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
			    (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
				continue;
			pcie_aspm_check_latency(child);
		}
	}
}

/* @pdev: the endpoint device */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pcie_link_state *link, *root, *parent_link;

	if (!parent || !parent->link_state)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);

	link = parent->link_state;
	root = link->root;
	parent_link = link->parent;

	/*
	 * link->downstream is a pointer to the pci_dev of function 0. If
	 * we remove that function, the pci_dev is about to be deallocated,
	 * so we can't use link->downstream again. Free the link state to
	 * avoid this.
	 *
	 * If we're removing a non-0 function, it's possible we could
	 * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends
	 * programming the same ASPM Control value for all functions of
	 * multi-function devices, so disable ASPM for all of them.
	 */
	pcie_config_aspm_link(link, 0);
	list_del(&link->sibling);
	free_link_state(link);

	/* Recheck latencies and configure upstream links */
	if (parent_link) {
		pcie_update_aspm_capable(root);
		pcie_config_aspm_path(parent_link);
	}

	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}

/*
 * @pdev: the root port or switch downstream port
 * @locked: whether pci_bus_sem is held
 */
void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
{
	struct pcie_link_state *link = pdev->link_state;

	if (aspm_disabled || !link)
		return;
	/*
	 * Devices changed PM state, we should recheck if latency
	 * meets all functions' requirements
	 */
	if (!locked)
		down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_update_aspm_capable(link->root);
	pcie_config_aspm_path(link);
	mutex_unlock(&aspm_lock);
	if (!locked)
		up_read(&pci_bus_sem);
}

void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
	struct pcie_link_state *link = pdev->link_state;

	if (aspm_disabled || !link)
		return;

	if (aspm_policy != POLICY_POWERSAVE &&
	    aspm_policy != POLICY_POWER_SUPERSAVE)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_config_aspm_path(link);
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}

static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
{
	struct pci_dev *bridge;

	if (!pci_is_pcie(pdev))
		return NULL;

	bridge = pci_upstream_bridge(pdev);
	if (!bridge || !pci_is_pcie(bridge))
		return NULL;

	return bridge->link_state;
}

static u8 pci_calc_aspm_disable_mask(int state)
{
	state &= ~PCIE_LINK_STATE_CLKPM;

	/* L1 PM substates require L1 */
	if (state & PCIE_LINK_STATE_L1)
		state |= PCIE_LINK_STATE_L1SS;

	return state;
}

static u8 pci_calc_aspm_enable_mask(int state)
{
	state &= ~PCIE_LINK_STATE_CLKPM;

	/* L1 PM substates require L1 */
	if (state & PCIE_LINK_STATE_L1SS)
		state |= PCIE_LINK_STATE_L1;

	return state;
}

static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
{
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	if (!link)
		return -EINVAL;
	/*
	 * A driver requested that ASPM be disabled on this device, but
	 * if we don't have permission to manage ASPM (e.g., on ACPI
	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
	 * the _OSC method), we can't honor that request. Windows has
	 * a similar mechanism using "PciASPMOptOut", which is also
	 * ignored in this situation.
	 */
	if (aspm_disabled) {
		pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
		return -EPERM;
	}

	if (!locked)
		down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	link->aspm_disable |= pci_calc_aspm_disable_mask(state);
	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	if (state & PCIE_LINK_STATE_CLKPM)
		link->clkpm_disable = 1;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	if (!locked)
		up_read(&pci_bus_sem);

	return 0;
}

int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
	lockdep_assert_held_read(&pci_bus_sem);

	return __pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);

/**
 * pci_disable_link_state - Disable device's link state, so the link will
 * never enter specific states.  Note that if the BIOS didn't grant ASPM
 * control to the OS, this does nothing because we can't touch the LNKCTL
 * register. Returns 0 or a negative errno.
 *
 * @pdev: PCI device
 * @state: ASPM link state to disable
 */
int pci_disable_link_state(struct pci_dev *pdev, int state)
{
	return __pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state);

static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
{
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	if (!link)
		return -EINVAL;
	/*
	 * A driver requested that ASPM be enabled on this device, but
	 * if we don't have permission to manage ASPM (e.g., on ACPI
	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
	 * the _OSC method), we can't honor that request.
	 */
	if (aspm_disabled) {
		pci_warn(pdev, "can't override BIOS ASPM; OS doesn't have ASPM control\n");
		return -EPERM;
	}

	if (!locked)
		down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	link->aspm_default = pci_calc_aspm_enable_mask(state);
	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	if (!locked)
		up_read(&pci_bus_sem);

	return 0;
}

/**
 * pci_enable_link_state - Clear and set the default device link state so that
 * the link may be allowed to enter the specified states. Note that if the
 * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
 * touch the LNKCTL register. Also note that this does not enable states
 * disabled by pci_disable_link_state(). Return 0 or a negative errno.
 *
 * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
 * PCIe r6.0, sec 5.5.4.
 *
 * @pdev: PCI device
 * @state: Mask of ASPM link states to enable
 */
int pci_enable_link_state(struct pci_dev *pdev, int state)
{
	return __pci_enable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_enable_link_state);

/**
 * pci_enable_link_state_locked - Clear and set the default device link state
 * so that the link may be allowed to enter the specified states. Note that if
 * the BIOS didn't grant ASPM control to the OS, this does nothing because we
 * can't touch the LNKCTL register. Also note that this does not enable states
 * disabled by pci_disable_link_state(). Return 0 or a negative errno.
 *
 * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
 * PCIe r6.0, sec 5.5.4.
 *
 * @pdev: PCI device
 * @state: Mask of ASPM link states to enable
 *
 * Context: Caller holds pci_bus_sem read lock.
 */
int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
{
	lockdep_assert_held_read(&pci_bus_sem);

	return __pci_enable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_enable_link_state_locked);

static int pcie_aspm_set_policy(const char *val,
				const struct kernel_param *kp)
{
	int i;
	struct pcie_link_state *link;

	if (aspm_disabled)
		return -EPERM;
	i = sysfs_match_string(policy_str, val);
	if (i < 0)
		return i;
	if (i == aspm_policy)
		return 0;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	aspm_policy = i;
	list_for_each_entry(link, &link_list, sibling) {
		pcie_config_aspm_link(link, policy_to_aspm_state(link));
		pcie_set_clkpm(link, policy_to_clkpm_state(link));
	}
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
	return 0;
}

static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
{
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(policy_str); i++)
		if (i == aspm_policy)
			cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
		else
			cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
	cnt += sprintf(buffer + cnt, "\n");
	return cnt;
}

module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
		  NULL, 0644);

/**
 * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
 * @pdev: Target device.
 *
 * Relies on the upstream bridge's link_state being valid. The link_state
 * is deallocated only when the last child of the bridge (i.e., @pdev or a
 * sibling) is removed, and the caller should be holding a reference to
 * @pdev, so this should be safe.
 */
bool pcie_aspm_enabled(struct pci_dev *pdev)
{
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	if (!link)
		return false;

	return link->aspm_enabled;
}
EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
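
/*
 * Helpers backing the per-device sysfs "link" attribute group (l0s_aspm,
 * l1_aspm, l1_1_aspm, l1_2_aspm, l1_1_pcipm, l1_2_pcipm, clkpm): the show
 * functions report whether a state is currently enabled, and the store
 * functions adjust the link's disable mask and reconfigure the Link.
 */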
static ssize_t aspm_attr_show_common(struct device *dev,
				     struct device_attribute *attr,
				     char *buf, u8 state)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	return sysfs_emit(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
}

static ssize_t aspm_attr_store_common(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t len, u8 state)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
	bool state_enable;

	if (kstrtobool(buf, &state_enable) < 0)
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);

	if (state_enable) {
		link->aspm_disable &= ~state;
		/* need to enable L1 for substates */
		if (state & PCIE_LINK_STATE_L1SS)
			link->aspm_disable &= ~PCIE_LINK_STATE_L1;
	} else {
		link->aspm_disable |= state;
		if (state & PCIE_LINK_STATE_L1)
			link->aspm_disable |= PCIE_LINK_STATE_L1SS;
	}

	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);

	return len;
}

#define ASPM_ATTR(_f, _s)						\
static ssize_t _f##_show(struct device *dev,				\
			 struct device_attribute *attr, char *buf)	\
{ return aspm_attr_show_common(dev, attr, buf, PCIE_LINK_STATE_##_s); }	\
									\
static ssize_t _f##_store(struct device *dev,				\
			  struct device_attribute *attr,		\
			  const char *buf, size_t len)			\
{ return aspm_attr_store_common(dev, attr, buf, len, PCIE_LINK_STATE_##_s); }

ASPM_ATTR(l0s_aspm, L0S)
ASPM_ATTR(l1_aspm, L1)
ASPM_ATTR(l1_1_aspm, L1_1)
ASPM_ATTR(l1_2_aspm, L1_2)
ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)

static ssize_t clkpm_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);

	return sysfs_emit(buf, "%d\n", link->clkpm_enabled);
}

static ssize_t clkpm_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
	bool state_enable;

	if (kstrtobool(buf, &state_enable) < 0)
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);

	link->clkpm_disable = !state_enable;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));

	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);

	return len;
}

static DEVICE_ATTR_RW(clkpm);
static DEVICE_ATTR_RW(l0s_aspm);
static DEVICE_ATTR_RW(l1_aspm);
static DEVICE_ATTR_RW(l1_1_aspm);
static DEVICE_ATTR_RW(l1_2_aspm);
static DEVICE_ATTR_RW(l1_1_pcipm);
static DEVICE_ATTR_RW(l1_2_pcipm);

static struct attribute *aspm_ctrl_attrs[] = {
	&dev_attr_clkpm.attr,
	&dev_attr_l0s_aspm.attr,
	&dev_attr_l1_aspm.attr,
	&dev_attr_l1_1_aspm.attr,
	&dev_attr_l1_2_aspm.attr,
	&dev_attr_l1_1_pcipm.attr,
	&dev_attr_l1_2_pcipm.attr,
	NULL
};

static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
					   struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link = pcie_aspm_get_link(pdev);
	static const u8 aspm_state_map[] = {
		PCIE_LINK_STATE_L0S,
		PCIE_LINK_STATE_L1,
		PCIE_LINK_STATE_L1_1,
		PCIE_LINK_STATE_L1_2,
		PCIE_LINK_STATE_L1_1_PCIPM,
		PCIE_LINK_STATE_L1_2_PCIPM,
	};

	if (aspm_disabled || !link)
		return 0;

	if (n == 0)
		return link->clkpm_capable ? a->mode : 0;

	return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
}

const struct attribute_group aspm_ctrl_attr_group = {
	.name = "link",
	.attrs = aspm_ctrl_attrs,
	.is_visible = aspm_ctrl_attrs_are_visible,
};

static int __init pcie_aspm_disable(char *str)
{
	if (!strcmp(str, "off")) {
		aspm_policy = POLICY_DEFAULT;
		aspm_disabled = 1;
		aspm_support_enabled = false;
		pr_info("PCIe ASPM is disabled\n");
	} else if (!strcmp(str, "force")) {
		aspm_force = 1;
		pr_info("PCIe ASPM is forcibly enabled\n");
	}
	return 1;
}

__setup("pcie_aspm=", pcie_aspm_disable);

void pcie_no_aspm(void)
{
	/*
	 * Disabling ASPM is intended to prevent the kernel from modifying
	 * existing hardware state, not to clear existing state. To that end:
	 * (a) set policy to POLICY_DEFAULT in order to avoid changing state
	 * (b) prevent userspace from changing policy
	 */
	if (!aspm_force) {
		aspm_policy = POLICY_DEFAULT;
		aspm_disabled = 1;
	}
}

bool pcie_aspm_support_enabled(void)
{
	return aspm_support_enabled;
}

#endif /* CONFIG_PCIEASPM */