/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */
#include "pci.h"

unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
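/*
 * Example (illustrative sketch, not part of the original file): a driver
 * that wants CPU access to a memory BAR can map it with pci_ioremap_bar()
 * instead of open-coding the flags check and the ioremap_nocache() call.
 * SOME_RESET_REG is a hypothetical register offset:
 *
 *	void __iomem *regs;
 *
 *	regs = pci_ioremap_bar(pdev, 0);	/" map BAR 0 "/
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0, regs + SOME_RESET_REG);
 */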
#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM		Power Management
 *  %PCI_CAP_ID_AGP		Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD		Vital Product Data
 *  %PCI_CAP_ID_SLOTID		Slot Identification
 *  %PCI_CAP_ID_MSI		Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP		CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX		PCI-X
 *  %PCI_CAP_ID_EXP		PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
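/*
 * Example (illustrative sketch): locating the Power Management capability
 * and reading its PMC register.  The offset returned by
 * pci_find_capability() is the base for the register offsets defined in
 * <linux/pci_regs.h>:
 *
 *	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
 */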
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
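/*
 * Example (illustrative sketch): walking every Hypertransport capability
 * of a given type by pairing pci_find_ht_capability() with
 * pci_find_next_ht_capability(), as the kernel-doc above suggests.  The
 * loop terminates on 0, and the TTL inside the helpers bounds it even on
 * broken devices:
 *
 *	int pos = pci_find_ht_capability(pdev, HT_CAPTYPE_MSI_MAPPING);
 *
 *	while (pos) {
 *		/" inspect the registers at "pos" here "/
 *		pos = pci_find_next_ht_capability(pdev, pos,
 *						  HT_CAPTYPE_MSI_MAPPING);
 *	}
 */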
/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure containing resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}
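/*
 * Example (illustrative sketch): platform glue (e.g. the ACPI PCI layer)
 * registers its hooks once at init time; pci_set_platform_pm() rejects any
 * ops structure with a missing callback, so all five must be populated.
 * The "acme" callback names below are hypothetical:
 *
 *	static struct pci_platform_pm_ops acme_pci_platform_pm = {
 *		.is_manageable	= acme_pci_power_manageable,
 *		.set_state	= acme_pci_set_power_state,
 *		.choose_state	= acme_pci_choose_state,
 *		.can_wakeup	= acme_pci_can_wakeup,
 *		.sleep_wake	= acme_pci_sleep_wake,
 *	};
 *
 *	pci_set_platform_pm(&acme_pci_platform_pm);
 */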
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 * @wait: If 'true', wait for the device to change its power state.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int
pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
{
	u16 pmcsr;
	bool need_restore = false;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * we can enter D0 from any state, but we can only go deeper
	 * to sleep if we're already in a low power state.
	 */
	if (dev->current_state == state) {
		/* we're already there */
		return 0;
	} else if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) {
			need_restore = true;
			wait = true;
		}
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	if (!wait)
		return 0;

	/* Mandatory power management transition delays;
	 * see PCI PM 1.1, section 5.6.1, table 18.
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(pci_pm_d3_delay);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	dev->current_state = state;

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (wait && dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	if (state == PCI_D0 && platform_pci_power_manageable(dev)) {
		/*
		 * Allow the platform to change the state, for example via ACPI
		 * _PR0, _PS0 and some such, but do not trust it.
		 */
		int ret = platform_pci_set_power_state(dev, PCI_D0);
		if (!ret)
			pci_update_current_state(dev, PCI_D0);
	}
	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state, true);

	if (state > PCI_D0 && platform_pci_power_manageable(dev)) {
		/* Allow the platform to finalize the transition */
		int ret = platform_pci_set_power_state(dev, state);
		if (!ret) {
			pci_update_current_state(dev, state);
			error = 0;
		}
	}

	return error;
}
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}
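/*
 * Example (illustrative sketch): the classic legacy suspend sequence a
 * driver might use, combining pci_save_state(), pci_choose_state() and
 * pci_set_power_state():
 *
 *	static int acme_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_disable_device(pdev);
 *		return pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *	}
 */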
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);

	/*
	 * The Base Address registers should be programmed before the command
	 * register(s)
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
				"space at offset %#x (was %#x, writing %#x)\n",
				i, val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	return 0;
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (atomic_read(&dev->enable_cnt))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
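/*
 * Example (illustrative sketch): typical probe() usage of the enable
 * family, with the matching unwind on failure.  pci_request_regions() and
 * pci_set_master() are defined further down in this file:
 *
 *	static int acme_probe(struct pci_dev *pdev,
 *			      const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *		err = pci_request_regions(pdev, "acme");
 *		if (err) {
 *			pci_disable_device(pdev);
 *			return err;
 *		}
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */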
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
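/*
 * Example (illustrative sketch): with the managed interface the explicit
 * unwind from the previous example disappears, because pcim_release()
 * undoes everything when the driver detaches:
 *
 *	static int acme_probe(struct pci_dev *pdev,
 *			      const struct pci_device_id *id)
 *	{
 *		int err = pcim_enable_device(pdev);
 *		if (err)
 *			return err;
 *		/" no pci_disable_device() needed on any error path "/
 *		return 0;
 *	}
 */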
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (atomic_read(&dev->enable_cnt))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
			enable ? "enabled" : "disabled");
}

/**
 * pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
	int error = 0;
	bool pme_done = false;

	if (enable && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson, we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (!enable && platform_pci_can_wakeup(dev))
		error = platform_pci_sleep_wake(dev, false);

	if (!enable || pci_pme_capable(dev, state)) {
		pci_pme_active(dev, enable);
		pme_done = true;
	}

	if (enable && platform_pci_can_wakeup(dev))
		error = platform_pci_sleep_wake(dev, true);

	return pme_done ? 0 : error;
}

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns an error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
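/*
 * Example (illustrative sketch): arming wake-up from a suspend handler.
 * pci_wake_from_d3() picks D3cold when the device can signal PME# from
 * there and falls back to D3hot otherwise, so a driver does not need to
 * inspect dev->pme_support itself:
 *
 *	if (device_may_wakeup(&pdev->dev))
 *		pci_wake_from_d3(pdev, true);
 */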
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (!dev->pm_cap)
			return PCI_POWER_ERROR;

		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, true);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
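/*
 * Example (illustrative sketch): a suspend/resume pair built on the two
 * helpers above, which bundle the wake-up arming and the power state
 * choice (via pci_target_state()) into single calls:
 *
 *	static int acme_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int acme_resume(struct pci_dev *pdev)
 *	{
 *		pci_back_from_sleep(pdev);
 *		return pci_restore_state(pdev);
 *	}
 */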
" D3hot" : "", 1303 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); 1304 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; 1305 /* 1306 * Make device's PM flags reflect the wake-up capability, but 1307 * let the user space enable it to wake up the system as needed. 1308 */ 1309 device_set_wakeup_capable(&dev->dev, true); 1310 device_set_wakeup_enable(&dev->dev, false); 1311 /* Disable the PME# generation functionality */ 1312 pci_pme_active(dev, false); 1313 } else { 1314 dev->pme_support = 0; 1315 } 1316 } 1317 1318 /** 1319 * platform_pci_wakeup_init - init platform wakeup if present 1320 * @dev: PCI device 1321 * 1322 * Some devices don't have PCI PM caps but can still generate wakeup 1323 * events through platform methods (like ACPI events). If @dev supports 1324 * platform wakeup events, set the device flag to indicate as much. This 1325 * may be redundant if the device also supports PCI PM caps, but double 1326 * initialization should be safe in that case. 1327 */ 1328 void platform_pci_wakeup_init(struct pci_dev *dev) 1329 { 1330 if (!platform_pci_can_wakeup(dev)) 1331 return; 1332 1333 device_set_wakeup_capable(&dev->dev, true); 1334 device_set_wakeup_enable(&dev->dev, false); 1335 platform_pci_sleep_wake(dev, false); 1336 } 1337 1338 /** 1339 * pci_add_save_buffer - allocate buffer for saving given capability registers 1340 * @dev: the PCI device 1341 * @cap: the capability to allocate the buffer for 1342 * @size: requested size of the buffer 1343 */ 1344 static int pci_add_cap_save_buffer( 1345 struct pci_dev *dev, char cap, unsigned int size) 1346 { 1347 int pos; 1348 struct pci_cap_saved_state *save_state; 1349 1350 pos = pci_find_capability(dev, cap); 1351 if (pos <= 0) 1352 return 0; 1353 1354 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL); 1355 if (!save_state) 1356 return -ENOMEM; 1357 1358 save_state->cap_nr = cap; 1359 pci_add_saved_cap(dev, save_state); 1360 1361 return 0; 1362 } 1363 1364 /** 1365 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities 1366 * @dev: the PCI device 1367 */ 1368 void pci_allocate_cap_save_buffers(struct pci_dev *dev) 1369 { 1370 int error; 1371 1372 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16)); 1373 if (error) 1374 dev_err(&dev->dev, 1375 "unable to preallocate PCI Express save buffer\n"); 1376 1377 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); 1378 if (error) 1379 dev_err(&dev->dev, 1380 "unable to preallocate PCI-X save buffer\n"); 1381 } 1382 1383 /** 1384 * pci_restore_standard_config - restore standard config registers of PCI device 1385 * @dev: PCI device to handle 1386 * 1387 * This function assumes that the device's configuration space is accessible. 1388 * If the device needs to be powered up, the function will wait for it to 1389 * change the state. 1390 */ 1391 int pci_restore_standard_config(struct pci_dev *dev) 1392 { 1393 pci_power_t prev_state; 1394 int error; 1395 1396 pci_update_current_state(dev, PCI_D0); 1397 1398 prev_state = dev->current_state; 1399 if (prev_state == PCI_D0) 1400 goto Restore; 1401 1402 error = pci_raw_set_power_state(dev, PCI_D0, false); 1403 if (error) 1404 return error; 1405 1406 /* 1407 * This assumes that we won't get a bus in B2 or B3 from the BIOS, but 1408 * we've made this assumption forever and it appears to be universally 1409 * satisfied. 
/**
 * pci_restore_standard_config - restore standard config registers of PCI device
 * @dev: PCI device to handle
 *
 * This function assumes that the device's configuration space is accessible.
 * If the device needs to be powered up, the function will wait for it to
 * change the state.
 */
int pci_restore_standard_config(struct pci_dev *dev)
{
	pci_power_t prev_state;
	int error;

	pci_update_current_state(dev, PCI_D0);

	prev_state = dev->current_state;
	if (prev_state == PCI_D0)
		goto Restore;

	error = pci_raw_set_power_state(dev, PCI_D0, false);
	if (error)
		return error;

	/*
	 * This assumes that we won't get a bus in B2 or B3 from the BIOS, but
	 * we've made this assumption forever and it appears to be universally
	 * satisfied.
	 */
	switch (prev_state) {
	case PCI_D3cold:
	case PCI_D3hot:
		mdelay(pci_pm_d3_delay);
		break;
	case PCI_D2:
		udelay(PCI_PM_D2_DELAY);
		break;
	}

	pci_update_current_state(dev, PCI_D0);

 Restore:
	return dev->state_saved ? pci_restore_state(dev) : 0;
}

/**
 * pci_enable_ari - enable ARI forwarding if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 ctrl;
	struct pci_dev *bridge;

	if (!dev->is_pcie || dev->devfn)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	bridge = dev->bus->self;
	if (!bridge || !bridge->is_pcie)
		return;

	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	if (!pos)
		return;

	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}

/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.
 */
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
	return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
}

int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (dev->bus->self) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (dev->bus->self) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*pinp = pin;
	return PCI_SLOT(dev->devfn);
}
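/*
 * Worked example (illustrative): pci_swizzle_interrupt_pin() implements
 * the standard (pin + slot) modulo-4 rotation.  For a device in slot 2
 * using INTB (pin == 2), one level of swizzling yields
 * ((2 - 1) + 2) % 4 + 1 == 4, i.e. the bridge sees INTD.
 * pci_common_swizzle() simply applies this at each bridge on the way up
 * and returns the slot number of the device on the root bus.
 */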
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask &= ~(1 << bar);
}

/**
 * __pci_request_region - Reserve a PCI I/O or memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
				int exclusive)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
		 bar,
		 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		 &pdev->resource[bar]);
	return -EBUSY;
}

/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}

/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 *
 * The key difference that _exclusive makes is that userspace is
 * explicitly not allowed to map the resource via /dev/mem or
 * sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			pci_release_region(pdev, i);
}

int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
				   const char *res_name, int excl)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			if (__pci_request_region(pdev, i, res_name, excl))
				goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		if (bars & (1 << i))
			pci_release_region(pdev, i);

	return -EBUSY;
}


/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}

int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
					   int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
					      IORESOURCE_EXCLUSIVE);
}
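/*
 * Example (illustrative sketch): the "bars" argument is a bitmask of BAR
 * indices, so a driver that only touches BARs 0 and 2 can leave the other
 * regions available to others:
 *
 *	int bars = (1 << 0) | (1 << 2);
 *
 *	if (pci_request_selected_regions(pdev, bars, "acme"))
 *		return -EBUSY;
 *	/" ... later, in remove(): "/
 *	pci_release_selected_regions(pdev, bars);
 */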
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}

/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that
 * /dev/mem and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}

static void __pci_set_master(struct pci_dev *dev, bool enable)
{
	u16 old_cmd, cmd;

	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
	if (enable)
		cmd = old_cmd | PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~PCI_COMMAND_MASTER;
	if (cmd != old_cmd) {
		dev_dbg(&dev->dev, "%s bus mastering\n",
			enable ? "enabling" : "disabling");
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = enable;
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}

/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}

#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else

#ifndef PCI_CACHE_LINE_BYTES
#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
#endif

/* This can be overridden by arch code. */
/* Don't forget this is measured in 32-bit words, not bytes */
u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;

/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or a multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	return pci_set_mwi(dev);
}

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
#endif /* ! PCI_DISABLE_MWI */

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable)
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	else
		new = pci_command | PCI_COMMAND_INTX_DISABLE;

	if (new != pci_command) {
		struct pci_devres *dr;

		pci_write_config_word(pdev, PCI_COMMAND, new);

		dr = find_pci_dr(pdev);
		if (dr && !dr->restore_intx) {
			dr->restore_intx = 1;
			dr->orig_intx = !enable;
		}
	}
}

/**
 * pci_msi_off - disables any MSI or MSI-X capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use MSI, see pci_enable_msi() and friends.
 * This is a lower-level primitive that allows us to disable
 * MSI operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;

	return 0;
}
#endif

#ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);
#endif

#ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
#endif
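/*
 * Example (illustrative sketch): the usual probe-time pattern for the DMA
 * masks, trying 64-bit addressing first and falling back to 32-bit:
 *
 *	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
 *		pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
 *	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
 *		dev_err(&pdev->dev, "no usable DMA configuration\n");
 *		return -EIO;
 *	}
 */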

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
        u16 pci_command, new;

        pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

        if (enable) {
                new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
        } else {
                new = pci_command | PCI_COMMAND_INTX_DISABLE;
        }

        if (new != pci_command) {
                struct pci_devres *dr;

                pci_write_config_word(pdev, PCI_COMMAND, new);

                /* Record the original INTx state so managed (pcim_*)
                 * devices can restore it on driver detach. */
                dr = find_pci_dr(pdev);
                if (dr && !dr->restore_intx) {
                        dr->restore_intx = 1;
                        dr->orig_intx = !enable;
                }
        }
}

/**
 * pci_msi_off - disables any MSI or MSI-X capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use MSI, see pci_enable_msi() and friends.
 * This is a lower-level primitive that allows us to disable
 * MSI operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
        int pos;
        u16 control;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        if (pos) {
                pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
                control &= ~PCI_MSI_FLAGS_ENABLE;
                pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
        }
        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (pos) {
                pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
                control &= ~PCI_MSIX_FLAGS_ENABLE;
                pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
        }
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
        if (!pci_dma_supported(dev, mask))
                return -EIO;

        dev->dma_mask = mask;

        return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
        if (!pci_dma_supported(dev, mask))
                return -EIO;

        dev->dev.coherent_dma_mask = mask;

        return 0;
}
#endif

#ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
        return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);
#endif

#ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
        return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
#endif
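
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a driver for 64-bit capable hardware typically tries the large mask
 * first and falls back to 32-bit DMA:
 *
 *	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 *		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 *	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
 *		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 *	} else {
 *		dev_err(&pdev->dev, "no usable DMA configuration\n");
 *		return -EIO;
 *	}
 */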
static int __pcie_flr(struct pci_dev *dev, int probe)
{
        u16 status;
        u32 cap;
        int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);

        if (!exppos)
                return -ENOTTY;
        pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
        if (!(cap & PCI_EXP_DEVCAP_FLR))
                return -ENOTTY;

        if (probe)
                return 0;

        pci_block_user_cfg_access(dev);

        /* Wait for Transaction Pending bit to clear */
        msleep(100);
        pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
        if (status & PCI_EXP_DEVSTA_TRPND) {
                dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
                        "sleeping for 1 second\n");
                ssleep(1);
                pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
                if (status & PCI_EXP_DEVSTA_TRPND)
                        dev_info(&dev->dev, "Still busy after 1s; "
                                "proceeding with reset anyway\n");
        }

        pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
                                PCI_EXP_DEVCTL_BCR_FLR);
        mdelay(100);

        pci_unblock_user_cfg_access(dev);
        return 0;
}

static int __pci_af_flr(struct pci_dev *dev, int probe)
{
        int cappos = pci_find_capability(dev, PCI_CAP_ID_AF);
        u8 status;
        u8 cap;

        if (!cappos)
                return -ENOTTY;
        pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap);
        if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
                return -ENOTTY;

        if (probe)
                return 0;

        pci_block_user_cfg_access(dev);

        /* Wait for Transaction Pending bit to clear */
        msleep(100);
        pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
        if (status & PCI_AF_STATUS_TP) {
                dev_info(&dev->dev, "Busy after 100ms while trying to"
                                " reset; sleeping for 1 second\n");
                ssleep(1);
                pci_read_config_byte(dev,
                                cappos + PCI_AF_STATUS, &status);
                if (status & PCI_AF_STATUS_TP)
                        dev_info(&dev->dev, "Still busy after 1s; "
                                        "proceeding with reset anyway\n");
        }
        pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
        mdelay(100);

        pci_unblock_user_cfg_access(dev);
        return 0;
}

static int __pci_reset_function(struct pci_dev *pdev, int probe)
{
        int res;

        /* Prefer a PCIe Function Level Reset; fall back to the Advanced
         * Features FLR.  -ENOTTY means the method is not supported. */
        res = __pcie_flr(pdev, probe);
        if (res != -ENOTTY)
                return res;

        res = __pci_af_flr(pdev, probe);
        if (res != -ENOTTY)
                return res;

        return res;
}

/**
 * pci_execute_reset_function() - Reset a PCI device function
 * @dev: Device function to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or -ENOTTY if the
 * device doesn't support resetting a single function.
 */
int pci_execute_reset_function(struct pci_dev *dev)
{
        return __pci_reset_function(dev, 0);
}
EXPORT_SYMBOL_GPL(pci_execute_reset_function);

/**
 * pci_reset_function() - quiesce and reset a PCI device function
 * @dev: Device function to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from pci_execute_reset_function() in that it saves and restores device
 * state over the reset.
 *
 * Returns 0 if the device function was successfully reset or -ENOTTY if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
{
        /* Probe first: bail out if no reset method is available. */
        int r = __pci_reset_function(dev, 1);

        if (r < 0)
                return r;

        if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
                disable_irq(dev->irq);
        pci_save_state(dev);

        pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

        r = pci_execute_reset_function(dev);

        pci_restore_state(dev);
        if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
                enable_irq(dev->irq);

        return r;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
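
/*
 * Illustrative usage (hypothetical, not part of this file): an
 * error-recovery path that owns an otherwise quiesced function can
 * attempt a function-level reset and skip devices without FLR support:
 *
 *	err = pci_reset_function(pdev);
 *	if (err == -ENOTTY)
 *		...	(device cannot reset a single function)
 */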

/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes
 * or appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
        int err, cap;
        u32 stat;

        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!cap)
                return -EINVAL;

        err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
        if (err)
                return -EINVAL;

        /* The designed maximum is encoded in bits 21-22 of PCI-X status */
        return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);

/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes
 * or appropriate error value.
 */
int pcix_get_mmrbc(struct pci_dev *dev)
{
        int cap;
        u16 cmd;

        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!cap)
                return -EINVAL;

        /* PCI_X_CMD is a 16-bit register; use a word access */
        if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
                return -EINVAL;

        return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);

/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to operate on
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible, sets the maximum memory read byte count; some bridges
 * have errata that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
        int cap, err = -EINVAL;
        u32 stat, v, o;
        u16 cmd;

        if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
                goto out;

        v = ffs(mmrbc) - 10;

        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!cap)
                goto out;

        err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
        if (err)
                goto out;

        if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
                return -E2BIG;

        /* PCI_X_CMD is a 16-bit register; use word accesses */
        err = pci_read_config_word(dev, cap + PCI_X_CMD, &cmd);
        if (err)
                goto out;

        o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
        if (o != v) {
                if (v > o && dev->bus &&
                    (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
                        return -EIO;

                cmd &= ~PCI_X_CMD_MAX_READ;
                cmd |= v << 2;
                err = pci_write_config_word(dev, cap + PCI_X_CMD, cmd);
        }
out:
        return err;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
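
/*
 * Illustrative usage (hypothetical, not part of this file): raise the
 * read byte count to the device's designed maximum, tolerating bridges
 * whose errata forbid the change:
 *
 *	int max = pcix_get_max_mmrbc(pdev);
 *	if (max > 0 && pcix_set_mmrbc(pdev, max))
 *		dev_warn(&pdev->dev, "failed to set PCI-X mmrbc\n");
 */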

/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes
 * or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
        int ret, cap;
        u16 ctl;

        cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
        if (!cap)
                return -EINVAL;

        ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
        if (!ret)
                ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);

        return ret;
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to operate on
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible, sets the maximum memory read request size.
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
        int cap, err = -EINVAL;
        u16 ctl, v;

        if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
                goto out;

        v = (ffs(rq) - 8) << 12;

        cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
        if (!cap)
                goto out;

        err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
        if (err)
                goto out;

        if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
                ctl &= ~PCI_EXP_DEVCTL_READRQ;
                ctl |= v;
                /* Device Control is a 16-bit register, so write a word */
                err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
        }

out:
        return err;
}
EXPORT_SYMBOL(pcie_set_readrq);

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine builds a mask of the BARs whose resource flags
 * intersect @flags.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
        int i, bars = 0;

        for (i = 0; i < PCI_NUM_RESOURCES; i++)
                if (pci_resource_flags(dev, i) & flags)
                        bars |= (1 << i);
        return bars;
}
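
/*
 * Illustrative usage (hypothetical, not part of this file; DRV_NAME is a
 * made-up macro): reserve only the memory-mapped BARs of a device:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	err = pci_request_selected_regions(pdev, bars, DRV_NAME);
 */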

/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
        if (resno < PCI_ROM_RESOURCE) {
                *type = pci_bar_unknown;
                return PCI_BASE_ADDRESS_0 + 4 * resno;
        } else if (resno == PCI_ROM_RESOURCE) {
                *type = pci_bar_mem32;
                return dev->rom_base_reg;
        }

        dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
        return 0;
}

static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
        pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 * @dev: The PCI device of the root bridge.
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
        return 1;
}

/* Apply "final" quirk fixups to every device present at boot. */
static int __devinit pci_init(void)
{
        struct pci_dev *dev = NULL;

        while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
                pci_fixup_device(pci_fixup_final, dev);

        return 0;
}

/*
 * Parse the early "pci=" kernel parameters, e.g. "pci=nomsi,cbmemsize=16M".
 * Each option is first offered to the architecture via pcibios_setup();
 * the generic options are handled here.
 */
static int __init pci_setup(char *str)
{
        while (str) {
                char *k = strchr(str, ',');
                if (k)
                        *k++ = 0;
                if (*str && (str = pcibios_setup(str)) && *str) {
                        if (!strcmp(str, "nomsi")) {
                                pci_no_msi();
                        } else if (!strcmp(str, "noaer")) {
                                pci_no_aer();
                        } else if (!strcmp(str, "nodomains")) {
                                pci_no_domains();
                        } else if (!strncmp(str, "cbiosize=", 9)) {
                                pci_cardbus_io_size = memparse(str + 9, &str);
                        } else if (!strncmp(str, "cbmemsize=", 10)) {
                                pci_cardbus_mem_size = memparse(str + 10, &str);
                        } else {
                                printk(KERN_ERR "PCI: Unknown option `%s'\n",
                                                str);
                        }
                }
                str = k;
        }
        return 0;
}
early_param("pci", pci_setup);

device_initcall(pci_init);

EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_enable_wake);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);