/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm/setup.h>
#include "pci.h"

/* Human-readable names for the PCI power states. */
const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

/* Global lower bound (in ms) on the delay used after a D3 transition. */
unsigned int pci_pm_d3_delay;

/*
 * Sleep for the D3 transition delay of @dev: the per-device d3_delay,
 * raised to at least the global pci_pm_d3_delay lower bound.
 */
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.
 * Arch can override either the dfl or actual value as it sees fit.
 * Don't forget this is measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	/* Recursively take the maximum over this bus and all children. */
	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
/*
 * Map the full extent of memory BAR @bar of @pdev and return the
 * virtual address, or NULL if the BAR is not a memory resource.
 */
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif

#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

/* Bound on capability-list walks, to survive malformed (looping) lists. */
#define PCI_FIND_CAP_TTL	48

/*
 * Walk the capability list starting at the "next" pointer stored at
 * config offset @pos, looking for capability ID @cap.  @ttl is decremented
 * across calls so chained searches share one loop bound.  Returns the
 * config-space offset of the capability, or 0 if not found.
 */
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)		/* below the standard header: end of list */
			break;
		pos &= ~3;		/* capability pointers are dword aligned */
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)		/* no device / invalid entry */
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

/* Same as above, with a fresh per-call TTL. */
static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

/*
 * Continue a capability search after the capability at @pos; returns the
 * next occurrence of @cap or 0.
 */
int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

/*
 * Return the config-space offset of the head of the capability list for
 * a device with header type @hdr_type, or 0 if the device advertises no
 * capability list (PCI_STATUS_CAP_LIST clear) or the type is unknown.
 */
static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	/* Mask off the multi-function bit to get the raw header type. */
	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.
 *  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR         Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC          Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN         Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR         Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	/* Extended capabilities live above the 256-byte conventional space. */
	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)	/* next pointer left the valid range */
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

/*
 * Walk the PCI_CAP_ID_HT capabilities of @dev starting at config offset
 * @pos, returning the first one whose type field matches @ht_cap, or 0.
 */
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	/*
	 * Slave/host capability types use a 3-bit type encoding; all other
	 * HyperTransport capability types use 5 bits.
	 */
	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		/* The HT capability type lives in byte 3 of the capability. */
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		/* Containment is only checked for regions with a nonzero start. */
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}

/**
 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	/* Only the standard BARs/ROM, not the bridge window resources. */
	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

/* Platform (e.g. ACPI) power-management backend, if one registered. */
static struct pci_platform_pm_ops *pci_platform_pm;

/*
 * Register the platform PM backend.  All callbacks except run_wake are
 * mandatory; -EINVAL is returned if any of them is missing.
 */
int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

/* The helpers below forward to the platform backend, with safe fallbacks
 * when no backend is registered. */

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * we can enter D0 from any state, but we can only go deeper into
	 * a low-power state if we're already in a low-power state.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		/* A device that resets on D3hot->D0 needs its BARs restored. */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	/* Read back the state the device actually entered. */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	/* Tell ASPM about the new power state of this link partner. */
	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * For a D0 transition the platform is invoked first, before the device's
 * own PM registers are touched.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	return state > PCI_D0 ?
			pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	/* The platform step may succeed even when the native one failed. */
	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	/* Devices without the PM capability can only be in D0. */
	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	/* Prefer the platform's (e.g. firmware's) choice when available. */
	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

/* Number of 16-bit PCIe control registers saved by pci_save_pcie_state(). */
#define PCI_EXP_SAVE_REGS	7

/*
 * The pcie_cap_has_*() predicates decide which PCIe capability registers
 * exist for a given port type / capability version.  Save and restore
 * below must apply them in the same order so cap[] indices line up.
 */
#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)

/*
 * Save the PCI Express control registers of @dev into its preallocated
 * save buffer.  Returns 0 on success, -ENOMEM if no buffer was set up.
 */
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

/*
 * Restore the PCI Express control registers saved by pci_save_pcie_state().
 * Registers are written back in the same predicate order they were saved.
 */
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}


/*
 * Save the PCI-X command register of @dev into its preallocated save
 * buffer.  Returns 0 on success (or if the device has no PCI-X
 * capability), -ENOMEM if no buffer was set up.
 */
static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);

	return 0;
}

/* Restore the PCI-X command register saved by pci_save_pcix_state(). */
static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 *
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	/* Save the 64 dwords (256 bytes) of conventional config space. */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	if (!dev->state_saved)
		return 0;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);

	/*
	 * The Base Address register should be programmed before the command
	 * register(s), hence the reverse iteration order.
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
				"space at offset %#x (was %#x, writing %#x)\n",
				i, val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;

	return 0;
}

/*
 * Put @dev into D0 and enable its BARs in @bars via the arch hook,
 * then run the enable-time fixups.  Used by the pci_enable_device*()
 * family and pci_reenable_device().
 */
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)	/* -EIO: no PM capability, still usable */
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 *  Note this function is a backend of pci_default_resume and is not supposed
 *  to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

/*
 * Common implementation for pci_enable_device_{io,mem,}: enables only
 * the BARs whose resource flags intersect @flags.  The enable count is
 * bumped first so only the first caller does the actual work.
 */
static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable Memory resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 *
 *  Note we don't actually enable the device many times if we call
 *  this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;		/* enabled via pcim_enable_device() */
	unsigned int pinned:1;		/* do not disable on driver detach */
	unsigned int orig_intx:1;	/* INTx state to restore */
	unsigned int restore_intx:1;	/* restore orig_intx on release */
	u32 region_mask;		/* BARs claimed via pci_request_region() */
};

/* devres release callback: undo everything pci_devres tracked. */
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

/* Find the pci_devres for @pdev, allocating it on first use. */
static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

/* Find the pci_devres for @pdev without allocating; NULL if unmanaged. */
static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

/* Clear bus mastering (if set) and run the arch disable hook. */
static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	/* Only the final balancing pci_disable_device() really disables. */
	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
							enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @ign: Ignored.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
{
	if (pci_check_pme_status(dev))
		pm_request_resume(&dev->dev);
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, NULL);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
			enable ? "enabled" : "disabled");
}

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;	/* no native PME#; rely on the platform */
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* fall through - D1/D2 are allowed for this device */
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	/* Undo the wake-up preparation if the transition failed. */
	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	/* Undo the wake-up preparation if the transition failed. */
	if (error)
		__pci_enable_wake(dev, target_state, true, false);

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (device_run_wake(&dev->dev))
		return true;

	if (!dev->pme_support)
		return false;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_run_wake(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_run_wake(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;
	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		device_set_wakeup_enable(&dev->dev, false);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}

/**
 * platform_pci_wakeup_init - init platform wakeup if present
 * @dev: PCI device
 *
 * Some devices don't have PCI PM caps but can still generate wakeup
 * events through platform methods (like ACPI events). If @dev supports
 * platform wakeup events, set the device flag to indicate as much.  This
 * may be redundant if the device also supports PCI PM caps, but double
 * initialization should be safe in that case.
 */
void platform_pci_wakeup_init(struct pci_dev *dev)
{
	if (!platform_pci_can_wakeup(dev))
		return;

	device_set_wakeup_capable(&dev->dev, true);
	device_set_wakeup_enable(&dev->dev, false);
	platform_pci_sleep_wake(dev, false);
}

/**
 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @size: requested size of the buffer
 */
static int pci_add_cap_save_buffer(
	struct pci_dev *dev, char cap, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	/* Nothing to save if the device doesn't have this capability. */
	pos = pci_find_capability(dev, cap);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap_nr = cap;
	pci_add_saved_cap(dev, save_state);

	return 0;
}

/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI-X save buffer\n");
}

/**
 * pci_enable_ari - enable ARI forwarding if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 ctrl;
	struct pci_dev *bridge;

	/* Nothing to do unless this is function 0 of a PCIe device. */
	if (!pci_is_pcie(dev) || dev->devfn)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	bridge = dev->bus->self;
	if (!bridge || !pci_is_pcie(bridge))
		return;

	pos = pci_pcie_cap(bridge);
	if (!pos)
		return;

	/* The upstream bridge must advertise ARI forwarding support. */
	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	if (!pci_acs_enable)
		return;

	if (!pci_is_pcie(dev))
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1)
 */
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
	int slot;

	if (pci_ari_enabled(dev->bus))
		slot = 0;
	else
		slot = PCI_SLOT(dev->devfn);

	return (((pin - 1) + slot) % 4) + 1;
}

/*
 * Swizzle @dev's interrupt pin up through every bridge to the root bus.
 * Returns the swizzled pin (or -1 if the device uses no INTx pin) and
 * stores the root-bus bridge device in *@bridge.
 */
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*pinp = pin;
	return PCI_SLOT(dev->devfn);
}

/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));

	/* Keep the managed-device bookkeeping in sync. */
	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask &= ~(1 << bar);
}

/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
									int exclusive)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	/* Record the claim so pcim_release() can undo it for managed devs. */
	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
		 &pdev->resource[bar]);
	return -EBUSY;
}

/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}

/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 *
 * The key difference that _exclusive makes it that userspace is
 * explicitly not allowed to map the resource via /dev/mem or
 * sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			pci_release_region(pdev, i);
}

int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name, int excl)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			if (__pci_request_region(pdev, i, res_name, excl))
				goto err_out;
	return 0;

err_out:
	/* Roll back the BARs already claimed before the failure. */
	while(--i >= 0)
		if (bars & (1 << i))
			pci_release_region(pdev, i);

	return -EBUSY;
}


/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}

int
pci_request_selected_regions_exclusive(struct pci_dev *pdev,
				 int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
			IORESOURCE_EXCLUSIVE);
}

/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}

/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that
 * /dev/mem and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}

/* Set or clear PCI_COMMAND_MASTER, logging only when the bit changes. */
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
	u16 old_cmd, cmd;

	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
	if (enable)
		cmd = old_cmd | PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~PCI_COMMAND_MASTER;
	if (cmd != old_cmd) {
		dev_dbg(&dev->dev, "%s bus mastering\n",
			enable ? "enabling" : "disabling");
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = enable;
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}

/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}

/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2088 */ 2089 int pci_set_cacheline_size(struct pci_dev *dev) 2090 { 2091 u8 cacheline_size; 2092 2093 if (!pci_cache_line_size) 2094 return -EINVAL; 2095 2096 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be 2097 equal to or multiple of the right value. */ 2098 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 2099 if (cacheline_size >= pci_cache_line_size && 2100 (cacheline_size % pci_cache_line_size) == 0) 2101 return 0; 2102 2103 /* Write the correct value. */ 2104 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size); 2105 /* Read it back. */ 2106 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 2107 if (cacheline_size == pci_cache_line_size) 2108 return 0; 2109 2110 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not " 2111 "supported\n", pci_cache_line_size << 2); 2112 2113 return -EINVAL; 2114 } 2115 EXPORT_SYMBOL_GPL(pci_set_cacheline_size); 2116 2117 #ifdef PCI_DISABLE_MWI 2118 int pci_set_mwi(struct pci_dev *dev) 2119 { 2120 return 0; 2121 } 2122 2123 int pci_try_set_mwi(struct pci_dev *dev) 2124 { 2125 return 0; 2126 } 2127 2128 void pci_clear_mwi(struct pci_dev *dev) 2129 { 2130 } 2131 2132 #else 2133 2134 /** 2135 * pci_set_mwi - enables memory-write-invalidate PCI transaction 2136 * @dev: the PCI device for which MWI is enabled 2137 * 2138 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. 2139 * 2140 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 2141 */ 2142 int 2143 pci_set_mwi(struct pci_dev *dev) 2144 { 2145 int rc; 2146 u16 cmd; 2147 2148 rc = pci_set_cacheline_size(dev); 2149 if (rc) 2150 return rc; 2151 2152 pci_read_config_word(dev, PCI_COMMAND, &cmd); 2153 if (! 
(cmd & PCI_COMMAND_INVALIDATE)) { 2154 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n"); 2155 cmd |= PCI_COMMAND_INVALIDATE; 2156 pci_write_config_word(dev, PCI_COMMAND, cmd); 2157 } 2158 2159 return 0; 2160 } 2161 2162 /** 2163 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction 2164 * @dev: the PCI device for which MWI is enabled 2165 * 2166 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. 2167 * Callers are not required to check the return value. 2168 * 2169 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 2170 */ 2171 int pci_try_set_mwi(struct pci_dev *dev) 2172 { 2173 int rc = pci_set_mwi(dev); 2174 return rc; 2175 } 2176 2177 /** 2178 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev 2179 * @dev: the PCI device to disable 2180 * 2181 * Disables PCI Memory-Write-Invalidate transaction on the device 2182 */ 2183 void 2184 pci_clear_mwi(struct pci_dev *dev) 2185 { 2186 u16 cmd; 2187 2188 pci_read_config_word(dev, PCI_COMMAND, &cmd); 2189 if (cmd & PCI_COMMAND_INVALIDATE) { 2190 cmd &= ~PCI_COMMAND_INVALIDATE; 2191 pci_write_config_word(dev, PCI_COMMAND, cmd); 2192 } 2193 } 2194 #endif /* ! 
PCI_DISABLE_MWI */

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 cmd, new_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);

	/* INTX_DISABLE is an inverted-sense bit: clear it to enable INTx. */
	new_cmd = enable ? (cmd & ~PCI_COMMAND_INTX_DISABLE)
			 : (cmd | PCI_COMMAND_INTX_DISABLE);

	if (new_cmd != cmd) {
		struct pci_devres *dr;

		pci_write_config_word(pdev, PCI_COMMAND, new_cmd);

		/*
		 * Remember the original state so managed (pcim_*) devices
		 * can restore it on driver detach.
		 */
		dr = find_pci_dr(pdev);
		if (dr && !dr->restore_intx) {
			dr->restore_intx = 1;
			dr->orig_intx = !enable;
		}
	}
}

/**
 * pci_msi_off - disables any msi or msix capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use msi see pci_enable_msi and friends.
 * This is a lower level primitive that allows us to disable
 * msi operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
	int pos;
	u16 control;

	/* Clear the enable bit in the MSI capability, if the device has one. */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
	/* Likewise for the MSI-X capability. */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;
	dev_dbg(&dev->dev, "using %dbit DMA mask\n", fls64(mask));

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;
	dev_dbg(&dev->dev, "using %dbit consistent DMA mask\n", fls64(mask));

	return 0;
}
#endif

#ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
/* Thin wrapper over the generic DMA API; arch code may provide its own. */
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);
#endif

#ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY
/* Thin wrapper over the generic DMA API; arch code may provide its own. */
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
#endif

/*
 * Function Level Reset via the PCIe Device Capability/Control registers.
 * With @probe non-zero, only report whether the device supports FLR.
 */
static int pcie_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u32 cap;
	u16 status, control;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean */
	/* Exponential backoff: 100, 200, 400 ms between polls. */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
	control |= PCI_EXP_DEVCTL_BCR_FLR;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);

	/* Give the device time to complete the reset. */
	msleep(100);

	return 0;
}

/*
 * Function Level Reset via the Advanced Features capability.
 * With @probe non-zero, only report whether the device supports it.
 */
static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u8 cap;
	u8 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	/* Both Transaction Pending reporting and FLR must be supported. */
	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean */
	/* Exponential backoff: 100, 200, 400 ms between polls. */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
		if (!(status & PCI_AF_STATUS_TP))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	msleep(100);

	return 0;
}

/*
 * Reset the device by cycling it through D0 -> D3hot -> D0.  Only usable
 * when the device does not advertise No_Soft_Reset and currently sits in
 * D0.  With @probe non-zero, only report whether this method is usable.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap)
		return -ENOTTY;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	if (dev->current_state != PCI_D0)
		return -EINVAL;

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	return 0;
}

/*
 * Reset the device via a secondary bus reset on its parent bridge.
 * Only safe when @dev is the sole device on its bus (the reset hits
 * every device behind the bridge).
 */
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	u16 ctrl;
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
		return -ENOTTY;

	/* Refuse if any sibling shares the bus. */
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	/* Pulse the Secondary Bus Reset bit in the bridge. */
	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	return 0;
}

/*
 * Try each reset method in order of preference until one claims the
 * device (anything other than -ENOTTY).  When actually resetting
 * (!probe), user config access and the device lock are held across
 * the attempt.
 */
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	if (!probe) {
		pci_block_user_cfg_access(dev);
		/* block PM suspend, driver probe, etc. */
		down(&dev->dev.sem);
	}

	rc = pci_dev_specific_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pcie_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_parent_bus_reset(dev, probe);
done:
	if (!probe) {
		up(&dev->dev.sem);
		pci_unblock_user_cfg_access(dev);
	}

	return rc;
}

/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);

/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
2508 */ 2509 int pci_probe_reset_function(struct pci_dev *dev) 2510 { 2511 return pci_dev_reset(dev, 1); 2512 } 2513 2514 /** 2515 * pci_reset_function - quiesce and reset a PCI device function 2516 * @dev: PCI device to reset 2517 * 2518 * Some devices allow an individual function to be reset without affecting 2519 * other functions in the same device. The PCI device must be responsive 2520 * to PCI config space in order to use this function. 2521 * 2522 * This function does not just reset the PCI portion of a device, but 2523 * clears all the state associated with the device. This function differs 2524 * from __pci_reset_function in that it saves and restores device state 2525 * over the reset. 2526 * 2527 * Returns 0 if the device function was successfully reset or negative if the 2528 * device doesn't support resetting a single function. 2529 */ 2530 int pci_reset_function(struct pci_dev *dev) 2531 { 2532 int rc; 2533 2534 rc = pci_dev_reset(dev, 1); 2535 if (rc) 2536 return rc; 2537 2538 pci_save_state(dev); 2539 2540 /* 2541 * both INTx and MSI are disabled after the Interrupt Disable bit 2542 * is set and the Bus Master bit is cleared. 2543 */ 2544 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); 2545 2546 rc = pci_dev_reset(dev, 0); 2547 2548 pci_restore_state(dev); 2549 2550 return rc; 2551 } 2552 EXPORT_SYMBOL_GPL(pci_reset_function); 2553 2554 /** 2555 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count 2556 * @dev: PCI device to query 2557 * 2558 * Returns mmrbc: maximum designed memory read count in bytes 2559 * or appropriate error value. 
2560 */ 2561 int pcix_get_max_mmrbc(struct pci_dev *dev) 2562 { 2563 int err, cap; 2564 u32 stat; 2565 2566 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 2567 if (!cap) 2568 return -EINVAL; 2569 2570 err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat); 2571 if (err) 2572 return -EINVAL; 2573 2574 return (stat & PCI_X_STATUS_MAX_READ) >> 12; 2575 } 2576 EXPORT_SYMBOL(pcix_get_max_mmrbc); 2577 2578 /** 2579 * pcix_get_mmrbc - get PCI-X maximum memory read byte count 2580 * @dev: PCI device to query 2581 * 2582 * Returns mmrbc: maximum memory read count in bytes 2583 * or appropriate error value. 2584 */ 2585 int pcix_get_mmrbc(struct pci_dev *dev) 2586 { 2587 int ret, cap; 2588 u32 cmd; 2589 2590 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 2591 if (!cap) 2592 return -EINVAL; 2593 2594 ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd); 2595 if (!ret) 2596 ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); 2597 2598 return ret; 2599 } 2600 EXPORT_SYMBOL(pcix_get_mmrbc); 2601 2602 /** 2603 * pcix_set_mmrbc - set PCI-X maximum memory read byte count 2604 * @dev: PCI device to query 2605 * @mmrbc: maximum memory read count in bytes 2606 * valid values are 512, 1024, 2048, 4096 2607 * 2608 * If possible sets maximum memory read byte count, some bridges have erratas 2609 * that prevent this. 
2610 */ 2611 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) 2612 { 2613 int cap, err = -EINVAL; 2614 u32 stat, cmd, v, o; 2615 2616 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) 2617 goto out; 2618 2619 v = ffs(mmrbc) - 10; 2620 2621 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 2622 if (!cap) 2623 goto out; 2624 2625 err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat); 2626 if (err) 2627 goto out; 2628 2629 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) 2630 return -E2BIG; 2631 2632 err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd); 2633 if (err) 2634 goto out; 2635 2636 o = (cmd & PCI_X_CMD_MAX_READ) >> 2; 2637 if (o != v) { 2638 if (v > o && dev->bus && 2639 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC)) 2640 return -EIO; 2641 2642 cmd &= ~PCI_X_CMD_MAX_READ; 2643 cmd |= v << 2; 2644 err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd); 2645 } 2646 out: 2647 return err; 2648 } 2649 EXPORT_SYMBOL(pcix_set_mmrbc); 2650 2651 /** 2652 * pcie_get_readrq - get PCI Express read request size 2653 * @dev: PCI device to query 2654 * 2655 * Returns maximum memory read request in bytes 2656 * or appropriate error value. 
2657 */ 2658 int pcie_get_readrq(struct pci_dev *dev) 2659 { 2660 int ret, cap; 2661 u16 ctl; 2662 2663 cap = pci_pcie_cap(dev); 2664 if (!cap) 2665 return -EINVAL; 2666 2667 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); 2668 if (!ret) 2669 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); 2670 2671 return ret; 2672 } 2673 EXPORT_SYMBOL(pcie_get_readrq); 2674 2675 /** 2676 * pcie_set_readrq - set PCI Express maximum memory read request 2677 * @dev: PCI device to query 2678 * @rq: maximum memory read count in bytes 2679 * valid values are 128, 256, 512, 1024, 2048, 4096 2680 * 2681 * If possible sets maximum read byte count 2682 */ 2683 int pcie_set_readrq(struct pci_dev *dev, int rq) 2684 { 2685 int cap, err = -EINVAL; 2686 u16 ctl, v; 2687 2688 if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) 2689 goto out; 2690 2691 v = (ffs(rq) - 8) << 12; 2692 2693 cap = pci_pcie_cap(dev); 2694 if (!cap) 2695 goto out; 2696 2697 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); 2698 if (err) 2699 goto out; 2700 2701 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { 2702 ctl &= ~PCI_EXP_DEVCTL_READRQ; 2703 ctl |= v; 2704 err = pci_write_config_dword(dev, cap + PCI_EXP_DEVCTL, ctl); 2705 } 2706 2707 out: 2708 return err; 2709 } 2710 EXPORT_SYMBOL(pcie_set_readrq); 2711 2712 /** 2713 * pci_select_bars - Make BAR mask from the type of resource 2714 * @dev: the PCI device for which BAR mask is made 2715 * @flags: resource type mask to be selected 2716 * 2717 * This helper routine makes bar mask from the type of resource. 
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;
	/* Set bit i for every resource whose flags intersect @flags. */
	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}

/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		/* Standard BAR: fixed stride of 4 bytes from BAR0. */
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		/* Expansion ROM BAR; its offset is device-type dependent. */
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
	return 0;
}

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

/* Register an arch hook for VGA state changes; NULL disables it. */
void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

/* Invoke the arch hook, if any; returns 0 when no hook is registered. */
static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, bool change_bridge)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
						change_bridge);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @change_bridge: traverse ancestors and change bridges
 */
int
pci_set_vga_state(struct pci_dev *dev, bool decode,
		  unsigned int command_bits, bool change_bridge)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	/* Only IO and MEMORY decode bits make sense here. */
	WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
	if (rc)
		return rc;

	/* Update the device's own command register. */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (decode == true)
		cmd |= command_bits;
	else
		cmd &= ~command_bits;
	pci_write_config_word(dev, PCI_COMMAND, cmd);

	if (change_bridge == false)
		return 0;

	/* Walk up the bridge chain, toggling VGA forwarding on each. */
	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode == true)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}

/* Storage for the "pci=resource_alignment=" boot/sysfs parameter. */
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	/*
	 * The parameter is a ';' or ',' separated list of entries of the
	 * form "[<order>@][<domain>:]<bus>:<slot>.<func>"; the alignment
	 * is 2^order (PAGE_SIZE when no order is given).
	 */
	while (*p) {
		count = 0;
		/* Optional "<order>@" prefix. */
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
							p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		/* Try the long form first, then fall back to domain 0. */
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1) {
				align = PAGE_SIZE;
			} else {
				align = 1 << align_order;
			}
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}

/**
 * pci_is_reassigndev - check if specified PCI is target device to reassign
 * @dev: the PCI device to check
 *
 * RETURNS: non-zero for PCI device is a target device to reassign,
 *          or zero is not.
2891 */ 2892 int pci_is_reassigndev(struct pci_dev *dev) 2893 { 2894 return (pci_specified_resource_alignment(dev) != 0); 2895 } 2896 2897 ssize_t pci_set_resource_alignment_param(const char *buf, size_t count) 2898 { 2899 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1) 2900 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1; 2901 spin_lock(&resource_alignment_lock); 2902 strncpy(resource_alignment_param, buf, count); 2903 resource_alignment_param[count] = '\0'; 2904 spin_unlock(&resource_alignment_lock); 2905 return count; 2906 } 2907 2908 ssize_t pci_get_resource_alignment_param(char *buf, size_t size) 2909 { 2910 size_t count; 2911 spin_lock(&resource_alignment_lock); 2912 count = snprintf(buf, size, "%s", resource_alignment_param); 2913 spin_unlock(&resource_alignment_lock); 2914 return count; 2915 } 2916 2917 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf) 2918 { 2919 return pci_get_resource_alignment_param(buf, PAGE_SIZE); 2920 } 2921 2922 static ssize_t pci_resource_alignment_store(struct bus_type *bus, 2923 const char *buf, size_t count) 2924 { 2925 return pci_set_resource_alignment_param(buf, count); 2926 } 2927 2928 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show, 2929 pci_resource_alignment_store); 2930 2931 static int __init pci_resource_alignment_sysfs_init(void) 2932 { 2933 return bus_create_file(&pci_bus_type, 2934 &bus_attr_resource_alignment); 2935 } 2936 2937 late_initcall(pci_resource_alignment_sysfs_init); 2938 2939 static void __devinit pci_no_domains(void) 2940 { 2941 #ifdef CONFIG_PCI_DOMAINS 2942 pci_domains_supported = 0; 2943 #endif 2944 } 2945 2946 /** 2947 * pci_ext_cfg_enabled - can we access extended PCI config space? 2948 * @dev: The PCI device of the root bridge. 2949 * 2950 * Returns 1 if we can access PCI extended config space (offsets 2951 * greater than 0xff). This is the default implementation. Architecture 2952 * implementations can override this. 
 */
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
	/* Default: assume extended config space is reachable. */
	return 1;
}

/* Default no-op; architectures override to fix up CardBus bridges. */
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

/*
 * Parse the "pci=" kernel command line parameter.  Options are comma
 * separated; each unrecognized option is first offered to the arch via
 * pcibios_setup() and reported as unknown only if that also rejects it.
 */
static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
					strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);

EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_register_set_vga_state);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

/* Power management entry points. */
EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);