/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */
#include <linux/device.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
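
/*
 * Example (sketch of a hypothetical caller, not part of this file): a
 * driver's probe() routine might map its first memory BAR roughly like
 * this; the BAR number and register offset are made up for illustration:
 *
 *	void __iomem *regs;
 *
 *	regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0, regs + 0x10);	(hypothetical control register)
 *	...
 *	iounmap(regs);
 */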

#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
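
/*
 * Example (sketch, not part of this file): locating the power management
 * capability before touching its registers directly, as this file itself
 * does in pci_pm_init():
 *
 *	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 *	u16 pmcsr;
 *
 *	if (pm)
 *		pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
 */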

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR	Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC	Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN	Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR	Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
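
/*
 * Example (sketch): walking every matching Hypertransport capability with
 * a bounded loop, as the note above recommends; the capability type is
 * just an illustration:
 *
 *	int pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
 *	int guard = PCI_FIND_CAP_TTL;
 *
 *	while (pos && guard--) {
 *		... use the capability at "pos" ...
 *		pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_IRQ);
 *	}
 */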

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure containing the resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}
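
/*
 * Example (sketch, with hypothetical acme_* callbacks): platform firmware
 * code registers its hooks once at init time; every callback must be
 * implemented or pci_set_platform_pm() rejects the ops:
 *
 *	static struct pci_platform_pm_ops acme_pci_platform_pm = {
 *		.is_manageable	= acme_pci_power_manageable,
 *		.set_state	= acme_pci_set_power_state,
 *		.choose_state	= acme_pci_choose_state,
 *		.can_wakeup	= acme_pci_can_wakeup,
 *		.sleep_wake	= acme_pci_sleep_wake,
 *	};
 *
 *	pci_set_platform_pm(&acme_pci_platform_pm);
 */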

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state: we can enter D0 from any state, but we
	 * can only go deeper into a sleep state if we're already in a low
	 * power state.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18.
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(pci_pm_d3_delay);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	return state > PCI_D0 ?
			pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to the suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS	7

#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2,
				     &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	if (!dev->state_saved)
		return 0;

	/* PCI Express registers must be restored first */
	pci_restore_pcie_state(dev);

	/*
	 * The Base Address registers should be programmed before the command
	 * register(s)
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
				"space at offset %#x (was %#x, writing %#x)\n",
				i, val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;

	return 0;
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use it
 * instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}
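
/*
 * Example (sketch): a probe() routine for a memory-only device typically
 * pairs pci_enable_device_mem() with a request for its regions; the region
 * name ("foo" here) would normally be the driver name:
 *
 *	err = pci_enable_device_mem(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		pci_disable_device(pdev);
 */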

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
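
/*
 * Example (sketch of a hypothetical driver, not part of this file): with
 * the managed interface the error paths shrink, since everything acquired
 * through devres is released automatically on probe failure or detach:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		... no explicit pci_disable_device() needed on failure ...
 *	}
 */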

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCI-E device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI-E reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCI-E device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
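
/*
 * Example (sketch): callers are expected to check PME# capability for the
 * target state before turning generation on, e.g.:
 *
 *	if (pci_pme_capable(dev, PCI_D3hot))
 *		pci_pme_active(dev, true);
 */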

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
			enable ? "enabled" : "disabled");
}

/**
 * pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	if (enable && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
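
/*
 * Example (sketch, with a hypothetical wol_enabled flag): a network driver
 * arming Wake-on-LAN in its suspend path might simply do:
 *
 *	pci_save_state(pdev);
 *	pci_wake_from_d3(pdev, wol_enabled);
 *	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 */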

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	dev->wakeup_prepared = false;
	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ?
" D2" : ""); 1412 } 1413 1414 pmc &= PCI_PM_CAP_PME_MASK; 1415 if (pmc) { 1416 dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n", 1417 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", 1418 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", 1419 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", 1420 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", 1421 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); 1422 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; 1423 /* 1424 * Make device's PM flags reflect the wake-up capability, but 1425 * let the user space enable it to wake up the system as needed. 1426 */ 1427 device_set_wakeup_capable(&dev->dev, true); 1428 device_set_wakeup_enable(&dev->dev, false); 1429 /* Disable the PME# generation functionality */ 1430 pci_pme_active(dev, false); 1431 } else { 1432 dev->pme_support = 0; 1433 } 1434 } 1435 1436 /** 1437 * platform_pci_wakeup_init - init platform wakeup if present 1438 * @dev: PCI device 1439 * 1440 * Some devices don't have PCI PM caps but can still generate wakeup 1441 * events through platform methods (like ACPI events). If @dev supports 1442 * platform wakeup events, set the device flag to indicate as much. This 1443 * may be redundant if the device also supports PCI PM caps, but double 1444 * initialization should be safe in that case. 1445 */ 1446 void platform_pci_wakeup_init(struct pci_dev *dev) 1447 { 1448 if (!platform_pci_can_wakeup(dev)) 1449 return; 1450 1451 device_set_wakeup_capable(&dev->dev, true); 1452 device_set_wakeup_enable(&dev->dev, false); 1453 platform_pci_sleep_wake(dev, false); 1454 } 1455 1456 /** 1457 * pci_add_save_buffer - allocate buffer for saving given capability registers 1458 * @dev: the PCI device 1459 * @cap: the capability to allocate the buffer for 1460 * @size: requested size of the buffer 1461 */ 1462 static int pci_add_cap_save_buffer( 1463 struct pci_dev *dev, char cap, unsigned int size) 1464 { 1465 int pos; 1466 struct pci_cap_saved_state *save_state; 1467 1468 pos = pci_find_capability(dev, cap); 1469 if (pos <= 0) 1470 return 0; 1471 1472 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL); 1473 if (!save_state) 1474 return -ENOMEM; 1475 1476 save_state->cap_nr = cap; 1477 pci_add_saved_cap(dev, save_state); 1478 1479 return 0; 1480 } 1481 1482 /** 1483 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities 1484 * @dev: the PCI device 1485 */ 1486 void pci_allocate_cap_save_buffers(struct pci_dev *dev) 1487 { 1488 int error; 1489 1490 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 1491 PCI_EXP_SAVE_REGS * sizeof(u16)); 1492 if (error) 1493 dev_err(&dev->dev, 1494 "unable to preallocate PCI Express save buffer\n"); 1495 1496 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); 1497 if (error) 1498 dev_err(&dev->dev, 1499 "unable to preallocate PCI-X save buffer\n"); 1500 } 1501 1502 /** 1503 * pci_enable_ari - enable ARI forwarding if hardware support it 1504 * @dev: the PCI device 1505 */ 1506 void pci_enable_ari(struct pci_dev *dev) 1507 { 1508 int pos; 1509 u32 cap; 1510 u16 ctrl; 1511 struct pci_dev *bridge; 1512 1513 if (!dev->is_pcie || dev->devfn) 1514 return; 1515 1516 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); 1517 if (!pos) 1518 return; 1519 1520 bridge = dev->bus->self; 1521 if (!bridge || !bridge->is_pcie) 1522 return; 1523 1524 pos = pci_find_capability(bridge, PCI_CAP_ID_EXP); 1525 if (!pos) 1526 return; 1527 1528 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap); 1529 if (!(cap & PCI_EXP_DEVCAP2_ARI)) 1530 return; 1531 1532 
	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}

/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1).
 */
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
	int slot;

	if (pci_ari_enabled(dev->bus))
		slot = 0;
	else
		slot = PCI_SLOT(dev->devfn);

	return (((pin - 1) + slot) % 4) + 1;
}

int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*pinp = pin;
	return PCI_SLOT(dev->devfn);
}

/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask &= ~(1 << bar);
}
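
/*
 * Worked example for pci_swizzle_interrupt_pin() above: INTB (pin 2) on a
 * device in slot 3 behind one bridge swizzles to ((2 - 1) + 3) % 4 + 1 = 1,
 * i.e. INTA at the bridge, which is why pci_common_swizzle() applies the
 * transformation once per bridge level on the way to the root bus.
 */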

/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
				int exclusive)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
		 bar,
		 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		 &pdev->resource[bar]);
	return -EBUSY;
}

/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}

/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 *
 * The key difference that _exclusive makes is that userspace is
 * explicitly not allowed to map the resource via /dev/mem or
 * sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
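
/*
 * Example (sketch): reserving and mapping a single BAR; the region name
 * normally matches the driver ("foo" here is made up):
 *
 *	err = pci_request_region(pdev, 0, "foo");
 *	if (err)
 *		return err;
 *	regs = pci_ioremap_bar(pdev, 0);
 *	...
 *	iounmap(regs);
 *	pci_release_region(pdev, 0);
 */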

/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			pci_release_region(pdev, i);
}

int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
				   const char *res_name, int excl)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			if (__pci_request_region(pdev, i, res_name, excl))
				goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		if (bars & (1 << i))
			pci_release_region(pdev, i);

	return -EBUSY;
}


/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}

int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
					   int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
					      IORESOURCE_EXCLUSIVE);
}

/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
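
/*
 * Example (sketch): a driver that only touches BARs 0 and 2 can reserve
 * just those by passing a bitmask:
 *
 *	int bars = (1 << 0) | (1 << 2);
 *
 *	err = pci_request_selected_regions(pdev, bars, "foo");
 *	...
 *	pci_release_selected_regions(pdev, bars);
 */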

/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that
 * /dev/mem and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}

static void __pci_set_master(struct pci_dev *dev, bool enable)
{
	u16 old_cmd, cmd;

	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
	if (enable)
		cmd = old_cmd | PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~PCI_COMMAND_MASTER;
	if (cmd != old_cmd) {
		dev_dbg(&dev->dev, "%s bus mastering\n",
			enable ? "enabling" : "disabling");
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = enable;
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}

/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}

#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else

#ifndef PCI_CACHE_LINE_BYTES
#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
#endif

/*
 * This can be overridden by arch code; note that it is measured in
 * 32-bit words, not bytes.
 */
u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;

/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/*
	 * Validate current setting: the PCI_CACHE_LINE_SIZE must be
	 * equal to or a multiple of the right value.
	 */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
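
/*
 * Example (sketch): DMA-capable drivers typically enable bus mastering
 * right after enabling the device, and treat MWI as a pure optimization
 * whose failure is harmless:
 *
 *	pci_set_master(pdev);
 *	pci_try_set_mwi(pdev);	(the return value may be ignored)
 */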

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	return pci_set_mwi(dev);
}

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
#endif /* ! PCI_DISABLE_MWI */
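
/*
 * Example (editor's illustrative sketch): MWI is an optimization only, so
 * callers typically use pci_try_set_mwi() and ignore the result.  The
 * foo_init_device() helper below is hypothetical.
 */
#if 0
static void foo_init_device(struct pci_dev *pdev)
{
	pci_set_master(pdev);		/* required for DMA */
	pci_try_set_mwi(pdev);		/* nice to have; failure is harmless */
}
#endif /* 0 */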

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable)
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	else
		new = pci_command | PCI_COMMAND_INTX_DISABLE;

	if (new != pci_command) {
		struct pci_devres *dr;

		pci_write_config_word(pdev, PCI_COMMAND, new);

		dr = find_pci_dr(pdev);
		if (dr && !dr->restore_intx) {
			dr->restore_intx = 1;
			dr->orig_intx = !enable;
		}
	}
}

/**
 * pci_msi_off - disables any msi or msix capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use msi see pci_enable_msi and friends.
 * This is a lower level primitive that allows us to disable
 * msi operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;

	return 0;
}
#endif

#ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);
#endif

#ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
#endif
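
/*
 * Example (editor's illustrative sketch): drivers commonly try a 64-bit DMA
 * mask first and fall back to 32-bit.  DMA_BIT_MASK() is the standard helper
 * from <linux/dma-mapping.h>; the surrounding function is hypothetical.
 */
#if 0
static int foo_setup_dma(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return 0;

	return -EIO;	/* no usable DMA configuration */
}
#endif /* 0 */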

static int pcie_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u32 cap;
	u16 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for the Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");

clear:
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
				PCI_EXP_DEVCTL_BCR_FLR);
	msleep(100);

	return 0;
}

static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u8 cap;
	u8 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for the Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
		if (!(status & PCI_AF_STATUS_TP))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");

clear:
	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	msleep(100);

	return 0;
}

static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap)
		return -ENOTTY;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	if (dev->current_state != PCI_D0)
		return -EINVAL;

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	msleep(pci_pm_d3_delay);

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	msleep(pci_pm_d3_delay);

	return 0;
}

static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	u16 ctrl;
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
		return -ENOTTY;

	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	return 0;
}

static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	if (!probe) {
		pci_block_user_cfg_access(dev);
		/* block PM suspend, driver probe, etc. */
		down(&dev->dev.sem);
	}

	rc = pcie_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_parent_bus_reset(dev, probe);
done:
	if (!probe) {
		up(&dev->dev.sem);
		pci_unblock_user_cfg_access(dev);
	}

	return rc;
}

/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);
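
/*
 * Example (editor's illustrative sketch): a caller that wants the device to
 * stay usable across __pci_reset_function() must save and restore config
 * space itself, since the reset leaves config space in a random state.  The
 * foo_hard_reset() helper below is hypothetical.
 */
#if 0
static int foo_hard_reset(struct pci_dev *pdev)
{
	int rc;

	pci_save_state(pdev);		/* config space becomes random */
	rc = __pci_reset_function(pdev);
	pci_restore_state(pdev);

	return rc;
}
#endif /* 0 */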

/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 1);
}

/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function in that it saves and restores device state
 * over the reset.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int rc;

	rc = pci_dev_reset(dev, 1);
	if (rc)
		return rc;

	pci_save_state(dev);

	/*
	 * Both INTx and MSI are disabled after the Interrupt Disable bit
	 * is set and the Bus Master bit is cleared.
	 */
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	rc = pci_dev_reset(dev, 0);

	pci_restore_state(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);

/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes
 * or appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	/* The field at bits 23:21 encodes 512 << n bytes. */
	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);

/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes
 * or appropriate error value.
 */
int pcix_get_mmrbc(struct pci_dev *dev)
{
	int cap;
	u16 cmd;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	/* PCI_X_CMD is a 16-bit register; use a word access. */
	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);
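
/*
 * Worked example (editor's note): a PCI_X_CMD max-read field of 2 decodes
 * to 512 << 2 == 2048 bytes, and pcix_set_mmrbc(dev, 2048) below encodes it
 * back via ffs(2048) - 10 == 12 - 10 == 2.
 */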

/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *	valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have errata
 * that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	/* PCI_X_CMD is a 16-bit register; use word accesses. */
	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		if (v > o && dev->bus &&
		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);

/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes
 * or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	int ret, cap;
	u16 ctl;

	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!cap)
		return -EINVAL;

	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (!ret)
		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);

	return ret;
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *	valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum read byte count
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	int cap, err = -EINVAL;
	u16 ctl, v;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		goto out;

	v = (ffs(rq) - 8) << 12;

	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!cap)
		goto out;

	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		goto out;

	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= v;
		/* PCI_EXP_DEVCTL is a 16-bit register; use a word write. */
		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
	}

out:
	return err;
}
EXPORT_SYMBOL(pcie_set_readrq);

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
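
/*
 * Example (editor's illustrative sketch): pci_select_bars() is typically
 * paired with pci_request_selected_regions() to reserve only the memory
 * BARs of a device.  The helper and driver name below are hypothetical.
 */
#if 0
static int foo_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "foo");
}
#endif /* 0 */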

/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @change_bridge: traverse ancestors and change bridges
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, bool change_bridge)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;

	WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (decode)
		cmd |= command_bits;
	else
		cmd &= ~command_bits;
	pci_write_config_word(dev, PCI_COMMAND, cmd);

	if (!change_bridge)
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}

#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
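
/*
 * Worked example (editor's note): the parser below accepts entries of the
 * form "[<order>@][<domain>:]<bus>:<slot>.<func>", separated by ';' or ','.
 * For instance, "pci=resource_alignment=20@00:1f.0" requests 1 << 20 bytes
 * (1 MiB) of alignment for device 00:1f.0; if no order is given, PAGE_SIZE
 * is used.
 */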

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				align = 1 << align_order;
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}

/**
 * pci_is_reassigndev - check if specified PCI is target device to reassign
 * @dev: the PCI device to check
 *
 * RETURNS: non-zero if the PCI device is a target device to reassign,
 *          zero if it is not.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	return (pci_specified_resource_alignment(dev) != 0);
}

ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}

ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;

	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}

static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
					pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);
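
/*
 * Example (editor's note): the bus attribute created above appears as
 * /sys/bus/pci/resource_alignment and stores the same format as the boot
 * parameter, e.g. "echo 20@00:1f.0 > /sys/bus/pci/resource_alignment".
 */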

static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 * @dev: The PCI device of the root bridge.
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
	return 1;
}

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
					strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);

EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_enable_wake);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);