/*
 *	$Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
 *
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */
#include "pci.h"

unsigned int pci_pm_d3_delay = 10;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char __devinit
pci_bus_max_busnr(struct pci_bus* bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if(n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if(n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}
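/*
 * Example (editorial sketch, not part of the original file): a driver can
 * combine pci_find_capability() (defined below) with
 * pci_find_next_capability() to visit every instance of a capability that
 * may legally appear more than once, e.g. the HyperTransport capability.
 * The helper name walk_ht_caps() is purely illustrative.
 */
#if 0
static void walk_ht_caps(struct pci_dev *dev)
{
	int pos = pci_find_capability(dev, PCI_CAP_ID_HT);

	while (pos) {
		/* inspect the capability block at "pos" with
		 * pci_read_config_*() before moving on */
		pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_HT);
	}
}
#endif  /*  0  */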
/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
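/*
 * Example (editorial sketch, not part of the original file): early fixup or
 * architecture code sometimes needs to probe a function before its pci_dev
 * has been created; pci_bus_find_capability() covers that case using only a
 * bus pointer and a devfn.  The helper name is illustrative only.
 */
#if 0
static int is_pcie_function(struct pci_bus *bus, unsigned int devfn)
{
	/* non-zero means a PCI Express capability was found */
	return pci_bus_find_capability(bus, devfn, PCI_CAP_ID_EXP) != 0;
}
#endif  /*  0  */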
/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
	int pos = 0x100;

	if (dev->cfg_size <= 256)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 0x100)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
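/*
 * Example (editorial sketch, not part of the original file): the bounded
 * loop below follows the NB in pci_find_next_ht_capability() and caps the
 * number of iterations so a device with a corrupted capability list cannot
 * hang the caller.  Reusing PCI_FIND_CAP_TTL as the bound and the function
 * name are editorial choices, not part of the kernel API.
 */
#if 0
static int count_ht_slave_caps(struct pci_dev *dev)
{
	int pos, count = 0, limit = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
	while (pos && limit-- > 0) {
		count++;
		pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_SLAVE);
	}
	return count;
}
#endif  /*  0  */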
/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
void
pci_restore_bars(struct pci_dev *dev)
{
	int i, numres;

	switch (dev->hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
		numres = 6;
		break;
	case PCI_HEADER_TYPE_BRIDGE:
		numres = 2;
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		numres = 1;
		break;
	default:
		/* Should never get here, but just in case... */
		return;
	}

	for (i = 0; i < numres; i ++)
		pci_update_resource(dev, &dev->resource[i], i);
}
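/*
 * Example (editorial sketch, not part of the original file): resource setup
 * code typically uses pci_find_parent_resource() to locate the bus window a
 * BAR must live under before inserting it into the resource tree with
 * request_resource().  The function name below is illustrative only.
 */
#if 0
static int claim_one_bar(struct pci_dev *dev, int idx)
{
	struct resource *res = &dev->resource[idx];
	struct resource *root = pci_find_parent_resource(dev, res);

	if (!root)
		return -EINVAL;		/* no suitable parent window */
	return request_resource(root, res);
}
#endif  /*  0  */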
int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
 *
 * Transition a device to a new power state, using the Power Management
 * Capabilities in the device's config space.
 *
 * RETURN VALUE:
 * -EINVAL if trying to enter a lower state than we're already in.
 * 0 if we're already in the requested state.
 * -EIO if device does not support PCI PM.
 * 0 if we can successfully change the power state.
 */
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int pm, need_restore = 0;
	u16 pmcsr, pmc;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;

	/*
	 * If the device or the parent bridge can't support PCI PM, ignore
	 * the request if we're doing anything besides putting it into D0
	 * (which would only happen on boot).
	 */
	if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		return 0;

	/* Validate current state:
	 * We can enter D0 from any state, but we can only go deeper
	 * into sleep if we're already in a low-power state.
	 */
	if (state != PCI_D0 && dev->current_state > state) {
		printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
			__FUNCTION__, pci_name(dev), state, dev->current_state);
		return -EINVAL;
	} else if (dev->current_state == state)
		return 0;        /* we're already there */


	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* abort if the device doesn't support PM capabilities */
	if (!pm)
		return -EIO;

	pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		printk(KERN_DEBUG
		       "PCI: %s has unsupported PM cap regs version (%u)\n",
		       pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
		return -EIO;
	}

	/* check if this device supports the desired state */
	if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
		return -EIO;
	else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = 1;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(pci_pm_d3_delay);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(200);

	/*
	 * Give firmware a chance to be called, such as ACPI _PRx, _PSx
	 * Firmware method after native method ?
	 */
	if (platform_pci_set_power_state)
		platform_pci_set_power_state(dev, state);

	dev->current_state = state;

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	return 0;
}
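/*
 * Example (editorial sketch, not part of the original file): a network
 * driver may park idle hardware in D3hot while the interface is down and
 * bring it back to D0 when it is opened again.  The callback names are
 * illustrative only.
 */
#if 0
static int foo_power_off(struct pci_dev *pdev)
{
	/* quiesce the hardware in device-specific code first */
	return pci_set_power_state(pdev, PCI_D3hot);
}

static int foo_power_on(struct pci_dev *pdev)
{
	/* returns 0 even if the device is already in D0 */
	return pci_set_power_state(pdev, PCI_D0);
}
#endif  /*  0  */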
int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to the suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	int ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	if (platform_pci_choose_state) {
		ret = platform_pci_choose_state(dev, state);
		if (ret >= 0)
			state.event = ret;
	}

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
		return PCI_D3hot;
	default:
		printk("Unrecognized suspend event %d\n", state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + sizeof(u16) * 4, GFP_KERNEL);
	if (!save_state) {
		dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n");
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	pci_add_saved_cap(dev, save_state);
	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL);
	if (!save_state) {
		dev_err(&dev->dev, "Out of memory in pci_save_pcix_state\n");
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]);
	pci_add_saved_cap(dev, save_state);
	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}

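/*
 * Example (editorial sketch, not part of the original file): a conventional
 * driver suspend callback saves config space, disables the device and then
 * enters the state suggested by pci_choose_state().  pci_save_state() is
 * defined just below; the foo_ name is illustrative only.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* stop DMA and interrupts in device-specific code before this point */
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
#endif  /*  0  */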
/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	if ((i = pci_save_msi_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	/* PCI Express registers must be restored first */
	pci_restore_pcie_state(dev);

	/*
	 * The Base Address registers should be programmed before the command
	 * register(s)
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			printk(KERN_DEBUG "PM: Writing back config space on "
				"device %s at offset %x (was %x, writing %x)\n",
				pci_name(dev), i,
				val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev,i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	return 0;
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * __pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code; write a proper resume handler and use that
 * instead.
 */
int
__pci_reenable_device(struct pci_dev *dev)
{
	if (atomic_read(&dev->enable_cnt))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

/**
 * pci_enable_device_bars - Initialize some of a device for use
 * @dev: PCI device to be initialized
 * @bars: bitmask of BARs that must be configured
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable selected I/O and memory resources. Wake up the device if it
 * was suspended. Beware, this function can fail.
 */
int
pci_enable_device_bars(struct pci_dev *dev, int bars)
{
	int err;

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
}
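/*
 * Example (editorial sketch, not part of the original file): the resume
 * counterpart to the suspend sketch above restores power and configuration
 * space before re-enabling the device.  The foo_ name is illustrative only.
 */
#if 0
static int foo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
#endif  /*  0  */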
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	if (dev->msi_enabled)
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
			PCI_CAP_ID_MSI);
	if (dev->msix_enabled)
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
			PCI_CAP_ID_MSIX);

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}
	dev->is_busmaster = 0;

	pcibios_disable_device(dev);
}

/**
 * pci_enable_wake - enable device to generate PME# when suspended
 * @dev: PCI device to operate on
 * @state: Current state of device.
 * @enable: Flag to enable or disable generation
 *
 * Set the bits in the device's PM Capabilities to generate PME# when
 * the system is suspended.
 *
 * -EIO is returned if device doesn't have PM Capabilities.
 * -EINVAL is returned if device supports it, but can't generate wake events.
 * 0 if operation is successful.
 *
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
	int pm;
	u16 value;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* If device doesn't support PM Capabilities, but request is to disable
	 * wake events, it's a nop; otherwise fail */
	if (!pm)
		return enable ? -EIO : 0;

	/* Check device's ability to generate PME# */
	pci_read_config_word(dev,pm+PCI_PM_PMC,&value);

	value &= PCI_PM_CAP_PME_MASK;
	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;   /* First bit of mask */

	/* Check if it can generate PME# from requested state. */
	if (!value || !(value & (1 << state)))
		return enable ? -EINVAL : 0;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

	/* Clear PME_Status by writing 1 to it and enable PME# */
	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

	if (!enable)
		value &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

	return 0;
}

int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;
	pin--;
	while (dev->bus->self) {
		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}
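/*
 * Example (editorial sketch, not part of the original file): a driver that
 * supports Wake-on-LAN style wakeups typically arms PME# just before
 * entering D3hot and disarms it on resume.  The wol flag and the function
 * names are illustrative only.
 */
#if 0
static void foo_power_down(struct pci_dev *pdev, int wol)
{
	/* -EINVAL/-EIO can be ignored here: the device simply cannot wake us */
	pci_enable_wake(pdev, PCI_D3hot, wol);
	pci_set_power_state(pdev, PCI_D3hot);
}

static void foo_power_up(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);	/* disable wake events */
}
#endif  /*  0  */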
/**
 * pci_release_region - Release a PCI BAR
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
}

/**
 * pci_request_region - Reserve a PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!request_mem_region(pci_resource_start(pdev, bar),
				        pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}

	return 0;

err_out:
	printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx "
		"for device %s\n",
		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		bar + 1, /* PCI BAR # */
		(unsigned long long)pci_resource_len(pdev, bar),
		(unsigned long long)pci_resource_start(pdev, bar),
		pci_name(pdev));
	return -EBUSY;
}

/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			pci_release_region(pdev, i);
}

/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			if(pci_request_region(pdev, i, res_name))
				goto err_out;
	return 0;

err_out:
	while(--i >= 0)
		if (bars & (1 << i))
			pci_release_region(pdev, i);

	return -EBUSY;
}
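/*
 * Example (editorial sketch, not part of the original file): a driver that
 * only touches memory BARs can reserve just those by pairing
 * pci_request_selected_regions() with pci_select_bars() (defined later in
 * this file).  The "foo_driver" owner string and function name are
 * illustrative only.
 */
#if 0
static int foo_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
	int err;

	err = pci_request_selected_regions(pdev, bars, "foo_driver");
	if (err)
		return err;		/* -EBUSY: someone else owns a BAR */

	/* ... later, on teardown ... */
	pci_release_selected_regions(pdev, bars);
	return 0;
}
#endif  /*  0  */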
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void
pci_set_master(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (! (cmd & PCI_COMMAND_MASTER)) {
		pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = 1;
	pcibios_set_master(dev);
}

#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else

#ifndef PCI_CACHE_LINE_BYTES
#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
#endif

/* This can be overridden by arch code. */
/* Don't forget this is measured in 32-bit words, not bytes */
u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
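/*
 * Example (editorial sketch, not part of the original file): the canonical
 * probe() sequence built from the helpers above: enable the device, claim
 * its regions, then turn on bus mastering.  The foo_ names and the
 * "foo_driver" owner string are illustrative only.
 */
#if 0
static int __devinit foo_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "foo_driver");
	if (err)
		goto err_disable;

	pci_set_master(pdev);
	/* map BARs, allocate private data and register the device here */
	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}
#endif  /*  0  */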
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or a multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
	       "by device %s\n", pci_cache_line_size << 2, pci_name(dev));

	return -EINVAL;
}

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
 * after first making sure the cache line size register is programmed
 * correctly (see pci_set_cacheline_size()).
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (! (cmd & PCI_COMMAND_INVALIDATE)) {
		pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
#endif /* ! PCI_DISABLE_MWI */

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable) {
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	} else {
		new = pci_command | PCI_COMMAND_INTX_DISABLE;
	}

	if (new != pci_command) {
		pci_write_config_word(pdev, PCI_COMMAND, new);
	}
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;

	return 0;
}
#endif
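/*
 * Example (editorial sketch, not part of the original file): the usual
 * pattern is to try 64-bit DMA addressing and fall back to 32 bits.
 * DMA_64BIT_MASK/DMA_32BIT_MASK come from <linux/dma-mapping.h>; the
 * function name is illustrative only.
 */
#if 0
static int foo_setup_dma(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		return pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);

	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		return pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);

	return -EIO;	/* no usable DMA configuration */
}
#endif  /*  0  */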
/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes a BAR mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;
	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}

static int __devinit pci_init(void)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		pci_fixup_device(pci_fixup_final, dev);
	}
	return 0;
}

static int __devinit pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);

device_initcall(pci_init);

EXPORT_SYMBOL_GPL(pci_restore_bars);
EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_enable_wake);