/*
 *	$Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
 *
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */
#include "pci.h"

unsigned int pci_pm_d3_delay = 10;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char __devinit
pci_bus_max_busnr(struct pci_bus* bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if(n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if(n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, u8 pos, int cap)
{
	u8 id;
	int ttl = 48;

	while (ttl--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_type, int cap)
{
	u16 status;
	u8 pos;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		pos = PCI_CAPABILITY_LIST;
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		pos = PCI_CB_CAPABILITY_LIST;
		break;
	default:
		return 0;
	}
	return __pci_find_next_cap(bus, devfn, pos, cap);
}
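
/*
 * Illustrative sketch (not from the original file, kept out of the build
 * like pci_max_busnr() above): counting how many instances of a given
 * capability ID a device exposes by chaining pci_find_capability() with
 * pci_find_next_capability().  The function name is hypothetical.
 */
#if 0
static int example_count_capability(struct pci_dev *dev, int cap)
{
	int count = 0;
	int pos = pci_find_capability(dev, cap);

	while (pos) {
		count++;
		pos = pci_find_next_capability(dev, pos, cap);
	}
	return count;
}
#endif  /*  0  */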

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	return __pci_bus_find_cap(dev->bus, dev->devfn, dev->hdr_type, cap);
}

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap);
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
	int pos = 0x100;

	if (dev->cfg_size <= 256)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 0x100)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
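
/*
 * Illustrative sketch (not from the original file, kept out of the build):
 * locating the Advanced Error Reporting block in PCI Express extended
 * configuration space with pci_find_ext_capability().  The function name
 * is hypothetical.
 */
#if 0
static int example_has_aer(struct pci_dev *dev)
{
	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);

	if (pos)
		printk(KERN_DEBUG "PCI: %s: AER capability at offset 0x%x\n",
		       pci_name(dev), pos);
	return pos != 0;
}
#endif  /*  0  */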

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure containing the resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For a given resource region of a given device, return the resource
 * region of the parent bus that contains the given region, or from which
 * it should be allocated.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
void
pci_restore_bars(struct pci_dev *dev)
{
	int i, numres;

	switch (dev->hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
		numres = 6;
		break;
	case PCI_HEADER_TYPE_BRIDGE:
		numres = 2;
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		numres = 1;
		break;
	default:
		/* Should never get here, but just in case... */
		return;
	}

	for (i = 0; i < numres; i++)
		pci_update_resource(dev, &dev->resource[i], i);
}
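
/*
 * Illustrative sketch (not from the original file, kept out of the build):
 * using pci_find_parent_resource() to check whether one of a device's
 * resources is routed by a window of its parent bus.  The function name
 * is hypothetical.
 */
#if 0
static int example_resource_is_routed(struct pci_dev *dev, int resno)
{
	struct resource *res = &dev->resource[resno];

	if (!res->flags)
		return 0;
	return pci_find_parent_resource(dev, res) != NULL;
}
#endif  /*  0  */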

int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
 *
 * Transition a device to a new power state, using the Power Management
 * Capabilities in the device's config space.
 *
 * RETURN VALUE:
 * -EINVAL if trying to leave a low-power state for anything other than D0.
 * 0 if we're already in the requested state.
 * -EIO if device does not support PCI PM.
 * 0 if we can successfully change the power state.
 */
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int pm, need_restore = 0;
	u16 pmcsr, pmc;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;

	/* Validate current state:
	 * We can enter D0 from any state; otherwise we may only go
	 * to an equal or deeper sleep state than the current one.
	 */
	if (state != PCI_D0 && dev->current_state > state) {
		printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
			__FUNCTION__, pci_name(dev), state, dev->current_state);
		return -EINVAL;
	} else if (dev->current_state == state)
		return 0;	/* we're already there */

	/*
	 * If the device or the parent bridge can't handle D1 or D2
	 * transitions, silently ignore requests for those states.
	 */
	if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		return 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* abort if the device doesn't support PM capabilities */
	if (!pm)
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		printk(KERN_DEBUG
		       "PCI: %s has unsupported PM cap regs version (%u)\n",
		       pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
		return -EIO;
	}

	/* check if this device supports the desired state */
	if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
		return -EIO;
	else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = 1;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(pci_pm_d3_delay);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(200);

	/*
	 * Give the firmware (e.g. ACPI _PRx, _PSx methods) a chance to act
	 * as well; the firmware method is invoked after the native one.
	 */
	if (platform_pci_set_power_state)
		platform_pci_set_power_state(dev, state);

	dev->current_state = state;

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	return 0;
}
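
/*
 * Illustrative sketch (not from the original file, kept out of the build):
 * a probe routine could force a device that the BIOS left in D3hot back
 * into D0 before trusting its config space; -EIO here only means the
 * device has no PM capability and can be ignored.  The function name is
 * hypothetical.
 */
#if 0
static int example_force_d0(struct pci_dev *pdev)
{
	int err = pci_set_power_state(pdev, PCI_D0);

	return (err == -EIO) ? 0 : err;
}
#endif  /*  0  */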

int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to the driver's suspend() method.
 *
 * Returns the PCI power state suitable for the given device and the given
 * system sleep message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	int ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	if (platform_pci_choose_state) {
		ret = platform_pci_choose_state(dev, state);
		if (ret >= 0)
			state.event = ret;
	}

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
		return PCI_D3hot;
	default:
		printk(KERN_ERR "Unrecognized suspend event %d\n", state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);
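
/*
 * Illustrative sketch (not from the original file, kept out of the build):
 * a driver's suspend() method can let pci_choose_state() translate the
 * system sleep message into a device power state instead of hard-coding
 * D3hot.  The function name is hypothetical; pci_save_state() is defined
 * further down in this file.
 */
#if 0
static int example_driver_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}
#endif  /*  0  */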

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + sizeof(u16) * 4, GFP_KERNEL);
	if (!save_state) {
		dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n");
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	pci_add_saved_cap(dev, save_state);
	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL);
	if (!save_state) {
		dev_err(&dev->dev, "Out of memory in pci_save_pcix_state\n");
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]);
	pci_add_saved_cap(dev, save_state);
	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	if ((i = pci_save_msi_state(dev)) != 0)
		return i;
	if ((i = pci_save_msix_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	/* PCI Express registers must be restored first */
	pci_restore_pcie_state(dev);

	/*
	 * The Base Address registers should be programmed before the command
	 * register(s)
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			printk(KERN_DEBUG "PM: Writing back config space on "
				"device %s at offset %x (was %x, writing %x)\n",
				pci_name(dev), i,
				val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_msix_state(dev);
	return 0;
}
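
/*
 * Illustrative sketch (not from the original file, kept out of the build):
 * the resume-side counterpart of the suspend sketch above -- bring the
 * device back to D0, then write back the configuration space captured by
 * pci_save_state().  The function name is hypothetical.
 */
#if 0
static int example_driver_resume(struct pci_dev *pdev)
{
	int err = pci_set_power_state(pdev, PCI_D0);

	if (err && err != -EIO)
		return err;
	return pci_restore_state(pdev);
}
#endif  /*  0  */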

/**
 * pci_enable_device_bars - Initialize some of a device for use
 * @dev: PCI device to be initialized
 * @bars: bitmask of BAR's that must be configured
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable selected I/O and memory resources. Wake up the device if it
 * was suspended. Beware, this function can fail.
 */
int
pci_enable_device_bars(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	return 0;
}

/**
 * __pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note this function is a backend and is not supposed to be called by
 * normal code, use pci_enable_device() instead.
 */
int
__pci_enable_device(struct pci_dev *dev)
{
	int err;

	err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
	if (err)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);
	return 0;
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	int result;

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */
	result = __pci_enable_device(dev);
	if (result < 0)
		atomic_dec(&dev->enable_cnt);
	return result;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	if (dev->msi_enabled)
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
			PCI_CAP_ID_MSI);
	if (dev->msix_enabled)
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
			PCI_CAP_ID_MSIX);

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}
	dev->is_busmaster = 0;

	pcibios_disable_device(dev);
}
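
/*
 * Illustrative sketch (not from the original file, kept out of the build):
 * a driver that only uses a single BAR (say, one MMIO register window)
 * can ask for just that BAR to be enabled via pci_enable_device_bars().
 * The function name is hypothetical.
 */
#if 0
static int example_enable_one_bar(struct pci_dev *pdev, int bar)
{
	/* Enable decode only for the single BAR we intend to use. */
	return pci_enable_device_bars(pdev, 1 << bar);
}
#endif  /*  0  */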

/**
 * pci_enable_wake - enable device to generate PME# when suspended
 * @dev: - PCI device to operate on
 * @state: - Current state of device.
 * @enable: - Flag to enable or disable generation
 *
 * Set the bits in the device's PM Capabilities to generate PME# when
 * the system is suspended.
 *
 * -EIO is returned if device doesn't have PM Capabilities.
 * -EINVAL is returned if device supports it, but can't generate wake events.
 * 0 if operation is successful.
 *
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
	int pm;
	u16 value;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* If device doesn't support PM Capabilities, but request is to disable
	 * wake events, it's a nop; otherwise fail */
	if (!pm)
		return enable ? -EIO : 0;

	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &value);

	value &= PCI_PM_CAP_PME_MASK;
	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;   /* First bit of mask */

	/* Check if it can generate PME# from requested state. */
	if (!value || !(value & (1 << state)))
		return enable ? -EINVAL : 0;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

	/* Clear PME_Status by writing 1 to it and enable PME# */
	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

	if (!enable)
		value &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

	return 0;
}

int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;
	pin--;
	while (dev->bus->self) {
		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_release_region - Release a PCI BAR
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
}

/**
 * pci_request_region - Reserve a PCI I/O or memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}

	return 0;

err_out:
	printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx "
		"for device %s\n",
		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		bar + 1, /* PCI BAR # */
		(unsigned long long)pci_resource_len(pdev, bar),
		(unsigned long long)pci_resource_start(pdev, bar),
		pci_name(pdev));
	return -EBUSY;
}
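
/*
 * Illustrative sketch (not from the original file, kept out of the build):
 * reserving a single memory BAR with pci_request_region() and mapping it;
 * a real caller would also need <asm/io.h> for ioremap()/iounmap().  The
 * function name and owner string are hypothetical.
 */
#if 0
static void __iomem *example_map_bar(struct pci_dev *pdev, int bar)
{
	void __iomem *base;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return NULL;
	if (pci_request_region(pdev, bar, "example"))
		return NULL;

	base = ioremap(pci_resource_start(pdev, bar),
		       pci_resource_len(pdev, bar));
	if (!base)
		pci_release_region(pdev, bar);
	return base;
}
#endif  /*  0  */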

/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < 6; i++)
		pci_release_region(pdev, i);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int i;

	for (i = 0; i < 6; i++)
		if(pci_request_region(pdev, i, res_name))
			goto err_out;
	return 0;

err_out:
	while(--i >= 0)
		pci_release_region(pdev, i);

	return -EBUSY;
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void
pci_set_master(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (! (cmd & PCI_COMMAND_MASTER)) {
		pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = 1;
	pcibios_set_master(dev);
}
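
/*
 * Illustrative sketch (not from the original file, kept out of the build):
 * the usual probe()/remove() pairing of the helpers above -- enable the
 * device, reserve all of its regions, turn on bus mastering, and undo the
 * steps in reverse order on the error path and at remove time.  The
 * function names and owner string are hypothetical.
 */
#if 0
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err)
		goto err_disable;

	pci_set_master(pdev);
	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
#endif  /*  0  */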

#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else

#ifndef PCI_CACHE_LINE_BYTES
#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
#endif

/* This can be overridden by arch code. */
/* Don't forget this is measured in 32-bit words, not bytes */
u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;

/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
	       "by device %s\n", pci_cache_line_size << 2, pci_name(dev));

	return -EINVAL;
}

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
 * and then calls @pcibios_set_mwi to do the needed arch specific
 * operations or a generic mwi-prep function.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (! (cmd & PCI_COMMAND_INVALIDATE)) {
		pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
#endif /* ! PCI_DISABLE_MWI */

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable) {
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	} else {
		new = pci_command | PCI_COMMAND_INTX_DISABLE;
	}

	if (new != pci_command) {
		pci_write_config_word(pdev, PCI_COMMAND, new);
	}
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;

	return 0;
}
#endif

static int __devinit pci_init(void)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		pci_fixup_device(pci_fixup_final, dev);
	}
	return 0;
}

static int __devinit pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);

device_initcall(pci_init);
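
/*
 * Illustrative sketch (not from the original file, kept out of the build):
 * the usual 64-bit-with-32-bit-fallback DMA mask negotiation built on
 * pci_set_dma_mask()/pci_set_consistent_dma_mask().  DMA_64BIT_MASK and
 * DMA_32BIT_MASK come from <linux/dma-mapping.h>; the function name is
 * hypothetical.
 */
#if 0
static int example_set_dma_masks(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		return pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);

	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		return pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);

	return -EIO;
}
#endif  /*  0  */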

#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);
#endif

EXPORT_SYMBOL_GPL(pci_restore_bars);
EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_enable_wake);

/* Quirk info */

EXPORT_SYMBOL(isa_dma_bridge_buggy);
EXPORT_SYMBOL(pci_pci_problems);