/*
 *	$Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
 *
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */
#include "pci.h"


/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char __devinit
pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}
#endif /* 0 */

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, u8 pos, int cap)
{
	u8 id;
	int ttl = 48;

	while (ttl--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_type, int cap)
{
	u16 status;
	u8 pos;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		pos = PCI_CAPABILITY_LIST;
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		pos = PCI_CB_CAPABILITY_LIST;
		break;
	default:
		return 0;
	}
	return __pci_find_next_cap(bus, devfn, pos, cap);
}
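
/*
 * Usage sketch (hypothetical driver code, not part of this file): a caller
 * typically locates a capability with pci_find_capability() and then reads
 * one of its registers; pci_find_next_capability() continues the walk when
 * a capability ID can legitimately occur more than once.
 *
 *	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmcsr;
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 */
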
/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signaled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	return __pci_bus_find_cap(dev->bus, dev->devfn, dev->hdr_type, cap);
}

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap);
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
	int pos = 0x100;

	if (dev->cfg_size <= 256)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 0x100)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
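
/*
 * Usage sketch (hypothetical driver code): probing for an extended
 * capability in PCI Express configuration space.
 *
 *	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *
 *	if (pos)
 *		printk(KERN_INFO "%s: AER capability at config offset %#x\n",
 *		       pci_name(pdev), pos);
 */
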
/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device whose resources are being searched
 * @res: child resource record for which parent is sought
 *
 * For the given resource region of the given device, return the resource
 * region of the parent bus the given region is contained in, or from
 * which it should be allocated.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}
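
/*
 * Usage sketch (hypothetical arch or fixup code): inserting a device
 * resource under the window of its parent bus.
 *
 *	struct resource *parent = pci_find_parent_resource(dev, res);
 *
 *	if (parent)
 *		request_resource(parent, res);
 */
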
/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
void
pci_restore_bars(struct pci_dev *dev)
{
	int i, numres;

	switch (dev->hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
		numres = 6;
		break;
	case PCI_HEADER_TYPE_BRIDGE:
		numres = 2;
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		numres = 1;
		break;
	default:
		/* Should never get here, but just in case... */
		return;
	}

	for (i = 0; i < numres; i++)
		pci_update_resource(dev, &dev->resource[i], i);
}

int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
 *
 * Transition a device to a new power state, using the Power Management
 * Capabilities in the device's config space.
 *
 * RETURN VALUE:
 * -EINVAL if trying to enter a shallower (higher-power) state than the
 *	current one, unless that state is D0.
 * 0 if we're already in the requested state.
 * -EIO if device does not support PCI PM.
 * 0 if we can successfully change the power state.
 */
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int pm, need_restore = 0;
	u16 pmcsr, pmc;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;

	/* Validate current state:
	 * we can enter D0 from any state, but we can only go deeper
	 * into a sleep state from a shallower one.
	 */
	if (state != PCI_D0 && dev->current_state > state) {
		printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
			__FUNCTION__, pci_name(dev), state, dev->current_state);
		return -EINVAL;
	} else if (dev->current_state == state)
		return 0;	/* we're already there */

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* abort if the device doesn't support PM capabilities */
	if (!pm)
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		printk(KERN_DEBUG
		       "PCI: %s has unsupported PM cap regs version (%u)\n",
		       pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
		return -EIO;
	}

	/* check if this device supports the desired state */
	if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
		return -EIO;
	else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = 1;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(10);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(200);

	/*
	 * Give firmware a chance to be called, such as ACPI _PRx, _PSx
	 * Firmware method after native method ?
	 */
	if (platform_pci_set_power_state)
		platform_pci_set_power_state(dev, state);

	dev->current_state = state;

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	return 0;
}
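
/*
 * Usage sketch (hypothetical driver suspend path): put the device into
 * D3hot while tolerating hardware that has no PCI PM capability, the same
 * convention pci_enable_device_bars() below uses for the D0 transition.
 *
 *	int err = pci_set_power_state(pdev, PCI_D3hot);
 *
 *	if (err && err != -EIO)
 *		return err;
 */
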
int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to the suspend() callback.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	int ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	if (platform_pci_choose_state) {
		ret = platform_pci_choose_state(dev, state);
		if (ret >= 0)
			state.event = ret;
	}

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_SUSPEND:
		return PCI_D3hot;
	default:
		printk(KERN_ERR "pci_choose_state: unrecognized PM event %d\n",
			state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	if ((i = pci_save_msi_state(dev)) != 0)
		return i;
	if ((i = pci_save_msix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	/*
	 * The Base Address registers should be programmed before the command
	 * register(s)
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			printk(KERN_DEBUG "PM: Writing back config space on "
				"device %s at offset %x (was %x, writing %x)\n",
				pci_name(dev), i,
				val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_msi_state(dev);
	pci_restore_msix_state(dev);
	return 0;
}

/**
 * pci_enable_device_bars - Initialize some of a device for use
 * @dev: PCI device to be initialized
 * @bars: bitmask of BARs that must be configured
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable selected I/O and memory resources. Wake up the device if it
 * was suspended. Beware, this function can fail.
 */
int
pci_enable_device_bars(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	return 0;
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int
pci_enable_device(struct pci_dev *dev)
{
	int err;

	if (dev->is_enabled)
		return 0;

	err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
	if (err)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);
	dev->is_enabled = 1;
	return 0;
}
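
/*
 * Usage sketch (hypothetical driver, names made up): how the helpers above
 * are typically paired in a driver's suspend and resume callbacks.
 *
 *	static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 *
 *	static int mydrv_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return pci_enable_device(pdev);
 *	}
 */
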
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore. This only involves disabling PCI bus-mastering, if active.
 */
void
pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	if (dev->msi_enabled)
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
			PCI_CAP_ID_MSI);
	if (dev->msix_enabled)
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSIX),
			PCI_CAP_ID_MSIX);

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}
	dev->is_busmaster = 0;

	pcibios_disable_device(dev);
	dev->is_enabled = 0;
}

/**
 * pci_enable_wake - enable device to generate PME# when suspended
 * @dev: PCI device to operate on
 * @state: PCI state from which the device will issue PME#
 * @enable: flag to enable or disable PME# generation
 *
 * Set the bits in the device's PM Capabilities to generate PME# when
 * the system is suspended.
 *
 * -EIO is returned if device doesn't have PM Capabilities.
 * -EINVAL is returned if device supports it, but can't generate wake events.
 * 0 if operation is successful.
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
	int pm;
	u16 value;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* If device doesn't support PM Capabilities, but request is to disable
	 * wake events, it's a nop; otherwise fail */
	if (!pm)
		return enable ? -EIO : 0;

	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &value);

	value &= PCI_PM_CAP_PME_MASK;
	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;   /* First bit of mask */

	/* Check if it can generate PME# from requested state. */
	if (!value || !(value & (1 << state)))
		return enable ? -EINVAL : 0;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

	/* Clear PME_Status by writing 1 to it and enable PME# */
	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

	if (!enable)
		value &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

	return 0;
}

/*
 * pci_get_interrupt_pin - return the interrupt pin a device presents upstream
 * @dev: PCI device whose interrupt pin is sought
 * @bridge: set to the root-bus device at which the pin is seen
 *
 * Walk up from @dev to the root bus, applying the standard bridge swizzle
 * at each hop.  Returns the resulting pin (0 for INTA# through 3 for INTD#)
 * as seen at *@bridge, or -1 if the device does not use an interrupt pin.
 */
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;
	pin--;
	while (dev->bus->self) {
		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}
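
/*
 * Usage sketch (hypothetical driver suspend/resume code): arm PME# for the
 * target sleep state and disarm it again on resume.
 *
 *	suspend:  pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
 *	resume:   pci_enable_wake(pdev, PCI_D0, 0);
 */
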
/**
 * pci_release_region - Release a PCI BAR
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
}

/**
 * pci_request_region - Reserve a PCI I/O or memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or -EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}

	return 0;

err_out:
	printk(KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx "
		"for device %s\n",
		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		bar + 1, /* PCI BAR # */
		(unsigned long long)pci_resource_len(pdev, bar),
		(unsigned long long)pci_resource_start(pdev, bar),
		pci_name(pdev));
	return -EBUSY;
}


/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < 6; i++)
		pci_release_region(pdev, i);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or -EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int i;

	for (i = 0; i < 6; i++)
		if (pci_request_region(pdev, i, res_name))
			goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		pci_release_region(pdev, i);

	return -EBUSY;
}
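
/*
 * Usage sketch (hypothetical driver, names made up): claim every BAR at
 * probe time under a driver-chosen owner string and release them again on
 * removal.
 *
 *	static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		err = pci_request_regions(pdev, "mydrv");
 *		if (err)
 *			pci_disable_device(pdev);
 *		return err;
 *	}
 *
 *	static void mydrv_remove(struct pci_dev *pdev)
 *	{
 *		pci_release_regions(pdev);
 *		pci_disable_device(pdev);
 *	}
 */
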
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void
pci_set_master(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = 1;
	pcibios_set_master(dev);
}
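
/*
 * Usage sketch (hypothetical driver probe code): enable bus mastering
 * before starting DMA and declare the device's addressing limit via
 * pci_set_dma_mask() further down in this file (DMA_32BIT_MASK comes from
 * <linux/dma-mapping.h>).
 *
 *	pci_set_master(pdev);
 *	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
 *		return -EIO;
 */
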
#ifndef HAVE_ARCH_PCI_MWI
/* This can be overridden by arch code. */
u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;

/**
 * pci_generic_prep_mwi - helper function for pci_set_mwi
 * @dev: the PCI device for which MWI is enabled
 *
 * Helper function for generic implementation of pcibios_prep_mwi
 * function.  Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_generic_prep_mwi(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or a multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
	       "by device %s\n", pci_cache_line_size << 2, pci_name(dev));

	return -EINVAL;
}
#endif /* !HAVE_ARCH_PCI_MWI */

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
 * after first preparing the device through the arch specific
 * pcibios_prep_mwi() or the generic pci_generic_prep_mwi() helper.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

#ifdef HAVE_ARCH_PCI_MWI
	rc = pcibios_prep_mwi(dev);
#else
	rc = pci_generic_prep_mwi(dev);
#endif

	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable) {
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	} else {
		new = pci_command | PCI_COMMAND_INTX_DISABLE;
	}

	if (new != pci_command) {
		pci_write_config_word(pdev, PCI_COMMAND, new);
	}
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;

	return 0;
}
#endif

static int __devinit pci_init(void)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		pci_fixup_device(pci_fixup_final, dev);
	}
	return 0;
}

static int __devinit pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 1;
}

device_initcall(pci_init);

__setup("pci=", pci_setup);
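
/*
 * Usage sketch (kernel command line): options after "pci=" are split on
 * commas and first offered to pcibios_setup(); anything left over is
 * matched here, so for example booting with
 *
 *	pci=nomsi
 *
 * disables MSI globally via pci_no_msi() before any device is probed, and
 * unrecognized options are reported as unknown.
 */
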
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);
#endif

EXPORT_SYMBOL_GPL(pci_restore_bars);
EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_enable_wake);

/* Quirk info */

EXPORT_SYMBOL(isa_dma_bridge_buggy);
EXPORT_SYMBOL(pci_pci_problems);