/*
 *	$Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
 *
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */
#include "pci.h"


/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char __devinit
pci_bus_max_busnr(struct pci_bus* bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, u8 pos, int cap)
{
	u8 id;
	int ttl = 48;

	while (ttl--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_type, int cap)
{
	u16 status;
	u8 pos;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		pos = PCI_CAPABILITY_LIST;
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		pos = PCI_CB_CAPABILITY_LIST;
		break;
	default:
		return 0;
	}
	return __pci_find_next_cap(bus, devfn, pos, cap);
}

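/*
 * Illustrative sketch (compiled out, not part of the original file):
 * pci_find_next_capability() above lets a caller walk every instance of
 * a capability that may appear more than once in the capability list.
 * The "example_" name is a placeholder, and @cap_id stands in for
 * whichever capability ID is of interest.
 */
#if 0
static int example_count_cap_instances(struct pci_dev *pdev, int cap_id)
{
	int count = 0;
	int pos = pci_find_capability(pdev, cap_id);

	while (pos) {
		count++;
		pos = pci_find_next_capability(pdev, pos, cap_id);
	}
	return count;
}
#endif /* example */
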
/**
 * pci_find_capability - query for a device's capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM		Power Management
 *  %PCI_CAP_ID_AGP		Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD		Vital Product Data
 *  %PCI_CAP_ID_SLOTID		Slot Identification
 *  %PCI_CAP_ID_MSI		Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP		CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX		PCI-X
 *  %PCI_CAP_ID_EXP		PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	return __pci_bus_find_cap(dev->bus, dev->devfn, dev->hdr_type, cap);
}

/**
 * pci_bus_find_capability - query for a device's capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap);
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
	int pos = 0x100;

	if (dev->cfg_size <= 256)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 0x100)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

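/*
 * Illustrative sketch (compiled out, not part of the original file):
 * how a driver typically uses the lookups above -- a classic capability
 * (power management) and an extended one (AER).  The "example_" name is
 * a placeholder only.
 */
#if 0
static void example_find_caps(struct pci_dev *pdev)
{
	int pm, aer;

	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm)
		pr_debug("example: PM capability at config offset 0x%x\n", pm);

	aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (aer)
		pr_debug("example: AER capability at config offset 0x%x\n", aer);
}
#endif /* example */
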
/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure whose resources are to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;	/* Approximating prefetchable by non-prefetchable */
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
void
pci_restore_bars(struct pci_dev *dev)
{
	int i, numres;

	switch (dev->hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
		numres = 6;
		break;
	case PCI_HEADER_TYPE_BRIDGE:
		numres = 2;
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		numres = 1;
		break;
	default:
		/* Should never get here, but just in case... */
		return;
	}

	for (i = 0; i < numres; i++)
		pci_update_resource(dev, &dev->resource[i], i);
}

int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
 *
 * Transition a device to a new power state, using the Power Management
 * Capabilities in the device's config space.
 *
 * RETURN VALUE:
 * -EINVAL if trying to enter a lower state than we're already in.
 * 0 if we're already in the requested state.
 * -EIO if device does not support PCI PM.
 * 0 if we can successfully change the power state.
 */
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int pm, need_restore = 0;
	u16 pmcsr, pmc;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;

	/*
	 * Validate current state:
	 * We can enter D0 from any state, but we can only go deeper
	 * into a sleep state if we're already in a low power state.
	 */
	if (state != PCI_D0 && dev->current_state > state) {
		printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
			__FUNCTION__, pci_name(dev), state, dev->current_state);
		return -EINVAL;
	} else if (dev->current_state == state)
		return 0;	/* we're already there */

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* abort if the device doesn't support PM capabilities */
	if (!pm)
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		printk(KERN_DEBUG
		       "PCI: %s has unsupported PM cap regs version (%u)\n",
		       pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
		return -EIO;
	}

	/* check if this device supports the desired state */
	if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
		return -EIO;
	else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
		return -EIO;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = 1;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(10);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(200);

	/*
	 * Give firmware a chance to be called, such as ACPI _PRx, _PSx.
	 * Firmware method after native method?
	 */
	if (platform_pci_set_power_state)
		platform_pci_set_power_state(dev, state);

	dev->current_state = state;

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	return 0;
}

int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	int ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	if (platform_pci_choose_state) {
		ret = platform_pci_choose_state(dev, state);
		if (ret >= 0)
			state.event = ret;
	}

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_SUSPEND:
		return PCI_D3hot;
	default:
		printk("They asked me for state %d\n", state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	if ((i = pci_save_msi_state(dev)) != 0)
		return i;
	if ((i = pci_save_msix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	/*
	 * The Base Address registers should be programmed before the command
	 * register(s).
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			printk(KERN_DEBUG "PM: Writing back config space on "
				"device %s at offset %x (was %x, writing %x)\n",
				pci_name(dev), i,
				val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_msi_state(dev);
	pci_restore_msix_state(dev);
	return 0;
}

/**
 * pci_enable_device_bars - Initialize some of a device for use
 * @dev: PCI device to be initialized
 * @bars: bitmask of BARs that must be configured
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable selected I/O and memory resources. Wake up the device if it
 * was suspended. Beware, this function can fail.
 */
int
pci_enable_device_bars(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	return 0;
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int
pci_enable_device(struct pci_dev *dev)
{
	int err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
	if (err)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);
	dev->is_enabled = 1;
	return 0;
}

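/*
 * Illustrative sketch (compiled out, not part of the original file):
 * how config-space save/restore is usually ordered around a power
 * transition in a driver.  pci_choose_state() maps the system sleep
 * message to a device power state.  The "example_" names are
 * placeholders only.
 */
#if 0
static int example_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);		/* snapshot config space while still in D0 */
	return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}

static int example_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);	/* wake the device first */
	pci_restore_state(pdev);		/* then write config space back */
	return pci_enable_device(pdev);		/* re-enable decode and run fixups */
}
#endif /* example */
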
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 */
void
pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}
	dev->is_busmaster = 0;

	pcibios_disable_device(dev);
	dev->is_enabled = 0;
}

/**
 * pci_enable_wake - enable device to generate PME# when suspended
 * @dev: PCI device to operate on
 * @state: Current state of device.
 * @enable: Flag to enable or disable generation
 *
 * Set the bits in the device's PM Capabilities to generate PME# when
 * the system is suspended.
 *
 * -EIO is returned if device doesn't have PM Capabilities.
 * -EINVAL is returned if device supports it, but can't generate wake events.
 * 0 if operation is successful.
 *
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
	int pm;
	u16 value;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* If device doesn't support PM Capabilities, but request is to disable
	 * wake events, it's a nop; otherwise fail */
	if (!pm)
		return enable ? -EIO : 0;

	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &value);

	value &= PCI_PM_CAP_PME_MASK;
	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;	/* First bit of mask */

	/* Check if it can generate PME# from requested state. */
	if (!value || !(value & (1 << state)))
		return enable ? -EINVAL : 0;

	pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

	/* Clear PME_Status by writing 1 to it and enable PME# */
	value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

	if (!enable)
		value &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

	return 0;
}

/*
 * pci_get_interrupt_pin: walk up to the root bus, swizzling the INTx pin
 * at each bridge.  On return, *bridge is the device on the root bus
 * through which the interrupt is routed (the device itself if it sits on
 * the root bus), and the return value is the pin as 0..3 (0 = INTA), or
 * -1 if the device uses no interrupt pin.
 */
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;
	pin--;
	while (dev->bus->self) {
		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

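/*
 * Illustrative sketch (compiled out, not part of the original file):
 * arming wake-up around a suspend transition with pci_enable_wake().
 * A driver that wants the device to wake the system enables PME# for
 * the target state and disables it again on resume.  The "example_"
 * names are placeholders only.
 */
#if 0
static int example_suspend_wakeup(struct pci_dev *pdev, pm_message_t mesg)
{
	/* -EINVAL here just means PME# cannot be generated from D3hot. */
	pci_enable_wake(pdev, PCI_D3hot, 1);
	return pci_set_power_state(pdev, PCI_D3hot);
}

static int example_resume_wakeup(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);	/* disable PME# again */
	return 0;
}
#endif /* example */
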
/**
 * pci_release_region - Release a PCI BAR
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
}

/**
 * pci_request_region - Reserve a PCI I/O or memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!request_mem_region(pci_resource_start(pdev, bar),
				        pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}

	return 0;

err_out:
	printk(KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		bar + 1, /* PCI BAR # */
		pci_resource_len(pdev, bar), pci_resource_start(pdev, bar),
		pci_name(pdev));
	return -EBUSY;
}


/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < 6; i++)
		pci_release_region(pdev, i);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int i;

	for (i = 0; i < 6; i++)
		if (pci_request_region(pdev, i, res_name))
			goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		pci_release_region(pdev, i);

	return -EBUSY;
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void
pci_set_master(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = 1;
	pcibios_set_master(dev);
}

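/*
 * Illustrative sketch (compiled out, not part of the original file):
 * the usual probe/remove pairing of the enable, region and bus-master
 * helpers above.  The "example_" names and the "example" resource name
 * are placeholders only.
 */
#if 0
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	pci_set_master(pdev);		/* needed before the device can DMA */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
#endif /* example */
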
#ifndef HAVE_ARCH_PCI_MWI
/* This can be overridden by arch code. */
u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;

/**
 * pci_generic_prep_mwi - helper function for pci_set_mwi
 * @dev: the PCI device for which MWI is enabled
 *
 * Helper function for generic implementation of pcibios_prep_mwi
 * function.  Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_generic_prep_mwi(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;		/* The system doesn't support MWI. */

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or a multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
	       "by device %s\n", pci_cache_line_size << 2, pci_name(dev));

	return -EINVAL;
}
#endif /* !HAVE_ARCH_PCI_MWI */

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
 * and then calls @pcibios_set_mwi to do the needed arch specific
 * operations or a generic mwi-prep function.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

#ifdef HAVE_ARCH_PCI_MWI
	rc = pcibios_prep_mwi(dev);
#else
	rc = pci_generic_prep_mwi(dev);
#endif

	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

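/*
 * Illustrative sketch (compiled out, not part of the original file):
 * MWI is an optimization, so drivers normally treat a pci_set_mwi()
 * failure as non-fatal and simply run without it.  The "example_" name
 * is a placeholder only.
 */
#if 0
static void example_enable_mwi(struct pci_dev *pdev)
{
	if (pci_set_mwi(pdev)) {
		/* Cache line size could not be set up; carry on without MWI. */
		pr_debug("example: MWI not enabled for %s\n", pci_name(pdev));
	}
}
#endif /* example */
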
/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable) {
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	} else {
		new = pci_command | PCI_COMMAND_INTX_DISABLE;
	}

	if (new != pci_command) {
		pci_write_config_word(pdev, PCI_COMMAND, new);
	}
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;

	return 0;
}
#endif

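/*
 * Illustrative sketch (compiled out, not part of the original file):
 * the usual fallback pattern for pci_set_dma_mask() -- try the widest
 * mask the hardware can use, then fall back to 32-bit addressing.  The
 * literal masks stand in for whatever the device actually supports, and
 * the "example_" name is a placeholder only.
 */
#if 0
static int example_setup_dma(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL))	/* 64-bit DMA */
		pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
	else if (!pci_set_dma_mask(pdev, 0xffffffffULL))	/* 32-bit DMA */
		pci_set_consistent_dma_mask(pdev, 0xffffffffULL);
	else
		return -EIO;	/* no usable DMA addressing */
	return 0;
}
#endif /* example */
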
static int __devinit pci_init(void)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		pci_fixup_device(pci_fixup_final, dev);
	}
	return 0;
}

static int __devinit pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 1;
}

device_initcall(pci_init);

__setup("pci=", pci_setup);

#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);
#endif

EXPORT_SYMBOL_GPL(pci_restore_bars);
EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_enable_wake);

/* Quirk info */

EXPORT_SYMBOL(isa_dma_bridge_buggy);
EXPORT_SYMBOL(pci_pci_problems);