1 /* 2 * $Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $ 3 * 4 * PCI Bus Services, see include/linux/pci.h for further explanation. 5 * 6 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, 7 * David Mosberger-Tang 8 * 9 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz> 10 */ 11 12 #include <linux/kernel.h> 13 #include <linux/delay.h> 14 #include <linux/init.h> 15 #include <linux/pci.h> 16 #include <linux/module.h> 17 #include <linux/spinlock.h> 18 #include <linux/string.h> 19 #include <asm/dma.h> /* isa_dma_bridge_buggy */ 20 #include "pci.h" 21 22 23 /** 24 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children 25 * @bus: pointer to PCI bus structure to search 26 * 27 * Given a PCI bus, returns the highest PCI bus number present in the set 28 * including the given PCI bus and its list of child PCI buses. 29 */ 30 unsigned char __devinit 31 pci_bus_max_busnr(struct pci_bus* bus) 32 { 33 struct list_head *tmp; 34 unsigned char max, n; 35 36 max = bus->subordinate; 37 list_for_each(tmp, &bus->children) { 38 n = pci_bus_max_busnr(pci_bus_b(tmp)); 39 if(n > max) 40 max = n; 41 } 42 return max; 43 } 44 EXPORT_SYMBOL_GPL(pci_bus_max_busnr); 45 46 #if 0 47 /** 48 * pci_max_busnr - returns maximum PCI bus number 49 * 50 * Returns the highest PCI bus number present in the system global list of 51 * PCI buses. 52 */ 53 unsigned char __devinit 54 pci_max_busnr(void) 55 { 56 struct pci_bus *bus = NULL; 57 unsigned char max, n; 58 59 max = 0; 60 while ((bus = pci_find_next_bus(bus)) != NULL) { 61 n = pci_bus_max_busnr(bus); 62 if(n > max) 63 max = n; 64 } 65 return max; 66 } 67 68 #endif /* 0 */ 69 70 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, u8 pos, int cap) 71 { 72 u8 id; 73 int ttl = 48; 74 75 while (ttl--) { 76 pci_bus_read_config_byte(bus, devfn, pos, &pos); 77 if (pos < 0x40) 78 break; 79 pos &= ~3; 80 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID, 81 &id); 82 if (id == 0xff) 83 break; 84 if (id == cap) 85 return pos; 86 pos += PCI_CAP_LIST_NEXT; 87 } 88 return 0; 89 } 90 91 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap) 92 { 93 return __pci_find_next_cap(dev->bus, dev->devfn, 94 pos + PCI_CAP_LIST_NEXT, cap); 95 } 96 EXPORT_SYMBOL_GPL(pci_find_next_capability); 97 98 static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_type, int cap) 99 { 100 u16 status; 101 u8 pos; 102 103 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status); 104 if (!(status & PCI_STATUS_CAP_LIST)) 105 return 0; 106 107 switch (hdr_type) { 108 case PCI_HEADER_TYPE_NORMAL: 109 case PCI_HEADER_TYPE_BRIDGE: 110 pos = PCI_CAPABILITY_LIST; 111 break; 112 case PCI_HEADER_TYPE_CARDBUS: 113 pos = PCI_CB_CAPABILITY_LIST; 114 break; 115 default: 116 return 0; 117 } 118 return __pci_find_next_cap(bus, devfn, pos, cap); 119 } 120 121 /** 122 * pci_find_capability - query for devices' capabilities 123 * @dev: PCI device to query 124 * @cap: capability code 125 * 126 * Tell if a device supports a given PCI capability. 127 * Returns the address of the requested capability structure within the 128 * device's PCI configuration space or 0 in case the device does not 129 * support it. Possible values for @cap: 130 * 131 * %PCI_CAP_ID_PM Power Management 132 * %PCI_CAP_ID_AGP Accelerated Graphics Port 133 * %PCI_CAP_ID_VPD Vital Product Data 134 * %PCI_CAP_ID_SLOTID Slot Identification 135 * %PCI_CAP_ID_MSI Message Signalled Interrupts 136 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap 137 * %PCI_CAP_ID_PCIX PCI-X 138 * %PCI_CAP_ID_EXP PCI Express 139 */ 140 int pci_find_capability(struct pci_dev *dev, int cap) 141 { 142 return __pci_bus_find_cap(dev->bus, dev->devfn, dev->hdr_type, cap); 143 } 144 145 /** 146 * pci_bus_find_capability - query for devices' capabilities 147 * @bus: the PCI bus to query 148 * @devfn: PCI device to query 149 * @cap: capability code 150 * 151 * Like pci_find_capability() but works for pci devices that do not have a 152 * pci_dev structure set up yet. 153 * 154 * Returns the address of the requested capability structure within the 155 * device's PCI configuration space or 0 in case the device does not 156 * support it. 157 */ 158 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) 159 { 160 u8 hdr_type; 161 162 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type); 163 164 return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap); 165 } 166 167 #if 0 168 /** 169 * pci_find_ext_capability - Find an extended capability 170 * @dev: PCI device to query 171 * @cap: capability code 172 * 173 * Returns the address of the requested extended capability structure 174 * within the device's PCI configuration space or 0 if the device does 175 * not support it. Possible values for @cap: 176 * 177 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting 178 * %PCI_EXT_CAP_ID_VC Virtual Channel 179 * %PCI_EXT_CAP_ID_DSN Device Serial Number 180 * %PCI_EXT_CAP_ID_PWR Power Budgeting 181 */ 182 int pci_find_ext_capability(struct pci_dev *dev, int cap) 183 { 184 u32 header; 185 int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */ 186 int pos = 0x100; 187 188 if (dev->cfg_size <= 256) 189 return 0; 190 191 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 192 return 0; 193 194 /* 195 * If we have no capabilities, this is indicated by cap ID, 196 * cap version and next pointer all being 0. 197 */ 198 if (header == 0) 199 return 0; 200 201 while (ttl-- > 0) { 202 if (PCI_EXT_CAP_ID(header) == cap) 203 return pos; 204 205 pos = PCI_EXT_CAP_NEXT(header); 206 if (pos < 0x100) 207 break; 208 209 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 210 break; 211 } 212 213 return 0; 214 } 215 #endif /* 0 */ 216 217 /** 218 * pci_find_parent_resource - return resource region of parent bus of given region 219 * @dev: PCI device structure contains resources to be searched 220 * @res: child resource record for which parent is sought 221 * 222 * For given resource region of given device, return the resource 223 * region of parent bus the given region is contained in or where 224 * it should be allocated from. 225 */ 226 struct resource * 227 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) 228 { 229 const struct pci_bus *bus = dev->bus; 230 int i; 231 struct resource *best = NULL; 232 233 for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 234 struct resource *r = bus->resource[i]; 235 if (!r) 236 continue; 237 if (res->start && !(res->start >= r->start && res->end <= r->end)) 238 continue; /* Not contained */ 239 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM)) 240 continue; /* Wrong type */ 241 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) 242 return r; /* Exact match */ 243 if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH)) 244 best = r; /* Approximating prefetchable by non-prefetchable */ 245 } 246 return best; 247 } 248 249 /** 250 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up) 251 * @dev: PCI device to have its BARs restored 252 * 253 * Restore the BAR values for a given device, so as to make it 254 * accessible by its driver. 255 */ 256 void 257 pci_restore_bars(struct pci_dev *dev) 258 { 259 int i, numres; 260 261 switch (dev->hdr_type) { 262 case PCI_HEADER_TYPE_NORMAL: 263 numres = 6; 264 break; 265 case PCI_HEADER_TYPE_BRIDGE: 266 numres = 2; 267 break; 268 case PCI_HEADER_TYPE_CARDBUS: 269 numres = 1; 270 break; 271 default: 272 /* Should never get here, but just in case... */ 273 return; 274 } 275 276 for (i = 0; i < numres; i ++) 277 pci_update_resource(dev, &dev->resource[i], i); 278 } 279 280 int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t); 281 282 /** 283 * pci_set_power_state - Set the power state of a PCI device 284 * @dev: PCI device to be suspended 285 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering 286 * 287 * Transition a device to a new power state, using the Power Management 288 * Capabilities in the device's config space. 289 * 290 * RETURN VALUE: 291 * -EINVAL if trying to enter a lower state than we're already in. 292 * 0 if we're already in the requested state. 293 * -EIO if device does not support PCI PM. 294 * 0 if we can successfully change the power state. 295 */ 296 int 297 pci_set_power_state(struct pci_dev *dev, pci_power_t state) 298 { 299 int pm, need_restore = 0; 300 u16 pmcsr, pmc; 301 302 /* bound the state we're entering */ 303 if (state > PCI_D3hot) 304 state = PCI_D3hot; 305 306 /* Validate current state: 307 * Can enter D0 from any state, but if we can only go deeper 308 * to sleep if we're already in a low power state 309 */ 310 if (state != PCI_D0 && dev->current_state > state) 311 return -EINVAL; 312 else if (dev->current_state == state) 313 return 0; /* we're already there */ 314 315 /* find PCI PM capability in list */ 316 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 317 318 /* abort if the device doesn't support PM capabilities */ 319 if (!pm) 320 return -EIO; 321 322 pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc); 323 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { 324 printk(KERN_DEBUG 325 "PCI: %s has unsupported PM cap regs version (%u)\n", 326 pci_name(dev), pmc & PCI_PM_CAP_VER_MASK); 327 return -EIO; 328 } 329 330 /* check if this device supports the desired state */ 331 if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1)) 332 return -EIO; 333 else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2)) 334 return -EIO; 335 336 pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); 337 338 /* If we're (effectively) in D3, force entire word to 0. 339 * This doesn't affect PME_Status, disables PME_En, and 340 * sets PowerState to 0. 341 */ 342 switch (dev->current_state) { 343 case PCI_D0: 344 case PCI_D1: 345 case PCI_D2: 346 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 347 pmcsr |= state; 348 break; 349 case PCI_UNKNOWN: /* Boot-up */ 350 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot 351 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) 352 need_restore = 1; 353 /* Fall-through: force to D0 */ 354 default: 355 pmcsr = 0; 356 break; 357 } 358 359 /* enter specified state */ 360 pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr); 361 362 /* Mandatory power management transition delays */ 363 /* see PCI PM 1.1 5.6.1 table 18 */ 364 if (state == PCI_D3hot || dev->current_state == PCI_D3hot) 365 msleep(10); 366 else if (state == PCI_D2 || dev->current_state == PCI_D2) 367 udelay(200); 368 369 /* 370 * Give firmware a chance to be called, such as ACPI _PRx, _PSx 371 * Firmware method after natice method ? 372 */ 373 if (platform_pci_set_power_state) 374 platform_pci_set_power_state(dev, state); 375 376 dev->current_state = state; 377 378 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT 379 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning 380 * from D3hot to D0 _may_ perform an internal reset, thereby 381 * going to "D0 Uninitialized" rather than "D0 Initialized". 382 * For example, at least some versions of the 3c905B and the 383 * 3c556B exhibit this behaviour. 384 * 385 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave 386 * devices in a D3hot state at boot. Consequently, we need to 387 * restore at least the BARs so that the device will be 388 * accessible to its driver. 389 */ 390 if (need_restore) 391 pci_restore_bars(dev); 392 393 return 0; 394 } 395 396 int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state); 397 398 /** 399 * pci_choose_state - Choose the power state of a PCI device 400 * @dev: PCI device to be suspended 401 * @state: target sleep state for the whole system. This is the value 402 * that is passed to suspend() function. 403 * 404 * Returns PCI power state suitable for given device and given system 405 * message. 406 */ 407 408 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) 409 { 410 int ret; 411 412 if (!pci_find_capability(dev, PCI_CAP_ID_PM)) 413 return PCI_D0; 414 415 if (platform_pci_choose_state) { 416 ret = platform_pci_choose_state(dev, state); 417 if (ret >= 0) 418 state.event = ret; 419 } 420 421 switch (state.event) { 422 case PM_EVENT_ON: 423 return PCI_D0; 424 case PM_EVENT_FREEZE: 425 case PM_EVENT_SUSPEND: 426 return PCI_D3hot; 427 default: 428 printk("They asked me for state %d\n", state.event); 429 BUG(); 430 } 431 return PCI_D0; 432 } 433 434 EXPORT_SYMBOL(pci_choose_state); 435 436 /** 437 * pci_save_state - save the PCI configuration space of a device before suspending 438 * @dev: - PCI device that we're dealing with 439 */ 440 int 441 pci_save_state(struct pci_dev *dev) 442 { 443 int i; 444 /* XXX: 100% dword access ok here? */ 445 for (i = 0; i < 16; i++) 446 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]); 447 return 0; 448 } 449 450 /** 451 * pci_restore_state - Restore the saved state of a PCI device 452 * @dev: - PCI device that we're dealing with 453 */ 454 int 455 pci_restore_state(struct pci_dev *dev) 456 { 457 int i; 458 459 for (i = 0; i < 16; i++) 460 pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]); 461 return 0; 462 } 463 464 /** 465 * pci_enable_device_bars - Initialize some of a device for use 466 * @dev: PCI device to be initialized 467 * @bars: bitmask of BAR's that must be configured 468 * 469 * Initialize device before it's used by a driver. Ask low-level code 470 * to enable selected I/O and memory resources. Wake up the device if it 471 * was suspended. Beware, this function can fail. 472 */ 473 474 int 475 pci_enable_device_bars(struct pci_dev *dev, int bars) 476 { 477 int err; 478 479 err = pci_set_power_state(dev, PCI_D0); 480 if (err < 0 && err != -EIO) 481 return err; 482 err = pcibios_enable_device(dev, bars); 483 if (err < 0) 484 return err; 485 return 0; 486 } 487 488 /** 489 * pci_enable_device - Initialize device before it's used by a driver. 490 * @dev: PCI device to be initialized 491 * 492 * Initialize device before it's used by a driver. Ask low-level code 493 * to enable I/O and memory. Wake up the device if it was suspended. 494 * Beware, this function can fail. 495 */ 496 int 497 pci_enable_device(struct pci_dev *dev) 498 { 499 int err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1); 500 if (err) 501 return err; 502 pci_fixup_device(pci_fixup_enable, dev); 503 dev->is_enabled = 1; 504 return 0; 505 } 506 507 /** 508 * pcibios_disable_device - disable arch specific PCI resources for device dev 509 * @dev: the PCI device to disable 510 * 511 * Disables architecture specific PCI resources for the device. This 512 * is the default implementation. Architecture implementations can 513 * override this. 514 */ 515 void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {} 516 517 /** 518 * pci_disable_device - Disable PCI device after use 519 * @dev: PCI device to be disabled 520 * 521 * Signal to the system that the PCI device is not in use by the system 522 * anymore. This only involves disabling PCI bus-mastering, if active. 523 */ 524 void 525 pci_disable_device(struct pci_dev *dev) 526 { 527 u16 pci_command; 528 529 pci_read_config_word(dev, PCI_COMMAND, &pci_command); 530 if (pci_command & PCI_COMMAND_MASTER) { 531 pci_command &= ~PCI_COMMAND_MASTER; 532 pci_write_config_word(dev, PCI_COMMAND, pci_command); 533 } 534 dev->is_busmaster = 0; 535 536 pcibios_disable_device(dev); 537 dev->is_enabled = 0; 538 } 539 540 /** 541 * pci_enable_wake - enable device to generate PME# when suspended 542 * @dev: - PCI device to operate on 543 * @state: - Current state of device. 544 * @enable: - Flag to enable or disable generation 545 * 546 * Set the bits in the device's PM Capabilities to generate PME# when 547 * the system is suspended. 548 * 549 * -EIO is returned if device doesn't have PM Capabilities. 550 * -EINVAL is returned if device supports it, but can't generate wake events. 551 * 0 if operation is successful. 552 * 553 */ 554 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) 555 { 556 int pm; 557 u16 value; 558 559 /* find PCI PM capability in list */ 560 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 561 562 /* If device doesn't support PM Capabilities, but request is to disable 563 * wake events, it's a nop; otherwise fail */ 564 if (!pm) 565 return enable ? -EIO : 0; 566 567 /* Check device's ability to generate PME# */ 568 pci_read_config_word(dev,pm+PCI_PM_PMC,&value); 569 570 value &= PCI_PM_CAP_PME_MASK; 571 value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */ 572 573 /* Check if it can generate PME# from requested state. */ 574 if (!value || !(value & (1 << state))) 575 return enable ? -EINVAL : 0; 576 577 pci_read_config_word(dev, pm + PCI_PM_CTRL, &value); 578 579 /* Clear PME_Status by writing 1 to it and enable PME# */ 580 value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; 581 582 if (!enable) 583 value &= ~PCI_PM_CTRL_PME_ENABLE; 584 585 pci_write_config_word(dev, pm + PCI_PM_CTRL, value); 586 587 return 0; 588 } 589 590 int 591 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) 592 { 593 u8 pin; 594 595 pin = dev->pin; 596 if (!pin) 597 return -1; 598 pin--; 599 while (dev->bus->self) { 600 pin = (pin + PCI_SLOT(dev->devfn)) % 4; 601 dev = dev->bus->self; 602 } 603 *bridge = dev; 604 return pin; 605 } 606 607 /** 608 * pci_release_region - Release a PCI bar 609 * @pdev: PCI device whose resources were previously reserved by pci_request_region 610 * @bar: BAR to release 611 * 612 * Releases the PCI I/O and memory resources previously reserved by a 613 * successful call to pci_request_region. Call this function only 614 * after all use of the PCI regions has ceased. 615 */ 616 void pci_release_region(struct pci_dev *pdev, int bar) 617 { 618 if (pci_resource_len(pdev, bar) == 0) 619 return; 620 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 621 release_region(pci_resource_start(pdev, bar), 622 pci_resource_len(pdev, bar)); 623 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 624 release_mem_region(pci_resource_start(pdev, bar), 625 pci_resource_len(pdev, bar)); 626 } 627 628 /** 629 * pci_request_region - Reserved PCI I/O and memory resource 630 * @pdev: PCI device whose resources are to be reserved 631 * @bar: BAR to be reserved 632 * @res_name: Name to be associated with resource. 633 * 634 * Mark the PCI region associated with PCI device @pdev BR @bar as 635 * being reserved by owner @res_name. Do not access any 636 * address inside the PCI regions unless this call returns 637 * successfully. 638 * 639 * Returns 0 on success, or %EBUSY on error. A warning 640 * message is also printed on failure. 641 */ 642 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) 643 { 644 if (pci_resource_len(pdev, bar) == 0) 645 return 0; 646 647 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { 648 if (!request_region(pci_resource_start(pdev, bar), 649 pci_resource_len(pdev, bar), res_name)) 650 goto err_out; 651 } 652 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { 653 if (!request_mem_region(pci_resource_start(pdev, bar), 654 pci_resource_len(pdev, bar), res_name)) 655 goto err_out; 656 } 657 658 return 0; 659 660 err_out: 661 printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n", 662 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", 663 bar + 1, /* PCI BAR # */ 664 pci_resource_len(pdev, bar), pci_resource_start(pdev, bar), 665 pci_name(pdev)); 666 return -EBUSY; 667 } 668 669 670 /** 671 * pci_release_regions - Release reserved PCI I/O and memory resources 672 * @pdev: PCI device whose resources were previously reserved by pci_request_regions 673 * 674 * Releases all PCI I/O and memory resources previously reserved by a 675 * successful call to pci_request_regions. Call this function only 676 * after all use of the PCI regions has ceased. 677 */ 678 679 void pci_release_regions(struct pci_dev *pdev) 680 { 681 int i; 682 683 for (i = 0; i < 6; i++) 684 pci_release_region(pdev, i); 685 } 686 687 /** 688 * pci_request_regions - Reserved PCI I/O and memory resources 689 * @pdev: PCI device whose resources are to be reserved 690 * @res_name: Name to be associated with resource. 691 * 692 * Mark all PCI regions associated with PCI device @pdev as 693 * being reserved by owner @res_name. Do not access any 694 * address inside the PCI regions unless this call returns 695 * successfully. 696 * 697 * Returns 0 on success, or %EBUSY on error. A warning 698 * message is also printed on failure. 699 */ 700 int pci_request_regions(struct pci_dev *pdev, const char *res_name) 701 { 702 int i; 703 704 for (i = 0; i < 6; i++) 705 if(pci_request_region(pdev, i, res_name)) 706 goto err_out; 707 return 0; 708 709 err_out: 710 while(--i >= 0) 711 pci_release_region(pdev, i); 712 713 return -EBUSY; 714 } 715 716 /** 717 * pci_set_master - enables bus-mastering for device dev 718 * @dev: the PCI device to enable 719 * 720 * Enables bus-mastering on the device and calls pcibios_set_master() 721 * to do the needed arch specific settings. 722 */ 723 void 724 pci_set_master(struct pci_dev *dev) 725 { 726 u16 cmd; 727 728 pci_read_config_word(dev, PCI_COMMAND, &cmd); 729 if (! (cmd & PCI_COMMAND_MASTER)) { 730 pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev)); 731 cmd |= PCI_COMMAND_MASTER; 732 pci_write_config_word(dev, PCI_COMMAND, cmd); 733 } 734 dev->is_busmaster = 1; 735 pcibios_set_master(dev); 736 } 737 738 #ifndef HAVE_ARCH_PCI_MWI 739 /* This can be overridden by arch code. */ 740 u8 pci_cache_line_size = L1_CACHE_BYTES >> 2; 741 742 /** 743 * pci_generic_prep_mwi - helper function for pci_set_mwi 744 * @dev: the PCI device for which MWI is enabled 745 * 746 * Helper function for generic implementation of pcibios_prep_mwi 747 * function. Originally copied from drivers/net/acenic.c. 748 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. 749 * 750 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 751 */ 752 static int 753 pci_generic_prep_mwi(struct pci_dev *dev) 754 { 755 u8 cacheline_size; 756 757 if (!pci_cache_line_size) 758 return -EINVAL; /* The system doesn't support MWI. */ 759 760 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be 761 equal to or multiple of the right value. */ 762 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 763 if (cacheline_size >= pci_cache_line_size && 764 (cacheline_size % pci_cache_line_size) == 0) 765 return 0; 766 767 /* Write the correct value. */ 768 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size); 769 /* Read it back. */ 770 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 771 if (cacheline_size == pci_cache_line_size) 772 return 0; 773 774 printk(KERN_DEBUG "PCI: cache line size of %d is not supported " 775 "by device %s\n", pci_cache_line_size << 2, pci_name(dev)); 776 777 return -EINVAL; 778 } 779 #endif /* !HAVE_ARCH_PCI_MWI */ 780 781 /** 782 * pci_set_mwi - enables memory-write-invalidate PCI transaction 783 * @dev: the PCI device for which MWI is enabled 784 * 785 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND, 786 * and then calls @pcibios_set_mwi to do the needed arch specific 787 * operations or a generic mwi-prep function. 788 * 789 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 790 */ 791 int 792 pci_set_mwi(struct pci_dev *dev) 793 { 794 int rc; 795 u16 cmd; 796 797 #ifdef HAVE_ARCH_PCI_MWI 798 rc = pcibios_prep_mwi(dev); 799 #else 800 rc = pci_generic_prep_mwi(dev); 801 #endif 802 803 if (rc) 804 return rc; 805 806 pci_read_config_word(dev, PCI_COMMAND, &cmd); 807 if (! (cmd & PCI_COMMAND_INVALIDATE)) { 808 pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev)); 809 cmd |= PCI_COMMAND_INVALIDATE; 810 pci_write_config_word(dev, PCI_COMMAND, cmd); 811 } 812 813 return 0; 814 } 815 816 /** 817 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev 818 * @dev: the PCI device to disable 819 * 820 * Disables PCI Memory-Write-Invalidate transaction on the device 821 */ 822 void 823 pci_clear_mwi(struct pci_dev *dev) 824 { 825 u16 cmd; 826 827 pci_read_config_word(dev, PCI_COMMAND, &cmd); 828 if (cmd & PCI_COMMAND_INVALIDATE) { 829 cmd &= ~PCI_COMMAND_INVALIDATE; 830 pci_write_config_word(dev, PCI_COMMAND, cmd); 831 } 832 } 833 834 /** 835 * pci_intx - enables/disables PCI INTx for device dev 836 * @pdev: the PCI device to operate on 837 * @enable: boolean: whether to enable or disable PCI INTx 838 * 839 * Enables/disables PCI INTx for device dev 840 */ 841 void 842 pci_intx(struct pci_dev *pdev, int enable) 843 { 844 u16 pci_command, new; 845 846 pci_read_config_word(pdev, PCI_COMMAND, &pci_command); 847 848 if (enable) { 849 new = pci_command & ~PCI_COMMAND_INTX_DISABLE; 850 } else { 851 new = pci_command | PCI_COMMAND_INTX_DISABLE; 852 } 853 854 if (new != pci_command) { 855 pci_write_config_word(pdev, PCI_COMMAND, new); 856 } 857 } 858 859 #ifndef HAVE_ARCH_PCI_SET_DMA_MASK 860 /* 861 * These can be overridden by arch-specific implementations 862 */ 863 int 864 pci_set_dma_mask(struct pci_dev *dev, u64 mask) 865 { 866 if (!pci_dma_supported(dev, mask)) 867 return -EIO; 868 869 dev->dma_mask = mask; 870 871 return 0; 872 } 873 874 int 875 pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) 876 { 877 if (!pci_dma_supported(dev, mask)) 878 return -EIO; 879 880 dev->dev.coherent_dma_mask = mask; 881 882 return 0; 883 } 884 #endif 885 886 static int __devinit pci_init(void) 887 { 888 struct pci_dev *dev = NULL; 889 890 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 891 pci_fixup_device(pci_fixup_final, dev); 892 } 893 return 0; 894 } 895 896 static int __devinit pci_setup(char *str) 897 { 898 while (str) { 899 char *k = strchr(str, ','); 900 if (k) 901 *k++ = 0; 902 if (*str && (str = pcibios_setup(str)) && *str) { 903 if (!strcmp(str, "nomsi")) { 904 pci_no_msi(); 905 } else { 906 printk(KERN_ERR "PCI: Unknown option `%s'\n", 907 str); 908 } 909 } 910 str = k; 911 } 912 return 1; 913 } 914 915 device_initcall(pci_init); 916 917 __setup("pci=", pci_setup); 918 919 #if defined(CONFIG_ISA) || defined(CONFIG_EISA) 920 /* FIXME: Some boxes have multiple ISA bridges! */ 921 struct pci_dev *isa_bridge; 922 EXPORT_SYMBOL(isa_bridge); 923 #endif 924 925 EXPORT_SYMBOL_GPL(pci_restore_bars); 926 EXPORT_SYMBOL(pci_enable_device_bars); 927 EXPORT_SYMBOL(pci_enable_device); 928 EXPORT_SYMBOL(pci_disable_device); 929 EXPORT_SYMBOL(pci_find_capability); 930 EXPORT_SYMBOL(pci_bus_find_capability); 931 EXPORT_SYMBOL(pci_release_regions); 932 EXPORT_SYMBOL(pci_request_regions); 933 EXPORT_SYMBOL(pci_release_region); 934 EXPORT_SYMBOL(pci_request_region); 935 EXPORT_SYMBOL(pci_set_master); 936 EXPORT_SYMBOL(pci_set_mwi); 937 EXPORT_SYMBOL(pci_clear_mwi); 938 EXPORT_SYMBOL_GPL(pci_intx); 939 EXPORT_SYMBOL(pci_set_dma_mask); 940 EXPORT_SYMBOL(pci_set_consistent_dma_mask); 941 EXPORT_SYMBOL(pci_assign_resource); 942 EXPORT_SYMBOL(pci_find_parent_resource); 943 944 EXPORT_SYMBOL(pci_set_power_state); 945 EXPORT_SYMBOL(pci_save_state); 946 EXPORT_SYMBOL(pci_restore_state); 947 EXPORT_SYMBOL(pci_enable_wake); 948 949 /* Quirk info */ 950 951 EXPORT_SYMBOL(isa_dma_bridge_buggy); 952 EXPORT_SYMBOL(pci_pci_problems); 953