1 /* 2 * $Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $ 3 * 4 * PCI Bus Services, see include/linux/pci.h for further explanation. 5 * 6 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, 7 * David Mosberger-Tang 8 * 9 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz> 10 */ 11 12 #include <linux/kernel.h> 13 #include <linux/delay.h> 14 #include <linux/init.h> 15 #include <linux/pci.h> 16 #include <linux/module.h> 17 #include <linux/spinlock.h> 18 #include <linux/string.h> 19 #include <asm/dma.h> /* isa_dma_bridge_buggy */ 20 #include "pci.h" 21 22 #if 0 23 24 /** 25 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children 26 * @bus: pointer to PCI bus structure to search 27 * 28 * Given a PCI bus, returns the highest PCI bus number present in the set 29 * including the given PCI bus and its list of child PCI buses. 30 */ 31 unsigned char __devinit 32 pci_bus_max_busnr(struct pci_bus* bus) 33 { 34 struct list_head *tmp; 35 unsigned char max, n; 36 37 max = bus->number; 38 list_for_each(tmp, &bus->children) { 39 n = pci_bus_max_busnr(pci_bus_b(tmp)); 40 if(n > max) 41 max = n; 42 } 43 return max; 44 } 45 46 /** 47 * pci_max_busnr - returns maximum PCI bus number 48 * 49 * Returns the highest PCI bus number present in the system global list of 50 * PCI buses. 51 */ 52 unsigned char __devinit 53 pci_max_busnr(void) 54 { 55 struct pci_bus *bus = NULL; 56 unsigned char max, n; 57 58 max = 0; 59 while ((bus = pci_find_next_bus(bus)) != NULL) { 60 n = pci_bus_max_busnr(bus); 61 if(n > max) 62 max = n; 63 } 64 return max; 65 } 66 67 #endif /* 0 */ 68 69 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, u8 pos, int cap) 70 { 71 u8 id; 72 int ttl = 48; 73 74 while (ttl--) { 75 pci_bus_read_config_byte(bus, devfn, pos, &pos); 76 if (pos < 0x40) 77 break; 78 pos &= ~3; 79 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID, 80 &id); 81 if (id == 0xff) 82 break; 83 if (id == cap) 84 return pos; 85 pos += PCI_CAP_LIST_NEXT; 86 } 87 return 0; 88 } 89 90 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap) 91 { 92 return __pci_find_next_cap(dev->bus, dev->devfn, 93 pos + PCI_CAP_LIST_NEXT, cap); 94 } 95 EXPORT_SYMBOL_GPL(pci_find_next_capability); 96 97 static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_type, int cap) 98 { 99 u16 status; 100 u8 pos; 101 102 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status); 103 if (!(status & PCI_STATUS_CAP_LIST)) 104 return 0; 105 106 switch (hdr_type) { 107 case PCI_HEADER_TYPE_NORMAL: 108 case PCI_HEADER_TYPE_BRIDGE: 109 pos = PCI_CAPABILITY_LIST; 110 break; 111 case PCI_HEADER_TYPE_CARDBUS: 112 pos = PCI_CB_CAPABILITY_LIST; 113 break; 114 default: 115 return 0; 116 } 117 return __pci_find_next_cap(bus, devfn, pos, cap); 118 } 119 120 /** 121 * pci_find_capability - query for devices' capabilities 122 * @dev: PCI device to query 123 * @cap: capability code 124 * 125 * Tell if a device supports a given PCI capability. 126 * Returns the address of the requested capability structure within the 127 * device's PCI configuration space or 0 in case the device does not 128 * support it. Possible values for @cap: 129 * 130 * %PCI_CAP_ID_PM Power Management 131 * %PCI_CAP_ID_AGP Accelerated Graphics Port 132 * %PCI_CAP_ID_VPD Vital Product Data 133 * %PCI_CAP_ID_SLOTID Slot Identification 134 * %PCI_CAP_ID_MSI Message Signalled Interrupts 135 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap 136 * %PCI_CAP_ID_PCIX PCI-X 137 * %PCI_CAP_ID_EXP PCI Express 138 */ 139 int pci_find_capability(struct pci_dev *dev, int cap) 140 { 141 return __pci_bus_find_cap(dev->bus, dev->devfn, dev->hdr_type, cap); 142 } 143 144 /** 145 * pci_bus_find_capability - query for devices' capabilities 146 * @bus: the PCI bus to query 147 * @devfn: PCI device to query 148 * @cap: capability code 149 * 150 * Like pci_find_capability() but works for pci devices that do not have a 151 * pci_dev structure set up yet. 152 * 153 * Returns the address of the requested capability structure within the 154 * device's PCI configuration space or 0 in case the device does not 155 * support it. 156 */ 157 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) 158 { 159 u8 hdr_type; 160 161 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type); 162 163 return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap); 164 } 165 166 #if 0 167 /** 168 * pci_find_ext_capability - Find an extended capability 169 * @dev: PCI device to query 170 * @cap: capability code 171 * 172 * Returns the address of the requested extended capability structure 173 * within the device's PCI configuration space or 0 if the device does 174 * not support it. Possible values for @cap: 175 * 176 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting 177 * %PCI_EXT_CAP_ID_VC Virtual Channel 178 * %PCI_EXT_CAP_ID_DSN Device Serial Number 179 * %PCI_EXT_CAP_ID_PWR Power Budgeting 180 */ 181 int pci_find_ext_capability(struct pci_dev *dev, int cap) 182 { 183 u32 header; 184 int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */ 185 int pos = 0x100; 186 187 if (dev->cfg_size <= 256) 188 return 0; 189 190 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 191 return 0; 192 193 /* 194 * If we have no capabilities, this is indicated by cap ID, 195 * cap version and next pointer all being 0. 196 */ 197 if (header == 0) 198 return 0; 199 200 while (ttl-- > 0) { 201 if (PCI_EXT_CAP_ID(header) == cap) 202 return pos; 203 204 pos = PCI_EXT_CAP_NEXT(header); 205 if (pos < 0x100) 206 break; 207 208 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 209 break; 210 } 211 212 return 0; 213 } 214 #endif /* 0 */ 215 216 /** 217 * pci_find_parent_resource - return resource region of parent bus of given region 218 * @dev: PCI device structure contains resources to be searched 219 * @res: child resource record for which parent is sought 220 * 221 * For given resource region of given device, return the resource 222 * region of parent bus the given region is contained in or where 223 * it should be allocated from. 224 */ 225 struct resource * 226 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) 227 { 228 const struct pci_bus *bus = dev->bus; 229 int i; 230 struct resource *best = NULL; 231 232 for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 233 struct resource *r = bus->resource[i]; 234 if (!r) 235 continue; 236 if (res->start && !(res->start >= r->start && res->end <= r->end)) 237 continue; /* Not contained */ 238 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM)) 239 continue; /* Wrong type */ 240 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) 241 return r; /* Exact match */ 242 if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH)) 243 best = r; /* Approximating prefetchable by non-prefetchable */ 244 } 245 return best; 246 } 247 248 /** 249 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up) 250 * @dev: PCI device to have its BARs restored 251 * 252 * Restore the BAR values for a given device, so as to make it 253 * accessible by its driver. 254 */ 255 void 256 pci_restore_bars(struct pci_dev *dev) 257 { 258 int i, numres; 259 260 switch (dev->hdr_type) { 261 case PCI_HEADER_TYPE_NORMAL: 262 numres = 6; 263 break; 264 case PCI_HEADER_TYPE_BRIDGE: 265 numres = 2; 266 break; 267 case PCI_HEADER_TYPE_CARDBUS: 268 numres = 1; 269 break; 270 default: 271 /* Should never get here, but just in case... */ 272 return; 273 } 274 275 for (i = 0; i < numres; i ++) 276 pci_update_resource(dev, &dev->resource[i], i); 277 } 278 279 int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t); 280 281 /** 282 * pci_set_power_state - Set the power state of a PCI device 283 * @dev: PCI device to be suspended 284 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering 285 * 286 * Transition a device to a new power state, using the Power Management 287 * Capabilities in the device's config space. 288 * 289 * RETURN VALUE: 290 * -EINVAL if trying to enter a lower state than we're already in. 291 * 0 if we're already in the requested state. 292 * -EIO if device does not support PCI PM. 293 * 0 if we can successfully change the power state. 294 */ 295 int 296 pci_set_power_state(struct pci_dev *dev, pci_power_t state) 297 { 298 int pm, need_restore = 0; 299 u16 pmcsr, pmc; 300 301 /* bound the state we're entering */ 302 if (state > PCI_D3hot) 303 state = PCI_D3hot; 304 305 /* Validate current state: 306 * Can enter D0 from any state, but if we can only go deeper 307 * to sleep if we're already in a low power state 308 */ 309 if (state != PCI_D0 && dev->current_state > state) 310 return -EINVAL; 311 else if (dev->current_state == state) 312 return 0; /* we're already there */ 313 314 /* find PCI PM capability in list */ 315 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 316 317 /* abort if the device doesn't support PM capabilities */ 318 if (!pm) 319 return -EIO; 320 321 pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc); 322 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { 323 printk(KERN_DEBUG 324 "PCI: %s has unsupported PM cap regs version (%u)\n", 325 pci_name(dev), pmc & PCI_PM_CAP_VER_MASK); 326 return -EIO; 327 } 328 329 /* check if this device supports the desired state */ 330 if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1)) 331 return -EIO; 332 else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2)) 333 return -EIO; 334 335 pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); 336 337 /* If we're (effectively) in D3, force entire word to 0. 338 * This doesn't affect PME_Status, disables PME_En, and 339 * sets PowerState to 0. 340 */ 341 switch (dev->current_state) { 342 case PCI_D0: 343 case PCI_D1: 344 case PCI_D2: 345 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 346 pmcsr |= state; 347 break; 348 case PCI_UNKNOWN: /* Boot-up */ 349 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot 350 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) 351 need_restore = 1; 352 /* Fall-through: force to D0 */ 353 default: 354 pmcsr = 0; 355 break; 356 } 357 358 /* enter specified state */ 359 pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr); 360 361 /* Mandatory power management transition delays */ 362 /* see PCI PM 1.1 5.6.1 table 18 */ 363 if (state == PCI_D3hot || dev->current_state == PCI_D3hot) 364 msleep(10); 365 else if (state == PCI_D2 || dev->current_state == PCI_D2) 366 udelay(200); 367 368 /* 369 * Give firmware a chance to be called, such as ACPI _PRx, _PSx 370 * Firmware method after natice method ? 371 */ 372 if (platform_pci_set_power_state) 373 platform_pci_set_power_state(dev, state); 374 375 dev->current_state = state; 376 377 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT 378 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning 379 * from D3hot to D0 _may_ perform an internal reset, thereby 380 * going to "D0 Uninitialized" rather than "D0 Initialized". 381 * For example, at least some versions of the 3c905B and the 382 * 3c556B exhibit this behaviour. 383 * 384 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave 385 * devices in a D3hot state at boot. Consequently, we need to 386 * restore at least the BARs so that the device will be 387 * accessible to its driver. 388 */ 389 if (need_restore) 390 pci_restore_bars(dev); 391 392 return 0; 393 } 394 395 int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state); 396 397 /** 398 * pci_choose_state - Choose the power state of a PCI device 399 * @dev: PCI device to be suspended 400 * @state: target sleep state for the whole system. This is the value 401 * that is passed to suspend() function. 402 * 403 * Returns PCI power state suitable for given device and given system 404 * message. 405 */ 406 407 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) 408 { 409 int ret; 410 411 if (!pci_find_capability(dev, PCI_CAP_ID_PM)) 412 return PCI_D0; 413 414 if (platform_pci_choose_state) { 415 ret = platform_pci_choose_state(dev, state); 416 if (ret >= 0) 417 state.event = ret; 418 } 419 420 switch (state.event) { 421 case PM_EVENT_ON: 422 return PCI_D0; 423 case PM_EVENT_FREEZE: 424 case PM_EVENT_SUSPEND: 425 return PCI_D3hot; 426 default: 427 printk("They asked me for state %d\n", state.event); 428 BUG(); 429 } 430 return PCI_D0; 431 } 432 433 EXPORT_SYMBOL(pci_choose_state); 434 435 /** 436 * pci_save_state - save the PCI configuration space of a device before suspending 437 * @dev: - PCI device that we're dealing with 438 */ 439 int 440 pci_save_state(struct pci_dev *dev) 441 { 442 int i; 443 /* XXX: 100% dword access ok here? */ 444 for (i = 0; i < 16; i++) 445 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]); 446 return 0; 447 } 448 449 /** 450 * pci_restore_state - Restore the saved state of a PCI device 451 * @dev: - PCI device that we're dealing with 452 */ 453 int 454 pci_restore_state(struct pci_dev *dev) 455 { 456 int i; 457 458 for (i = 0; i < 16; i++) 459 pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]); 460 return 0; 461 } 462 463 /** 464 * pci_enable_device_bars - Initialize some of a device for use 465 * @dev: PCI device to be initialized 466 * @bars: bitmask of BAR's that must be configured 467 * 468 * Initialize device before it's used by a driver. Ask low-level code 469 * to enable selected I/O and memory resources. Wake up the device if it 470 * was suspended. Beware, this function can fail. 471 */ 472 473 int 474 pci_enable_device_bars(struct pci_dev *dev, int bars) 475 { 476 int err; 477 478 err = pci_set_power_state(dev, PCI_D0); 479 if (err < 0 && err != -EIO) 480 return err; 481 err = pcibios_enable_device(dev, bars); 482 if (err < 0) 483 return err; 484 return 0; 485 } 486 487 /** 488 * pci_enable_device - Initialize device before it's used by a driver. 489 * @dev: PCI device to be initialized 490 * 491 * Initialize device before it's used by a driver. Ask low-level code 492 * to enable I/O and memory. Wake up the device if it was suspended. 493 * Beware, this function can fail. 494 */ 495 int 496 pci_enable_device(struct pci_dev *dev) 497 { 498 int err; 499 500 if ((err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1))) 501 return err; 502 pci_fixup_device(pci_fixup_enable, dev); 503 dev->is_enabled = 1; 504 return 0; 505 } 506 507 /** 508 * pcibios_disable_device - disable arch specific PCI resources for device dev 509 * @dev: the PCI device to disable 510 * 511 * Disables architecture specific PCI resources for the device. This 512 * is the default implementation. Architecture implementations can 513 * override this. 514 */ 515 void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {} 516 517 /** 518 * pci_disable_device - Disable PCI device after use 519 * @dev: PCI device to be disabled 520 * 521 * Signal to the system that the PCI device is not in use by the system 522 * anymore. This only involves disabling PCI bus-mastering, if active. 523 */ 524 void 525 pci_disable_device(struct pci_dev *dev) 526 { 527 u16 pci_command; 528 529 pci_read_config_word(dev, PCI_COMMAND, &pci_command); 530 if (pci_command & PCI_COMMAND_MASTER) { 531 pci_command &= ~PCI_COMMAND_MASTER; 532 pci_write_config_word(dev, PCI_COMMAND, pci_command); 533 } 534 dev->is_busmaster = 0; 535 536 pcibios_disable_device(dev); 537 dev->is_enabled = 0; 538 } 539 540 /** 541 * pci_enable_wake - enable device to generate PME# when suspended 542 * @dev: - PCI device to operate on 543 * @state: - Current state of device. 544 * @enable: - Flag to enable or disable generation 545 * 546 * Set the bits in the device's PM Capabilities to generate PME# when 547 * the system is suspended. 548 * 549 * -EIO is returned if device doesn't have PM Capabilities. 550 * -EINVAL is returned if device supports it, but can't generate wake events. 551 * 0 if operation is successful. 552 * 553 */ 554 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) 555 { 556 int pm; 557 u16 value; 558 559 /* find PCI PM capability in list */ 560 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 561 562 /* If device doesn't support PM Capabilities, but request is to disable 563 * wake events, it's a nop; otherwise fail */ 564 if (!pm) 565 return enable ? -EIO : 0; 566 567 /* Check device's ability to generate PME# */ 568 pci_read_config_word(dev,pm+PCI_PM_PMC,&value); 569 570 value &= PCI_PM_CAP_PME_MASK; 571 value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */ 572 573 /* Check if it can generate PME# from requested state. */ 574 if (!value || !(value & (1 << state))) 575 return enable ? -EINVAL : 0; 576 577 pci_read_config_word(dev, pm + PCI_PM_CTRL, &value); 578 579 /* Clear PME_Status by writing 1 to it and enable PME# */ 580 value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; 581 582 if (!enable) 583 value &= ~PCI_PM_CTRL_PME_ENABLE; 584 585 pci_write_config_word(dev, pm + PCI_PM_CTRL, value); 586 587 return 0; 588 } 589 590 int 591 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) 592 { 593 u8 pin; 594 595 pin = dev->pin; 596 if (!pin) 597 return -1; 598 pin--; 599 while (dev->bus->self) { 600 pin = (pin + PCI_SLOT(dev->devfn)) % 4; 601 dev = dev->bus->self; 602 } 603 *bridge = dev; 604 return pin; 605 } 606 607 /** 608 * pci_release_region - Release a PCI bar 609 * @pdev: PCI device whose resources were previously reserved by pci_request_region 610 * @bar: BAR to release 611 * 612 * Releases the PCI I/O and memory resources previously reserved by a 613 * successful call to pci_request_region. Call this function only 614 * after all use of the PCI regions has ceased. 615 */ 616 void pci_release_region(struct pci_dev *pdev, int bar) 617 { 618 if (pci_resource_len(pdev, bar) == 0) 619 return; 620 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 621 release_region(pci_resource_start(pdev, bar), 622 pci_resource_len(pdev, bar)); 623 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 624 release_mem_region(pci_resource_start(pdev, bar), 625 pci_resource_len(pdev, bar)); 626 } 627 628 /** 629 * pci_request_region - Reserved PCI I/O and memory resource 630 * @pdev: PCI device whose resources are to be reserved 631 * @bar: BAR to be reserved 632 * @res_name: Name to be associated with resource. 633 * 634 * Mark the PCI region associated with PCI device @pdev BR @bar as 635 * being reserved by owner @res_name. Do not access any 636 * address inside the PCI regions unless this call returns 637 * successfully. 638 * 639 * Returns 0 on success, or %EBUSY on error. A warning 640 * message is also printed on failure. 641 */ 642 int pci_request_region(struct pci_dev *pdev, int bar, char *res_name) 643 { 644 if (pci_resource_len(pdev, bar) == 0) 645 return 0; 646 647 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { 648 if (!request_region(pci_resource_start(pdev, bar), 649 pci_resource_len(pdev, bar), res_name)) 650 goto err_out; 651 } 652 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { 653 if (!request_mem_region(pci_resource_start(pdev, bar), 654 pci_resource_len(pdev, bar), res_name)) 655 goto err_out; 656 } 657 658 return 0; 659 660 err_out: 661 printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n", 662 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", 663 bar + 1, /* PCI BAR # */ 664 pci_resource_len(pdev, bar), pci_resource_start(pdev, bar), 665 pci_name(pdev)); 666 return -EBUSY; 667 } 668 669 670 /** 671 * pci_release_regions - Release reserved PCI I/O and memory resources 672 * @pdev: PCI device whose resources were previously reserved by pci_request_regions 673 * 674 * Releases all PCI I/O and memory resources previously reserved by a 675 * successful call to pci_request_regions. Call this function only 676 * after all use of the PCI regions has ceased. 677 */ 678 679 void pci_release_regions(struct pci_dev *pdev) 680 { 681 int i; 682 683 for (i = 0; i < 6; i++) 684 pci_release_region(pdev, i); 685 } 686 687 /** 688 * pci_request_regions - Reserved PCI I/O and memory resources 689 * @pdev: PCI device whose resources are to be reserved 690 * @res_name: Name to be associated with resource. 691 * 692 * Mark all PCI regions associated with PCI device @pdev as 693 * being reserved by owner @res_name. Do not access any 694 * address inside the PCI regions unless this call returns 695 * successfully. 696 * 697 * Returns 0 on success, or %EBUSY on error. A warning 698 * message is also printed on failure. 699 */ 700 int pci_request_regions(struct pci_dev *pdev, char *res_name) 701 { 702 int i; 703 704 for (i = 0; i < 6; i++) 705 if(pci_request_region(pdev, i, res_name)) 706 goto err_out; 707 return 0; 708 709 err_out: 710 while(--i >= 0) 711 pci_release_region(pdev, i); 712 713 return -EBUSY; 714 } 715 716 /** 717 * pci_set_master - enables bus-mastering for device dev 718 * @dev: the PCI device to enable 719 * 720 * Enables bus-mastering on the device and calls pcibios_set_master() 721 * to do the needed arch specific settings. 722 */ 723 void 724 pci_set_master(struct pci_dev *dev) 725 { 726 u16 cmd; 727 728 pci_read_config_word(dev, PCI_COMMAND, &cmd); 729 if (! (cmd & PCI_COMMAND_MASTER)) { 730 pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev)); 731 cmd |= PCI_COMMAND_MASTER; 732 pci_write_config_word(dev, PCI_COMMAND, cmd); 733 } 734 dev->is_busmaster = 1; 735 pcibios_set_master(dev); 736 } 737 738 #ifndef HAVE_ARCH_PCI_MWI 739 /* This can be overridden by arch code. */ 740 u8 pci_cache_line_size = L1_CACHE_BYTES >> 2; 741 742 /** 743 * pci_generic_prep_mwi - helper function for pci_set_mwi 744 * @dev: the PCI device for which MWI is enabled 745 * 746 * Helper function for generic implementation of pcibios_prep_mwi 747 * function. Originally copied from drivers/net/acenic.c. 748 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. 749 * 750 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 751 */ 752 static int 753 pci_generic_prep_mwi(struct pci_dev *dev) 754 { 755 u8 cacheline_size; 756 757 if (!pci_cache_line_size) 758 return -EINVAL; /* The system doesn't support MWI. */ 759 760 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be 761 equal to or multiple of the right value. */ 762 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 763 if (cacheline_size >= pci_cache_line_size && 764 (cacheline_size % pci_cache_line_size) == 0) 765 return 0; 766 767 /* Write the correct value. */ 768 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size); 769 /* Read it back. */ 770 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); 771 if (cacheline_size == pci_cache_line_size) 772 return 0; 773 774 printk(KERN_DEBUG "PCI: cache line size of %d is not supported " 775 "by device %s\n", pci_cache_line_size << 2, pci_name(dev)); 776 777 return -EINVAL; 778 } 779 #endif /* !HAVE_ARCH_PCI_MWI */ 780 781 /** 782 * pci_set_mwi - enables memory-write-invalidate PCI transaction 783 * @dev: the PCI device for which MWI is enabled 784 * 785 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND, 786 * and then calls @pcibios_set_mwi to do the needed arch specific 787 * operations or a generic mwi-prep function. 788 * 789 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 790 */ 791 int 792 pci_set_mwi(struct pci_dev *dev) 793 { 794 int rc; 795 u16 cmd; 796 797 #ifdef HAVE_ARCH_PCI_MWI 798 rc = pcibios_prep_mwi(dev); 799 #else 800 rc = pci_generic_prep_mwi(dev); 801 #endif 802 803 if (rc) 804 return rc; 805 806 pci_read_config_word(dev, PCI_COMMAND, &cmd); 807 if (! (cmd & PCI_COMMAND_INVALIDATE)) { 808 pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev)); 809 cmd |= PCI_COMMAND_INVALIDATE; 810 pci_write_config_word(dev, PCI_COMMAND, cmd); 811 } 812 813 return 0; 814 } 815 816 /** 817 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev 818 * @dev: the PCI device to disable 819 * 820 * Disables PCI Memory-Write-Invalidate transaction on the device 821 */ 822 void 823 pci_clear_mwi(struct pci_dev *dev) 824 { 825 u16 cmd; 826 827 pci_read_config_word(dev, PCI_COMMAND, &cmd); 828 if (cmd & PCI_COMMAND_INVALIDATE) { 829 cmd &= ~PCI_COMMAND_INVALIDATE; 830 pci_write_config_word(dev, PCI_COMMAND, cmd); 831 } 832 } 833 834 /** 835 * pci_intx - enables/disables PCI INTx for device dev 836 * @pdev: the PCI device to operate on 837 * @enable: boolean: whether to enable or disable PCI INTx 838 * 839 * Enables/disables PCI INTx for device dev 840 */ 841 void 842 pci_intx(struct pci_dev *pdev, int enable) 843 { 844 u16 pci_command, new; 845 846 pci_read_config_word(pdev, PCI_COMMAND, &pci_command); 847 848 if (enable) { 849 new = pci_command & ~PCI_COMMAND_INTX_DISABLE; 850 } else { 851 new = pci_command | PCI_COMMAND_INTX_DISABLE; 852 } 853 854 if (new != pci_command) { 855 pci_write_config_word(pdev, PCI_COMMAND, new); 856 } 857 } 858 859 #ifndef HAVE_ARCH_PCI_SET_DMA_MASK 860 /* 861 * These can be overridden by arch-specific implementations 862 */ 863 int 864 pci_set_dma_mask(struct pci_dev *dev, u64 mask) 865 { 866 if (!pci_dma_supported(dev, mask)) 867 return -EIO; 868 869 dev->dma_mask = mask; 870 871 return 0; 872 } 873 874 int 875 pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) 876 { 877 if (!pci_dma_supported(dev, mask)) 878 return -EIO; 879 880 dev->dev.coherent_dma_mask = mask; 881 882 return 0; 883 } 884 #endif 885 886 static int __devinit pci_init(void) 887 { 888 struct pci_dev *dev = NULL; 889 890 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 891 pci_fixup_device(pci_fixup_final, dev); 892 } 893 return 0; 894 } 895 896 static int __devinit pci_setup(char *str) 897 { 898 while (str) { 899 char *k = strchr(str, ','); 900 if (k) 901 *k++ = 0; 902 if (*str && (str = pcibios_setup(str)) && *str) { 903 /* PCI layer options should be handled here */ 904 printk(KERN_ERR "PCI: Unknown option `%s'\n", str); 905 } 906 str = k; 907 } 908 return 1; 909 } 910 911 device_initcall(pci_init); 912 913 __setup("pci=", pci_setup); 914 915 #if defined(CONFIG_ISA) || defined(CONFIG_EISA) 916 /* FIXME: Some boxes have multiple ISA bridges! */ 917 struct pci_dev *isa_bridge; 918 EXPORT_SYMBOL(isa_bridge); 919 #endif 920 921 EXPORT_SYMBOL_GPL(pci_restore_bars); 922 EXPORT_SYMBOL(pci_enable_device_bars); 923 EXPORT_SYMBOL(pci_enable_device); 924 EXPORT_SYMBOL(pci_disable_device); 925 EXPORT_SYMBOL(pci_find_capability); 926 EXPORT_SYMBOL(pci_bus_find_capability); 927 EXPORT_SYMBOL(pci_release_regions); 928 EXPORT_SYMBOL(pci_request_regions); 929 EXPORT_SYMBOL(pci_release_region); 930 EXPORT_SYMBOL(pci_request_region); 931 EXPORT_SYMBOL(pci_set_master); 932 EXPORT_SYMBOL(pci_set_mwi); 933 EXPORT_SYMBOL(pci_clear_mwi); 934 EXPORT_SYMBOL_GPL(pci_intx); 935 EXPORT_SYMBOL(pci_set_dma_mask); 936 EXPORT_SYMBOL(pci_set_consistent_dma_mask); 937 EXPORT_SYMBOL(pci_assign_resource); 938 EXPORT_SYMBOL(pci_find_parent_resource); 939 940 EXPORT_SYMBOL(pci_set_power_state); 941 EXPORT_SYMBOL(pci_save_state); 942 EXPORT_SYMBOL(pci_restore_state); 943 EXPORT_SYMBOL(pci_enable_wake); 944 945 /* Quirk info */ 946 947 EXPORT_SYMBOL(isa_dma_bridge_buggy); 948 EXPORT_SYMBOL(pci_pci_problems); 949