#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions. They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_dev->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP_READ(size,type,len) \
int pci_bus_read_config_##size \
        (struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
        int res; \
        unsigned long flags; \
        u32 data = 0; \
        if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
        raw_spin_lock_irqsave(&pci_lock, flags); \
        res = bus->ops->read(bus, devfn, pos, len, &data); \
        *value = (type)data; \
        raw_spin_unlock_irqrestore(&pci_lock, flags); \
        return res; \
}

#define PCI_OP_WRITE(size,type,len) \
int pci_bus_write_config_##size \
        (struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
        int res; \
        unsigned long flags; \
        if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
        raw_spin_lock_irqsave(&pci_lock, flags); \
        res = bus->ops->write(bus, devfn, pos, len, value); \
        raw_spin_unlock_irqrestore(&pci_lock, flags); \
        return res; \
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
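
/*
 * Illustrative sketch only (not used by this file): a caller holding a
 * struct pci_bus pointer and an encoded devfn can read a device's Vendor
 * ID through the word-sized wrapper generated above; the wrapper checks
 * alignment, takes pci_lock and returns a PCIBIOS_* status code.  The
 * error handling shown is hypothetical.
 *
 *      u16 vendor;
 *
 *      if (pci_bus_read_config_word(bus, devfn, PCI_VENDOR_ID, &vendor))
 *              return -ENODEV;
 */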

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus: pci bus struct
 * @ops: new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
        struct pci_ops *old_ops;
        unsigned long flags;

        raw_spin_lock_irqsave(&pci_lock, flags);
        old_ops = bus->ops;
        bus->ops = ops;
        raw_spin_unlock_irqrestore(&pci_lock, flags);
        return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to read
 * @buf: pointer to where to store result
 *
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
        if (!dev->vpd || !dev->vpd->ops)
                return -ENODEV;
        return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev: pci device struct
 * @pos: offset in vpd space
 * @count: number of bytes to write
 * @buf: buffer containing write data
 *
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
        if (!dev->vpd || !dev->vpd->ops)
                return -ENODEV;
        return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST
 * and we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
{
        DECLARE_WAITQUEUE(wait, current);

        __add_wait_queue(&pci_cfg_wait, &wait);
        do {
                set_current_state(TASK_UNINTERRUPTIBLE);
                raw_spin_unlock_irq(&pci_lock);
                schedule();
                raw_spin_lock_irq(&pci_lock);
        } while (dev->block_cfg_access);
        __remove_wait_queue(&pci_cfg_wait, &wait);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size,type) \
int pci_user_read_config_##size \
        (struct pci_dev *dev, int pos, type *val) \
{ \
        int ret = 0; \
        u32 data = -1; \
        if (PCI_##size##_BAD) \
                return -EINVAL; \
        raw_spin_lock_irq(&pci_lock); \
        if (unlikely(dev->block_cfg_access)) \
                pci_wait_cfg(dev); \
        ret = dev->bus->ops->read(dev->bus, dev->devfn, \
                                  pos, sizeof(type), &data); \
        raw_spin_unlock_irq(&pci_lock); \
        *val = (type)data; \
        if (ret > 0) \
                ret = -EINVAL; \
        return ret; \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size,type) \
int pci_user_write_config_##size \
        (struct pci_dev *dev, int pos, type val) \
{ \
        int ret = -EIO; \
        if (PCI_##size##_BAD) \
                return -EINVAL; \
        raw_spin_lock_irq(&pci_lock); \
        if (unlikely(dev->block_cfg_access)) \
                pci_wait_cfg(dev); \
        ret = dev->bus->ops->write(dev->bus, dev->devfn, \
                                   pos, sizeof(type), val); \
        raw_spin_unlock_irq(&pci_lock); \
        if (ret > 0) \
                ret = -EINVAL; \
        return ret; \
} \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
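
/*
 * Illustrative sketch only: the pci_user_*() accessors are intended for
 * user-initiated paths such as sysfs/proc config access.  Unlike the bus
 * wrappers above, they sleep in pci_wait_cfg() while the device has config
 * access blocked, and they return 0 or a negative errno.  The error
 * handling shown is hypothetical.
 *
 *      u16 cmd;
 *
 *      if (pci_user_read_config_word(dev, PCI_COMMAND, &cmd))
 *              return -EIO;
 */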

/* VPD access through PCI 2.2+ VPD capability */

#define PCI_VPD_PCI22_SIZE (PCI_VPD_ADDR_MASK + 1)

struct pci_vpd_pci22 {
        struct pci_vpd base;
        struct mutex lock;
        u16 flag;
        bool busy;
        u8 cap;
};

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_pci22_wait(struct pci_dev *dev)
{
        struct pci_vpd_pci22 *vpd =
                container_of(dev->vpd, struct pci_vpd_pci22, base);
        unsigned long timeout = jiffies + HZ/20 + 2;
        u16 status;
        int ret;

        if (!vpd->busy)
                return 0;

        for (;;) {
                ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
                                                &status);
                if (ret < 0)
                        return ret;

                if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
                        vpd->busy = false;
                        return 0;
                }

                if (time_after(jiffies, timeout)) {
                        dev_printk(KERN_DEBUG, &dev->dev,
                                   "vpd r/w failed. This is likely a firmware "
                                   "bug on this device. Contact the card "
                                   "vendor for a firmware update.");
                        return -ETIMEDOUT;
                }
                if (fatal_signal_pending(current))
                        return -EINTR;
                if (!cond_resched())
                        udelay(10);
        }
}

static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
                                  void *arg)
{
        struct pci_vpd_pci22 *vpd =
                container_of(dev->vpd, struct pci_vpd_pci22, base);
        int ret;
        loff_t end = pos + count;
        u8 *buf = arg;

        if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
                return -EINVAL;

        if (mutex_lock_killable(&vpd->lock))
                return -EINTR;

        ret = pci_vpd_pci22_wait(dev);
        if (ret < 0)
                goto out;

        while (pos < end) {
                u32 val;
                unsigned int i, skip;

                ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
                                                 pos & ~3);
                if (ret < 0)
                        break;
                vpd->busy = true;
                vpd->flag = PCI_VPD_ADDR_F;
                ret = pci_vpd_pci22_wait(dev);
                if (ret < 0)
                        break;

                ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
                if (ret < 0)
                        break;

                skip = pos & 3;
                for (i = 0; i < sizeof(u32); i++) {
                        if (i >= skip) {
                                *buf++ = val;
                                if (++pos == end)
                                        break;
                        }
                        val >>= 8;
                }
        }
out:
        mutex_unlock(&vpd->lock);
        return ret ? ret : count;
}

static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
                                   const void *arg)
{
        struct pci_vpd_pci22 *vpd =
                container_of(dev->vpd, struct pci_vpd_pci22, base);
        const u8 *buf = arg;
        loff_t end = pos + count;
        int ret = 0;

        if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
                return -EINVAL;

        if (mutex_lock_killable(&vpd->lock))
                return -EINTR;

        ret = pci_vpd_pci22_wait(dev);
        if (ret < 0)
                goto out;

        while (pos < end) {
                u32 val;

                val = *buf++;
                val |= *buf++ << 8;
                val |= *buf++ << 16;
                val |= *buf++ << 24;

                ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
                if (ret < 0)
                        break;
                ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
                                                 pos | PCI_VPD_ADDR_F);
                if (ret < 0)
                        break;

                vpd->busy = true;
                vpd->flag = 0;
                ret = pci_vpd_pci22_wait(dev);
                if (ret < 0)
                        break;

                pos += sizeof(u32);
        }
out:
        mutex_unlock(&vpd->lock);
        return ret ? ret : count;
}

static void pci_vpd_pci22_release(struct pci_dev *dev)
{
        kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
}

static const struct pci_vpd_ops pci_vpd_pci22_ops = {
        .read = pci_vpd_pci22_read,
        .write = pci_vpd_pci22_write,
        .release = pci_vpd_pci22_release,
};

int pci_vpd_pci22_init(struct pci_dev *dev)
{
        struct pci_vpd_pci22 *vpd;
        u8 cap;

        cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
        if (!cap)
                return -ENODEV;
        vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
        if (!vpd)
                return -ENOMEM;

        vpd->base.len = PCI_VPD_PCI22_SIZE;
        vpd->base.ops = &pci_vpd_pci22_ops;
        mutex_init(&vpd->lock);
        vpd->cap = cap;
        vpd->busy = false;
        dev->vpd = &vpd->base;
        return 0;
}
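
/*
 * Illustrative sketch only: once pci_vpd_pci22_init() has attached
 * dev->vpd, a driver reads VPD through the generic pci_read_vpd() wrapper;
 * the ops above turn that into VPD address/data register cycles and poll
 * in pci_vpd_pci22_wait() until the flag bit toggles.  On success the
 * return value is the requested byte count, otherwise a negative errno.
 *
 *      u8 tag[4];
 *      ssize_t n = pci_read_vpd(dev, 0, sizeof(tag), tag);
 */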

/**
 * pci_vpd_truncate - Set available Vital Product Data size
 * @dev: pci device struct
 * @size: available memory in bytes
 *
 * Adjust size of available VPD area.
 */
int pci_vpd_truncate(struct pci_dev *dev, size_t size)
{
        if (!dev->vpd)
                return -EINVAL;

        /* limited by the access method */
        if (size > dev->vpd->len)
                return -EINVAL;

        dev->vpd->len = size;
        if (dev->vpd->attr)
                dev->vpd->attr->size = size;

        return 0;
}
EXPORT_SYMBOL(pci_vpd_truncate);

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
        might_sleep();

        raw_spin_lock_irq(&pci_lock);
        if (dev->block_cfg_access)
                pci_wait_cfg(dev);
        dev->block_cfg_access = 1;
        raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev: pci device struct
 *
 * Same as pci_cfg_access_lock, but will return 0 if access is
 * already locked, 1 otherwise. This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
        unsigned long flags;
        bool locked = true;

        raw_spin_lock_irqsave(&pci_lock, flags);
        if (dev->block_cfg_access)
                locked = false;
        else
                dev->block_cfg_access = 1;
        raw_spin_unlock_irqrestore(&pci_lock, flags);

        return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&pci_lock, flags);

        /* This indicates a problem in the caller, but we don't need
         * to kill them, unlike a double-block above. */
        WARN_ON(!dev->block_cfg_access);

        dev->block_cfg_access = 0;
        wake_up_all(&pci_cfg_wait);
        raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
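
/*
 * Illustrative sketch only: a typical caller blocks config access around
 * an operation that must not be interrupted by config cycles (the comment
 * above mentions BIST and D-state transitions), then releases it so that
 * readers and writers sleeping on pci_cfg_wait can continue.
 *
 *      pci_cfg_access_lock(dev);
 *      ... run BIST / change power state ...
 *      pci_cfg_access_unlock(dev);
 */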

static inline int pcie_cap_version(const struct pci_dev *dev)
{
        return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
        int type = pci_pcie_type(dev);

        return type == PCI_EXP_TYPE_ENDPOINT ||
               type == PCI_EXP_TYPE_LEG_END ||
               type == PCI_EXP_TYPE_ROOT_PORT ||
               type == PCI_EXP_TYPE_UPSTREAM ||
               type == PCI_EXP_TYPE_DOWNSTREAM ||
               type == PCI_EXP_TYPE_PCI_BRIDGE ||
               type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
        int type = pci_pcie_type(dev);

        return (type == PCI_EXP_TYPE_ROOT_PORT ||
                type == PCI_EXP_TYPE_DOWNSTREAM) &&
               pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
        int type = pci_pcie_type(dev);

        return type == PCI_EXP_TYPE_ROOT_PORT ||
               type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
        if (!pci_is_pcie(dev))
                return false;

        switch (pos) {
        case PCI_EXP_FLAGS:
                return true;
        case PCI_EXP_DEVCAP:
        case PCI_EXP_DEVCTL:
        case PCI_EXP_DEVSTA:
                return true;
        case PCI_EXP_LNKCAP:
        case PCI_EXP_LNKCTL:
        case PCI_EXP_LNKSTA:
                return pcie_cap_has_lnkctl(dev);
        case PCI_EXP_SLTCAP:
        case PCI_EXP_SLTCTL:
        case PCI_EXP_SLTSTA:
                return pcie_cap_has_sltctl(dev);
        case PCI_EXP_RTCTL:
        case PCI_EXP_RTCAP:
        case PCI_EXP_RTSTA:
                return pcie_cap_has_rtctl(dev);
        case PCI_EXP_DEVCAP2:
        case PCI_EXP_DEVCTL2:
        case PCI_EXP_LNKCAP2:
        case PCI_EXP_LNKCTL2:
        case PCI_EXP_LNKSTA2:
                return pcie_cap_version(dev) > 1;
        default:
                return false;
        }
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
        int ret;

        *val = 0;
        if (pos & 1)
                return -EINVAL;

        if (pcie_capability_reg_implemented(dev, pos)) {
                ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
                /*
                 * Reset *val to 0 if pci_read_config_word() fails, it may
                 * have been written as 0xFFFF if hardware error happens
                 * during pci_read_config_word().
                 */
                if (ret)
                        *val = 0;
                return ret;
        }

        /*
         * For Functions that do not implement the Slot Capabilities,
         * Slot Status, and Slot Control registers, these spaces must
         * be hardwired to 0b, with the exception of the Presence Detect
         * State bit in the Slot Status register of Downstream Ports,
         * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
         */
        if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
            pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
                *val = PCI_EXP_SLTSTA_PDS;
        }

        return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
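
/*
 * Illustrative sketch only: reading through pcie_capability_read_word()
 * spares the caller the "is this register implemented?" check; a register
 * the device's capability version or port type does not implement simply
 * reads back as 0 with a 0 return value.
 *
 *      u16 lnksta;
 *
 *      pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
 */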

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
        int ret;

        *val = 0;
        if (pos & 3)
                return -EINVAL;

        if (pcie_capability_reg_implemented(dev, pos)) {
                ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
                /*
                 * Reset *val to 0 if pci_read_config_dword() fails, it may
                 * have been written as 0xFFFFFFFF if hardware error happens
                 * during pci_read_config_dword().
                 */
                if (ret)
                        *val = 0;
                return ret;
        }

        if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
            pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
                *val = PCI_EXP_SLTSTA_PDS;
        }

        return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
        if (pos & 1)
                return -EINVAL;

        if (!pcie_capability_reg_implemented(dev, pos))
                return 0;

        return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
        if (pos & 3)
                return -EINVAL;

        if (!pcie_capability_reg_implemented(dev, pos))
                return 0;

        return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
                                       u16 clear, u16 set)
{
        int ret;
        u16 val;

        ret = pcie_capability_read_word(dev, pos, &val);
        if (!ret) {
                val &= ~clear;
                val |= set;
                ret = pcie_capability_write_word(dev, pos, val);
        }

        return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
                                        u32 clear, u32 set)
{
        int ret;
        u32 val;

        ret = pcie_capability_read_dword(dev, pos, &val);
        if (!ret) {
                val &= ~clear;
                val |= set;
                ret = pcie_capability_write_dword(dev, pos, val);
        }

        return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
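
/*
 * Illustrative sketch only: the clear-and-set helpers perform a
 * read-modify-write of a capability register, for example clearing the
 * Enable Relaxed Ordering bit in Device Control while leaving the other
 * bits untouched.
 *
 *      pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
 *                                         PCI_EXP_DEVCTL_RELAX_EN, 0);
 */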