// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions. They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_bus->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#ifdef CONFIG_PCI_LOCKLESS_CONFIG
# define pci_lock_config(f)	do { (void)(f); } while (0)
# define pci_unlock_config(f)	do { (void)(f); } while (0)
#else
# define pci_lock_config(f)	raw_spin_lock_irqsave(&pci_lock, f)
# define pci_unlock_config(f)	raw_spin_unlock_irqrestore(&pci_lock, f)
#endif

#define PCI_OP_READ(size, type, len) \
int noinline pci_bus_read_config_##size \
        (struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
        unsigned long flags; \
        u32 data = 0; \
        int res; \
 \
        if (PCI_##size##_BAD) \
                return PCIBIOS_BAD_REGISTER_NUMBER; \
 \
        pci_lock_config(flags); \
        res = bus->ops->read(bus, devfn, pos, len, &data); \
        if (res) \
                PCI_SET_ERROR_RESPONSE(value); \
        else \
                *value = (type)data; \
        pci_unlock_config(flags); \
 \
        return res; \
}

#define PCI_OP_WRITE(size, type, len) \
int noinline pci_bus_write_config_##size \
        (struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
        unsigned long flags; \
        int res; \
 \
        if (PCI_##size##_BAD) \
                return PCIBIOS_BAD_REGISTER_NUMBER; \
 \
        pci_lock_config(flags); \
        res = bus->ops->write(bus, devfn, pos, len, value); \
        pci_unlock_config(flags); \
 \
        return res; \
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
                            int where, int size, u32 *val)
{
        void __iomem *addr;

        addr = bus->ops->map_bus(bus, devfn, where);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (size == 1)
                *val = readb(addr);
        else if (size == 2)
                *val = readw(addr);
        else
                *val = readl(addr);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read);

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
                             int where, int size, u32 val)
{
        void __iomem *addr;

        addr = bus->ops->map_bus(bus, devfn, where);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (size == 1)
                writeb(val, addr);
        else if (size == 2)
                writew(val, addr);
        else
                writel(val, addr);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write);
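
/*
 * Usage sketch: pci_generic_config_read()/pci_generic_config_write()
 * are intended to be plugged straight into a host controller driver's
 * pci_ops, next to a map_bus() callback that translates (devfn, where)
 * into an __iomem pointer (or returns NULL if the device is absent).
 * The names below (foo_pci_map_bus, foo_pci_ops) are hypothetical and
 * only illustrate the wiring:
 *
 *	static struct pci_ops foo_pci_ops = {
 *		.map_bus	= foo_pci_map_bus,
 *		.read		= pci_generic_config_read,
 *		.write		= pci_generic_config_write,
 *	};
 */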

int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
                              int where, int size, u32 *val)
{
        void __iomem *addr;

        addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        *val = readl(addr);

        if (size <= 2)
                *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read32);

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
                               int where, int size, u32 val)
{
        void __iomem *addr;
        u32 mask, tmp;

        addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (size == 4) {
                writel(val, addr);
                return PCIBIOS_SUCCESSFUL;
        }

        /*
         * In general, hardware that supports only 32-bit writes on PCI is
         * not spec-compliant. For example, software may perform a 16-bit
         * write. If the hardware only supports 32-bit accesses, we must
         * do a 32-bit read, merge in the 16 bits we intend to write,
         * followed by a 32-bit write. If the 16 bits we *don't* intend to
         * write happen to have any RW1C (write-one-to-clear) bits set, we
         * just inadvertently cleared something we shouldn't have.
         */
        if (!bus->unsafe_warn) {
                dev_warn(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
                         size, pci_domain_nr(bus), bus->number,
                         PCI_SLOT(devfn), PCI_FUNC(devfn), where);
                bus->unsafe_warn = 1;
        }

        mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
        tmp = readl(addr) & mask;
        tmp |= val << ((where & 0x3) * 8);
        writel(tmp, addr);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);
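
/*
 * Worked example for the read-modify-write above (illustrative only):
 * a 2-byte write of 0xABCD to offset 0x06 maps the aligned dword at
 * offset 0x04, so (where & 0x3) == 2 and (1 << (size * 8)) - 1 == 0xFFFF.
 * The mask becomes ~(0xFFFF << 16) == 0x0000FFFF, the low 16 bits of
 * the existing dword are kept, and 0xABCD is merged into bits 31:16
 * before the full dword is written back.  Any RW1C bits that read as 1
 * in the kept low half get written back as 1 and are therefore cleared,
 * which is exactly what the warning above is about.
 */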

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus: pci bus struct
 * @ops: new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
        struct pci_ops *old_ops;
        unsigned long flags;

        raw_spin_lock_irqsave(&pci_lock, flags);
        old_ops = bus->ops;
        bus->ops = ops;
        raw_spin_unlock_irqrestore(&pci_lock, flags);
        return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so. Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
        __must_hold(&pci_lock)
{
        do {
                raw_spin_unlock_irq(&pci_lock);
                wait_event(pci_cfg_wait, !dev->block_cfg_access);
                raw_spin_lock_irq(&pci_lock);
        } while (dev->block_cfg_access);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size, type) \
int pci_user_read_config_##size \
        (struct pci_dev *dev, int pos, type *val) \
{ \
        u32 data = -1; \
        int ret; \
 \
        if (PCI_##size##_BAD) \
                return -EINVAL; \
 \
        raw_spin_lock_irq(&pci_lock); \
        if (unlikely(dev->block_cfg_access)) \
                pci_wait_cfg(dev); \
        ret = dev->bus->ops->read(dev->bus, dev->devfn, \
                                  pos, sizeof(type), &data); \
        raw_spin_unlock_irq(&pci_lock); \
        if (ret) \
                PCI_SET_ERROR_RESPONSE(val); \
        else \
                *val = (type)data; \
 \
        return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size, type) \
int pci_user_write_config_##size \
        (struct pci_dev *dev, int pos, type val) \
{ \
        int ret; \
 \
        if (PCI_##size##_BAD) \
                return -EINVAL; \
 \
        raw_spin_lock_irq(&pci_lock); \
        if (unlikely(dev->block_cfg_access)) \
                pci_wait_cfg(dev); \
        ret = dev->bus->ops->write(dev->bus, dev->devfn, \
                                   pos, sizeof(type), val); \
        raw_spin_unlock_irq(&pci_lock); \
 \
        return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
        might_sleep();

        raw_spin_lock_irq(&pci_lock);
        if (dev->block_cfg_access)
                pci_wait_cfg(dev);
        dev->block_cfg_access = 1;
        raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev: pci device struct
 *
 * Same as pci_cfg_access_lock, but will return 0 if access is
 * already locked, 1 otherwise. This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
        unsigned long flags;
        bool locked = true;

        raw_spin_lock_irqsave(&pci_lock, flags);
        if (dev->block_cfg_access)
                locked = false;
        else
                dev->block_cfg_access = 1;
        raw_spin_unlock_irqrestore(&pci_lock, flags);

        return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&pci_lock, flags);

        /*
         * This indicates a problem in the caller, but we don't need
         * to kill them, unlike a double-block above.
         */
        WARN_ON(!dev->block_cfg_access);

        dev->block_cfg_access = 0;
        raw_spin_unlock_irqrestore(&pci_lock, flags);

        wake_up_all(&pci_cfg_wait);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
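
/*
 * Usage sketch: a caller about to perform an operation during which
 * userspace-initiated config accesses must be blocked (e.g. BIST or a
 * D-state transition, per the comment above) brackets it with the
 * lock/unlock pair.  pci_cfg_access_lock() may sleep; from atomic
 * context use pci_cfg_access_trylock() and handle failure.  "pdev" is
 * a hypothetical struct pci_dev pointer:
 *
 *	pci_cfg_access_lock(pdev);
 *	... perform the reset or power transition ...
 *	pci_cfg_access_unlock(pdev);
 */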

static inline int pcie_cap_version(const struct pci_dev *dev)
{
        return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
        int type = pci_pcie_type(dev);

        return type == PCI_EXP_TYPE_ENDPOINT ||
               type == PCI_EXP_TYPE_LEG_END ||
               type == PCI_EXP_TYPE_ROOT_PORT ||
               type == PCI_EXP_TYPE_UPSTREAM ||
               type == PCI_EXP_TYPE_DOWNSTREAM ||
               type == PCI_EXP_TYPE_PCI_BRIDGE ||
               type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

bool pcie_cap_has_lnkctl2(const struct pci_dev *dev)
{
        return pcie_cap_has_lnkctl(dev) && pcie_cap_version(dev) > 1;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
        return pcie_downstream_port(dev) &&
               pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
        int type = pci_pcie_type(dev);

        return type == PCI_EXP_TYPE_ROOT_PORT ||
               type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
        if (!pci_is_pcie(dev))
                return false;

        switch (pos) {
        case PCI_EXP_FLAGS:
                return true;
        case PCI_EXP_DEVCAP:
        case PCI_EXP_DEVCTL:
        case PCI_EXP_DEVSTA:
                return true;
        case PCI_EXP_LNKCAP:
        case PCI_EXP_LNKCTL:
        case PCI_EXP_LNKSTA:
                return pcie_cap_has_lnkctl(dev);
        case PCI_EXP_SLTCAP:
        case PCI_EXP_SLTCTL:
        case PCI_EXP_SLTSTA:
                return pcie_cap_has_sltctl(dev);
        case PCI_EXP_RTCTL:
        case PCI_EXP_RTCAP:
        case PCI_EXP_RTSTA:
                return pcie_cap_has_rtctl(dev);
        case PCI_EXP_DEVCAP2:
        case PCI_EXP_DEVCTL2:
                return pcie_cap_version(dev) > 1;
        case PCI_EXP_LNKCAP2:
        case PCI_EXP_LNKCTL2:
        case PCI_EXP_LNKSTA2:
                return pcie_cap_has_lnkctl2(dev);
        default:
                return false;
        }
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
        int ret;

        *val = 0;
        if (pos & 1)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (pcie_capability_reg_implemented(dev, pos)) {
                ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
                /*
                 * Reset *val to 0 if pci_read_config_word() fails; it may
                 * have been written as 0xFFFF (PCI_ERROR_RESPONSE) if the
                 * config read failed on PCI.
                 */
                if (ret)
                        *val = 0;
                return ret;
        }

        /*
         * For Functions that do not implement the Slot Capabilities,
         * Slot Status, and Slot Control registers, these spaces must
         * be hardwired to 0b, with the exception of the Presence Detect
         * State bit in the Slot Status register of Downstream Ports,
         * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
         */
        if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
            pos == PCI_EXP_SLTSTA)
                *val = PCI_EXP_SLTSTA_PDS;

        return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
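
/*
 * Usage sketch: read the Link Status register of the PCIe Capability.
 * "pos" is an offset within the capability (e.g. PCI_EXP_LNKSTA), not
 * an absolute config offset; the helper adds pci_pcie_cap(dev) itself
 * and returns 0 with *val == 0 for registers the device does not
 * implement.  "pdev" and "speed" are hypothetical:
 *
 *	u16 lnksta, speed;
 *
 *	if (!pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta))
 *		speed = lnksta & PCI_EXP_LNKSTA_CLS;
 */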

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
        int ret;

        *val = 0;
        if (pos & 3)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (pcie_capability_reg_implemented(dev, pos)) {
                ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
                /*
                 * Reset *val to 0 if pci_read_config_dword() fails; it may
                 * have been written as 0xFFFFFFFF (PCI_ERROR_RESPONSE) if
                 * the config read failed on PCI.
                 */
                if (ret)
                        *val = 0;
                return ret;
        }

        if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
            pos == PCI_EXP_SLTSTA)
                *val = PCI_EXP_SLTSTA_PDS;

        return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
        if (pos & 1)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (!pcie_capability_reg_implemented(dev, pos))
                return 0;

        return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
        if (pos & 3)
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (!pcie_capability_reg_implemented(dev, pos))
                return 0;

        return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
                                                u16 clear, u16 set)
{
        int ret;
        u16 val;

        ret = pcie_capability_read_word(dev, pos, &val);
        if (ret)
                return ret;

        val &= ~clear;
        val |= set;
        return pcie_capability_write_word(dev, pos, val);
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word_unlocked);

int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
                                              u16 clear, u16 set)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev->pcie_cap_lock, flags);
        ret = pcie_capability_clear_and_set_word_unlocked(dev, pos, clear, set);
        spin_unlock_irqrestore(&dev->pcie_cap_lock, flags);

        return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word_locked);
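
/*
 * Usage sketch: the clear-and-set helpers perform a read-modify-write
 * of a single PCIe Capability register.  The _locked variant serializes
 * against other callers via dev->pcie_cap_lock, e.g. to clear the ASPM
 * Control field in Link Control ("pdev" is a hypothetical device):
 *
 *	pcie_capability_clear_and_set_word_locked(pdev, PCI_EXP_LNKCTL,
 *						  PCI_EXP_LNKCTL_ASPMC, 0);
 */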

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
                                        u32 clear, u32 set)
{
        int ret;
        u32 val;

        ret = pcie_capability_read_dword(dev, pos, &val);
        if (ret)
                return ret;

        val &= ~clear;
        val |= set;
        return pcie_capability_write_dword(dev, pos, val);
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);

int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
{
        if (pci_dev_is_disconnected(dev)) {
                PCI_SET_ERROR_RESPONSE(val);
                return PCIBIOS_DEVICE_NOT_FOUND;
        }
        return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_byte);

int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
{
        if (pci_dev_is_disconnected(dev)) {
                PCI_SET_ERROR_RESPONSE(val);
                return PCIBIOS_DEVICE_NOT_FOUND;
        }
        return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_word);

int pci_read_config_dword(const struct pci_dev *dev, int where,
                          u32 *val)
{
        if (pci_dev_is_disconnected(dev)) {
                PCI_SET_ERROR_RESPONSE(val);
                return PCIBIOS_DEVICE_NOT_FOUND;
        }
        return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_dword);

int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
{
        if (pci_dev_is_disconnected(dev))
                return PCIBIOS_DEVICE_NOT_FOUND;
        return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_byte);

int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
{
        if (pci_dev_is_disconnected(dev))
                return PCIBIOS_DEVICE_NOT_FOUND;
        return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_word);

int pci_write_config_dword(const struct pci_dev *dev, int where,
                           u32 val)
{
        if (pci_dev_is_disconnected(dev))
                return PCIBIOS_DEVICE_NOT_FOUND;
        return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_dword);

void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
                                    u32 clear, u32 set)
{
        u32 val;

        pci_read_config_dword(dev, pos, &val);
        val &= ~clear;
        val |= set;
        pci_write_config_dword(dev, pos, val);
}
EXPORT_SYMBOL(pci_clear_and_set_config_dword);
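
/*
 * Usage sketch: pci_clear_and_set_config_dword() is a convenience
 * read-modify-write for plain config space; unlike the pcie_capability_*
 * helpers it takes an absolute config offset and does not return an
 * error.  A hypothetical caller updating a dword register "reg" with
 * made-up FOO_MASK/FOO_ENABLE bit definitions might do:
 *
 *	pci_clear_and_set_config_dword(pdev, reg, FOO_MASK, FOO_ENABLE);
 */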