// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);
static unsigned int zpci_num_domains_allocated;

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_remove_device(zdev);
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 status;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
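
/*
 * A sketch of the expected call from the DMA init path (the zdev field
 * names below are those used by the arch DMA code and are an assumption
 * here, not defined in this file):
 *
 *	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
 *				virt_to_phys(zdev->dma_table));
 *
 * Note that iota must be 16K-aligned: its low bits are reserved for
 * format flags, which is why ZPCI_IOTA_RTTO_FLAG can simply be OR'ed in
 * above and why zpci_register_ioat() warns on iota & 0x3fff.
 */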

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;
	return cc ? -EIO : 0;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}
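
/*
 * Config space is accessed with PCI load/store instructions that always
 * transfer a left-aligned quantity in a 64-bit register. After the
 * little-endian conversion the requested bytes therefore sit at the
 * high-order end and must be shifted down by (8 - len) * 8 bits; e.g. a
 * 2-byte read arrives in bits 63..48 and needs a shift by 48. The store
 * path mirrors this by shifting up before the conversion.
 */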

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	/* count is in units of 64-bit words, zpci_memcpy_toio() takes bytes */
	zpci_memcpy_toio(to, from, count * 8);
}

void __iomem *ioremap(unsigned long ioaddr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long offset;

	if (!size)
		return NULL;

	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) ioaddr;

	offset = ioaddr & ~PAGE_MASK;
	ioaddr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	if (ioremap_page_range((unsigned long) area->addr,
			       (unsigned long) area->addr + size,
			       ioaddr, PAGE_KERNEL)) {
		vunmap(area->addr);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}
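
/*
 * Without MIO the cookie returned by pci_iomap_range_fh() is not a CPU
 * address at all: ZPCI_ADDR(idx) encodes an index into the
 * zpci_iomap_start table, and the PCI load/store instructions later look
 * up the function handle and BAR from that entry. With MIO the
 * hardware-provided MIO address is ioremap()ed and accessed directly.
 */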

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);
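
/*
 * The unmap counterparts mirror the two mapping flavours: in the
 * function-handle case nothing was actually mapped, so only the
 * reference count of the iomap entry is dropped (the entry is cleared
 * once the count reaches zero); in the MIO case the vmap area created
 * by ioremap() is torn down.
 */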

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_load(zdev, where, val, size);

	return ret;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_store(zdev, where, val, size);

	return ret;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

#ifdef CONFIG_PCI_IOV
static struct resource iov_res = {
	.name	= "PCI IOV res",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
#endif

static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

#ifdef CONFIG_PCI_IOV
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		int bar = i + PCI_IOV_RESOURCES;

		len = pci_resource_len(pdev, bar);
		if (!len)
			continue;
		pdev->resource[bar].parent = &iov_res;
	}
#endif
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}
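
/*
 * Each mapped BAR of each function occupies one slot in the global
 * zpci_iomap_start table. ZPCI_IOMAP_ENTRIES caps that table at half
 * the theoretical maximum (ZPCI_NR_DEVICES * PCI_STD_NUM_BARS), so
 * zpci_alloc_iomap() can fail with -ENOSPC even while device slots
 * are still available.
 */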

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

static int zpci_setup_bus_resources(struct zpci_dev *zdev,
				    struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size || !zdev->bars[i].res)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
}

int pcibios_add_device(struct pci_dev *pdev)
{
	struct resource *res;
	int i;

	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	zpci_unmap_resources(pdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}
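
/*
 * Domain allocation: if the machine guarantees unique UIDs
 * (zpci_unique_uid), the UID doubles as the domain number so that
 * domains stay stable across reboots and hotplug. A UID of zero in that
 * mode disables UID checking and falls back to automatic allocation
 * from the zpci_domain bitmap, as does the non-unique case.
 */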

static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	if (zpci_num_domains_allocated > (ZPCI_NR_DEVICES - 1)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n",
		       zdev->fid, ZPCI_NR_DEVICES);
		return -ENOSPC;
	}

	if (zpci_unique_uid) {
		zdev->domain = (u16) zdev->uid;
		if (zdev->domain == 0) {
			pr_warn("UID checking is active but no UID is set for PCI function %08x, so automatic domain allocation is used instead\n",
				zdev->fid);
			update_uid_checking(false);
			goto auto_allocate;
		}

		if (test_bit(zdev->domain, zpci_domain)) {
			spin_unlock(&zpci_domain_lock);
			pr_err("Adding PCI function %08x failed because domain %04x is already assigned\n",
			       zdev->fid, zdev->domain);
			return -EEXIST;
		}
		set_bit(zdev->domain, zpci_domain);
		zpci_num_domains_allocated++;
		spin_unlock(&zpci_domain_lock);
		return 0;
	}
auto_allocate:
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(zdev->domain, zpci_domain);
	zpci_num_domains_allocated++;
	spin_unlock(&zpci_domain_lock);
	return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	zpci_num_domains_allocated--;
	spin_unlock(&zpci_domain_lock);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	zpci_exit_slot(zdev);
	zpci_cleanup_bus_resources(zdev);
	zpci_destroy_iommu(zdev);
	zpci_free_domain(zdev);

	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);

	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}

static int zpci_scan_bus(struct zpci_dev *zdev)
{
	LIST_HEAD(resources);
	int ret;

	ret = zpci_setup_bus_resources(zdev, &resources);
	if (ret)
		goto error;

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus) {
		ret = -EIO;
		goto error;
	}
	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	pci_bus_add_devices(zdev->bus);
	return 0;

error:
	zpci_cleanup_bus_resources(zdev);
	pci_free_resource_list(&resources);
	return ret;
}

int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto out_free;

	mutex_init(&zdev->lock);
	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_destroy_iommu;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	zpci_init_slot(zdev);

	return 0;

out_disable:
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_destroy_iommu:
	zpci_destroy_iommu(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}

void zpci_remove_device(struct zpci_dev *zdev)
{
	if (!zdev->bus)
		return;

	pci_stop_root_bus(zdev->bus);
	pci_remove_root_bus(zdev->bus);
}
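
/*
 * Forward an adapter error report to the platform via the SCLP
 * interface; the function is identified by its current handle and its
 * function ID.
 */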
int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
static unsigned int s390_pci_no_mio __initdata;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		s390_pci_no_mio = 1;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71))
		return 0;

	if (test_facility(153) && !s390_pci_no_mio) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);

void zpci_rescan(void)
{
	if (zpci_is_enabled())
		clp_rescan_pci_devices_simple(NULL);
}