/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
#include <linux/msi.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#define DEBUG				/* enable pr_debug */

#define SIC_IRQ_MODE_ALL		0
#define SIC_IRQ_MODE_SINGLE		1

#define ZPCI_NR_DMA_SPACES		1
#define ZPCI_MSI_VEC_BITS		6
#define ZPCI_NR_DEVICES			CONFIG_PCI_NR_FUNCTIONS

/* list of all detected zpci devices */
LIST_HEAD(zpci_list);
EXPORT_SYMBOL_GPL(zpci_list);
DEFINE_MUTEX(zpci_list_lock);
EXPORT_SYMBOL_GPL(zpci_list_lock);

static struct pci_hp_callback_ops *hotplug_ops;

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

struct callback {
	irq_handler_t	handler;
	void		*data;
};

struct zdev_irq_map {
	unsigned long	aibv;		/* AI bit vector */
	int		msi_vecs;	/* consecutive MSI-vectors used */
	int		__unused;
	struct callback	cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
	spinlock_t	lock;		/* protect callbacks against de-reg */
};

struct intr_bucket {
	/* amap of adapters, one bit per dev, corresponds to one irq nr */
	unsigned long	*alloc;
	/* AI summary bit, global page for all devices */
	unsigned long	*aisb;
	/* pointer to aibv and callback data in zdev */
	struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
	/* protects the whole bucket struct */
	spinlock_t	lock;
};

static struct intr_bucket *bucket;

/* Adapter local summary indicator */
static u8 *zpci_irq_si;

static atomic_t irq_retries = ATOMIC_INIT(0);

/* I/O Map */
static DEFINE_SPINLOCK(zpci_iomap_lock);
static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* highest irq summary bit */
static int __read_mostly aisb_max;

static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;

static inline int irq_to_msi_nr(unsigned int irq)
{
	return irq & ZPCI_MSI_MASK;
}

static inline int irq_to_dev_nr(unsigned int irq)
{
	return irq >> ZPCI_MSI_VEC_BITS;
}

static inline struct zdev_irq_map *get_imap(unsigned int irq)
{
	return bucket->imap[irq_to_dev_nr(irq)];
}

struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
	return (struct zpci_dev *) pdev->sysdata;
}

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	mutex_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	mutex_unlock(&zpci_list_lock);
	return zdev;
}

bool zpci_fid_present(u32 fid)
{
	return get_zdev_by_fid(fid) != NULL;
}
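/*
 * A sketch of the virtual IRQ numbering assumed throughout this file:
 * the adapter summary bit number (aisb, one per function) forms the
 * upper bits and the MSI vector number the lower ZPCI_MSI_VEC_BITS
 * bits, i.e.
 *
 *	irq = (aisb << ZPCI_MSI_VEC_BITS) | vector
 *
 * irq_to_dev_nr() and irq_to_msi_nr() above simply undo this packing,
 * and bucket->imap[] is indexed by the aisb part.
 */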
static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register adapter interruptions */
static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
			      u64 aibv)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib *fib;
	int rc;

	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->isc = PCI_ISC;
	fib->noi = zdev->irq_map->msi_vecs;
	fib->sum = 1;		/* enable summary notifications */
	fib->aibv = aibv;
	fib->aibvo = 0;		/* every function has its own page */
	fib->aisb = (u64) bucket->aisb + aisb / 8;
	fib->aisbo = aisb & ZPCI_MSI_MASK;

	rc = s390pci_mod_fc(req, fib);
	pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);

	free_page((unsigned long) fib);
	return rc;
}

struct mod_pci_args {
	u64 base;
	u64 limit;
	u64 iota;
	u64 fmb_addr;
};

static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
	struct zpci_fib *fib;
	int rc;

	/* The FIB must be available even if it's not used */
	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->pba = args->base;
	fib->pal = args->limit;
	fib->iota = args->iota;
	fib->fmb_addr = args->fmb_addr;

	rc = s390pci_mod_fc(req, fib);
	free_page((unsigned long) fib);
	return rc;
}

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	struct mod_pci_args args = { base, limit, iota, 0 };

	WARN_ON_ONCE(iota & 0x3fff);
	args.iota |= ZPCI_IOTA_RTTO_FLAG;
	return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister adapter interruptions */
static int zpci_unregister_airq(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	if (zdev->fmb)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	args.fmb_addr = virt_to_phys(zdev->fmb);
	return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };
	int rc;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);

	kmem_cache_free(zdev_fmb_cache, zdev->fmb);
	zdev->fmb = NULL;
	return rc;
}
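/*
 * Reading aid for the accessors below: PCI config space is
 * little-endian while s390 is big-endian, so the value is shifted
 * to resp. from the top of the 64-bit register around a byte swap.
 * That way callers always see a CPU-endian value in the low bits,
 * whatever the access length.
 */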
#define ZPCI_PCIAS_CFGSPC	15

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = s390pci_load(&data, req, offset);
	if (!rc) {
		data = data << ((8 - len) * 8);
		data = le64_to_cpu(data);
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data = cpu_to_le64(data);
	data = data >> ((8 - len) * 8);
	rc = s390pci_store(data, req, offset);
	return rc;
}

void enable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 0);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 1);
}
EXPORT_SYMBOL_GPL(disable_irq);

void pcibios_fixup_bus(struct pci_bus *bus)
{
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	u64 addr;
	int idx;

	if ((bar & 7) != bar)
		return NULL;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
	return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx;

	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = 0;
	zpci_iomap_start[idx].bar = 0;
	spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_load(zdev, where, val, size);

	return ret;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_store(zdev, where, val, size);

	return ret;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

/* store the last handled bit to implement fair scheduling of devices */
static DEFINE_PER_CPU(unsigned long, next_sbit);
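/*
 * Rough flow of the interrupt handler below: scan the summary bit
 * vector (aisb) starting at the per-CPU next_sbit so that devices with
 * low bit numbers don't starve the others; for every summary bit found,
 * scan that device's interruption bit vector (aibv) and invoke the
 * registered callback per pending MSI vector; finally re-enable the
 * interruption mode via SIC and rescan once, since bits may have been
 * set after the last pass.
 */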
static void zpci_irq_handler(void *dont, void *need)
{
	unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
	int rescan = 0, max = aisb_max;
	struct zdev_irq_map *imap;

	inc_irq_stat(IRQIO_PCI);
	sbit = start;

scan:
	/* find summary_bit */
	for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
		clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
		last = sbit;

		/* find vector bit */
		imap = bucket->imap[sbit];
		for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
			inc_irq_stat(IRQIO_MSI);
			clear_bit(63 - mbit, &imap->aibv);

			spin_lock(&imap->lock);
			if (imap->cb[mbit].handler)
				imap->cb[mbit].handler(mbit,
					imap->cb[mbit].data);
			spin_unlock(&imap->lock);
		}
	}

	if (rescan)
		goto out;

	/* scan the skipped bits */
	if (start > 0) {
		sbit = 0;
		max = start;
		start = 0;
		goto scan;
	}

	/* enable interrupts again */
	set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);

	/* check again to not lose initiative */
	rmb();
	max = aisb_max;
	sbit = find_first_bit_left(bucket->aisb, max);
	if (sbit != max) {
		atomic_inc(&irq_retries);
		rescan++;
		goto scan;
	}
out:
	/* store next device bit to scan */
	__get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
}

/* msi_vecs - number of requested interrupts, 0 places the function into the error state */
static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	unsigned int aisb, msi_nr;
	struct msi_desc *msi;
	int rc;

	/* store the number of used MSI vectors */
	zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);

	spin_lock(&bucket->lock);
	aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
	/* alloc map exhausted? */
	if (aisb == PAGE_SIZE) {
		spin_unlock(&bucket->lock);
		return -EIO;
	}
	set_bit(aisb, bucket->alloc);
	spin_unlock(&bucket->lock);

	zdev->aisb = aisb;
	if (aisb + 1 > aisb_max)
		aisb_max = aisb + 1;

	/* wire up IRQ shortcut pointer */
	bucket->imap[zdev->aisb] = zdev->irq_map;
	pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);

	/* TODO: irq number 0 won't be found if we return less than the
	 * requested MSIs. Ignore it for now and fix in common code.
	 */
	msi_nr = aisb << ZPCI_MSI_VEC_BITS;

	list_for_each_entry(msi, &pdev->msi_list, list) {
		rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
					aisb << ZPCI_MSI_VEC_BITS);
		if (rc)
			return rc;
		msi_nr++;
	}

	rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
	if (rc) {
		clear_bit(aisb, bucket->alloc);
		dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
		return rc;
	}
	return (zdev->irq_map->msi_vecs == msi_vecs) ?
		0 : zdev->irq_map->msi_vecs;
}
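/*
 * Worked example (illustrative): a function that gets summary bit 2 and
 * asks for three vectors starts at msi_nr = 2 << 6 = 128, so its MSI
 * interrupts are numbered 128, 129 and 130; irq_to_dev_nr(129) = 2 and
 * irq_to_msi_nr(129) = 1 recover the summary bit and the vector.
 */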
static void zpci_teardown_msi(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct msi_desc *msi;
	int aisb, rc;

	rc = zpci_unregister_airq(zdev);
	if (rc) {
		dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
		return;
	}

	msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
	aisb = irq_to_dev_nr(msi->irq);

	list_for_each_entry(msi, &pdev->msi_list, list)
		zpci_teardown_msi_irq(zdev, msi);

	clear_bit(aisb, bucket->alloc);
	if (aisb + 1 == aisb_max)
		aisb_max--;
}

int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
	if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
		return -EINVAL;
	return zpci_setup_msi(pdev, nvec);
}

void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	pr_info("%s: on pdev: %p\n", __func__, pdev);
	zpci_teardown_msi(pdev);
}

static void zpci_map_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
		pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
			 i, pdev->resource[i].start, pdev->resource[i].end);
	}
}

struct zpci_dev *zpci_alloc_device(void)
{
	struct zpci_dev *zdev;

	/* Alloc memory for our private pci device data */
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* Alloc aibv & callback space */
	zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
	if (!zdev->irq_map)
		goto error;
	WARN_ON((u64) zdev->irq_map & 0xff);
	return zdev;

error:
	kfree(zdev);
	return ERR_PTR(-ENOMEM);
}

void zpci_free_device(struct zpci_dev *zdev)
{
	kmem_cache_free(zdev_irq_cache, zdev->irq_map);
	kfree(zdev);
}
/*
 * Too late for any s390 specific setup, since interrupts must be set up
 * already which requires DMA setup too and the pci scan will access the
 * config space, which only works if the function handle is enabled.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct resource *res;
	u16 cmd;
	int i;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];

		if (res->flags & IORESOURCE_IO)
			return -EINVAL;

		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return 0;
}

int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return zpci_sysfs_add_device(&pdev->dev);
}

int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
{
	int msi_nr = irq_to_msi_nr(irq);
	struct zdev_irq_map *imap;
	struct msi_desc *msi;

	msi = irq_get_msi_desc(irq);
	if (!msi)
		return -EIO;

	imap = get_imap(irq);
	spin_lock_init(&imap->lock);

	pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
	imap->cb[msi_nr].handler = handler;
	imap->cb[msi_nr].data = data;

	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	zpci_msi_set_mask_bits(msi, 1, 0);
	return 0;
}

void zpci_free_irq(unsigned int irq)
{
	struct zdev_irq_map *imap = get_imap(irq);
	int msi_nr = irq_to_msi_nr(irq);
	unsigned long flags;

	pr_debug("%s: for irq: %d\n", __func__, irq);

	spin_lock_irqsave(&imap->lock, flags);
	imap->cb[msi_nr].handler = NULL;
	imap->cb[msi_nr].data = NULL;
	spin_unlock_irqrestore(&imap->lock, flags);
}

int request_irq(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n",
		 __func__, irq, handler, irqflags, devname);

	return zpci_request_irq(irq, handler, dev_id);
}
EXPORT_SYMBOL_GPL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
	zpci_free_irq(irq);
}
EXPORT_SYMBOL_GPL(free_irq);

static int __init zpci_irq_init(void)
{
	int cpu, rc;

	bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
	if (!bucket)
		return -ENOMEM;

	bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!bucket->aisb) {
		rc = -ENOMEM;
		goto out_aisb;
	}

	bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!bucket->alloc) {
		rc = -ENOMEM;
		goto out_alloc;
	}

	isc_register(PCI_ISC);
	zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
	if (IS_ERR(zpci_irq_si)) {
		rc = PTR_ERR(zpci_irq_si);
		zpci_irq_si = NULL;
		goto out_ai;
	}

	for_each_online_cpu(cpu)
		per_cpu(next_sbit, cpu) = 0;

	spin_lock_init(&bucket->lock);
	/* set summary to 1 to be called every time for the ISC */
	*zpci_irq_si = 1;
	set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	return 0;

out_ai:
	isc_unregister(PCI_ISC);
	free_page((unsigned long) bucket->alloc);
out_alloc:
	free_page((unsigned long) bucket->aisb);
out_aisb:
	kfree(bucket);
	return rc;
}

static void zpci_irq_exit(void)
{
	free_page((unsigned long) bucket->alloc);
	free_page((unsigned long) bucket->aisb);
	s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
	isc_unregister(PCI_ISC);
	kfree(bucket);
}
void zpci_debug_info(struct zpci_dev *zdev, struct seq_file *m)
{
	if (!zdev)
		return;

	seq_printf(m, "global irq retries: %u\n", atomic_read(&irq_retries));
	seq_printf(m, "aibv[0]:%016lx aibv[1]:%016lx aisb:%016lx\n",
		   get_imap(0)->aibv, get_imap(1)->aibv, *bucket->aisb);
}

static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
						unsigned long flags, int domain)
{
	struct resource *r;
	char *name;
	int rc;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return ERR_PTR(-ENOMEM);
	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->parent = &iomem_resource;
	name = kmalloc(18, GFP_KERNEL);
	if (!name) {
		kfree(r);
		return ERR_PTR(-ENOMEM);
	}
	sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
	r->name = name;

	rc = request_resource(&iomem_resource, r);
	if (rc)
		pr_debug("request resource %pR failed\n", r);
	return r;
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	int entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
	if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
}

int pcibios_add_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);

	zdev->pdev = pdev;
	zpci_debug_init_device(zdev);
	zpci_fmb_enable_device(zdev);
	zpci_map_resources(zdev);

	return 0;
}

static int zpci_scan_bus(struct zpci_dev *zdev)
{
	struct resource *res;
	LIST_HEAD(resources);
	int i;

	/* allocate mapping entry for each used bar */
	for (i = 0; i < PCI_BAR_COUNT; i++) {
		unsigned long addr, size, flags;
		int entry;

		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);

		size = 1UL << zdev->bars[i].size;

		res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
		if (IS_ERR(res)) {
			zpci_free_iomap(zdev, entry);
			return PTR_ERR(res);
		}
		pci_add_resource(&resources, res);
	}

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus)
		return -EIO;

	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	return 0;
}

static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}
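/*
 * Reading aid for the bring-up sequence below: a function reported by
 * firmware as CONFIGURED is first enabled (clp_enable_fh), then gets
 * its DMA translation set up, is marked ONLINE and finally scanned as
 * a PCI root bus; teardown runs in the opposite order.
 */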
int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;
	pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_free;

		zdev->state = ZPCI_FN_STATE_ONLINE;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	mutex_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	if (hotplug_ops)
		hotplug_ops->create_slot(zdev);
	mutex_unlock(&zpci_list_lock);

	return 0;

out_disable:
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}

void zpci_stop_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * Note: SCLP disables fh via set-pci-fn so don't
	 * do that here.
	 */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);

int zpci_scan_device(struct zpci_dev *zdev)
{
	zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
	if (!zdev->pdev) {
		pr_err("pci_scan_single_device failed for fid: 0x%x\n",
		       zdev->fid);
		goto out;
	}

	pci_bus_add_devices(zdev->bus);

	return 0;
out:
	zpci_dma_exit_device(zdev);
	clp_disable_fh(zdev);
	return -EIO;
}
EXPORT_SYMBOL_GPL(zpci_scan_device);

static inline int barsize(u8 size)
{
	return (size) ? (1 << size) >> 10 : 0;
}
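/*
 * Worked example for barsize() above: BAR sizes are kept as power-of-two
 * exponents (cf. the "1UL << zdev->bars[i].size" in zpci_scan_bus), so
 * barsize(16) = (1 << 16) >> 10 = 64, i.e. a 64 KiB BAR; barsize(0)
 * denotes an unused BAR.
 */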
static int zpci_mem_init(void)
{
	zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
				L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
	if (!zdev_irq_cache)
		goto error_zdev;

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
				16, 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	/* TODO: use realloc */
	zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
				   GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;
	return 0;

error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	kmem_cache_destroy(zdev_irq_cache);
error_zdev:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_irq_cache);
	kmem_cache_destroy(zdev_fmb_cache);
}

void zpci_register_hp_ops(struct pci_hp_callback_ops *ops)
{
	mutex_lock(&zpci_list_lock);
	hotplug_ops = ops;
	mutex_unlock(&zpci_list_lock);
}
EXPORT_SYMBOL_GPL(zpci_register_hp_ops);

void zpci_deregister_hp_ops(void)
{
	mutex_lock(&zpci_list_lock);
	hotplug_ops = NULL;
	mutex_unlock(&zpci_list_lock);
}
EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);

unsigned int s390_pci_probe;
EXPORT_SYMBOL_GPL(s390_pci_probe);

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "on")) {
		s390_pci_probe = 1;
		return NULL;
	}
	return str;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(2) || !test_facility(69)
	    || !test_facility(71) || !test_facility(72))
		return 0;

	pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
		test_facility(69), test_facility(70),
		test_facility(71));

	rc = zpci_debug_init();
	if (rc)
		return rc;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_msihash_init();
	if (rc)
		goto out_hash;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_find_pci_devices();
	if (rc)
		goto out_find;

	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_msihash_exit();
out_hash:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
	return rc;
}
subsys_initcall(pci_base_init);