/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
#include <linux/msi.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#define DEBUG				/* enable pr_debug */

#define SIC_IRQ_MODE_ALL		0
#define SIC_IRQ_MODE_SINGLE		1

#define ZPCI_NR_DMA_SPACES		1
#define ZPCI_MSI_VEC_BITS		6
#define ZPCI_NR_DEVICES			CONFIG_PCI_NR_FUNCTIONS

/* list of all detected zpci devices */
LIST_HEAD(zpci_list);
EXPORT_SYMBOL_GPL(zpci_list);
DEFINE_MUTEX(zpci_list_lock);
EXPORT_SYMBOL_GPL(zpci_list_lock);

struct pci_hp_callback_ops hotplug_ops;
EXPORT_SYMBOL_GPL(hotplug_ops);

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

struct callback {
	irq_handler_t	handler;
	void		*data;
};

struct zdev_irq_map {
	unsigned long	aibv;		/* AI bit vector */
	int		msi_vecs;	/* consecutive MSI-vectors used */
	int		__unused;
	struct callback	cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
	spinlock_t	lock;		/* protect callbacks against de-reg */
};

struct intr_bucket {
	/* amap of adapters, one bit per dev, corresponds to one irq nr */
	unsigned long	*alloc;
	/* AI summary bit, global page for all devices */
	unsigned long	*aisb;
	/* pointer to aibv and callback data in zdev */
	struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
	/* protects the whole bucket struct */
	spinlock_t	lock;
};

static struct intr_bucket *bucket;

/* Adapter local summary indicator */
static u8 *zpci_irq_si;

static atomic_t irq_retries = ATOMIC_INIT(0);

/* I/O Map */
static DEFINE_SPINLOCK(zpci_iomap_lock);
static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* highest irq summary bit */
static int __read_mostly aisb_max;

static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;

debug_info_t *pci_debug_msg_id;
debug_info_t *pci_debug_err_id;

static inline int irq_to_msi_nr(unsigned int irq)
{
	return irq & ZPCI_MSI_MASK;
}

static inline int irq_to_dev_nr(unsigned int irq)
{
	return irq >> ZPCI_MSI_VEC_BITS;
}

static inline struct zdev_irq_map *get_imap(unsigned int irq)
{
	return bucket->imap[irq_to_dev_nr(irq)];
}

struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
	return (struct zpci_dev *) pdev->sysdata;
}

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	mutex_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	mutex_unlock(&zpci_list_lock);
	return zdev;
}

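/* Check whether a PCI function with the given function ID has been detected */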
bool zpci_fid_present(u32 fid)
{
	return get_zdev_by_fid(fid) != NULL;
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Store PCI function information block */
static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
{
	struct zpci_fib *fib;
	u8 status, cc;

	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	do {
		cc = __stpcifc(zdev->fh, 0, fib, &status);
		if (cc == 2) {
			msleep(ZPCI_INSN_BUSY_DELAY);
			memset(fib, 0, PAGE_SIZE);
		}
	} while (cc == 2);

	if (cc)
		pr_err_once("%s: cc: %u status: %u\n",
			    __func__, cc, status);

	/* Return PCI function controls */
	*fc = fib->fc;

	free_page((unsigned long) fib);
	return (cc) ? -EIO : 0;
}

/* Modify PCI: Register adapter interruptions */
static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
			      u64 aibv)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib *fib;
	int rc;

	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->isc = PCI_ISC;
	fib->noi = zdev->irq_map->msi_vecs;
	fib->sum = 1;		/* enable summary notifications */
	fib->aibv = aibv;
	fib->aibvo = 0;		/* every function has its own page */
	fib->aisb = (u64) bucket->aisb + aisb / 8;
	fib->aisbo = aisb & ZPCI_MSI_MASK;

	rc = mpcifc_instr(req, fib);
	pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);

	free_page((unsigned long) fib);
	return rc;
}

struct mod_pci_args {
	u64 base;
	u64 limit;
	u64 iota;
	u64 fmb_addr;
};

static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
	struct zpci_fib *fib;
	int rc;

	/* The FIB must be available even if it's not used */
	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->pba = args->base;
	fib->pal = args->limit;
	fib->iota = args->iota;
	fib->fmb_addr = args->fmb_addr;

	rc = mpcifc_instr(req, fib);
	free_page((unsigned long) fib);
	return rc;
}

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	struct mod_pci_args args = { base, limit, iota, 0 };

	WARN_ON_ONCE(iota & 0x3fff);
	args.iota |= ZPCI_IOTA_RTTO_FLAG;
	return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister adapter interruptions */
static int zpci_unregister_airq(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
}

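/*
 * Note: the function measurement block passed to the hardware below is
 * assumed to require 16-byte alignment; zdev_fmb_cache is created with
 * that alignment in zpci_mem_init() and the WARN_ON below checks it.
 */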
/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	if (zdev->fmb)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	args.fmb_addr = virt_to_phys(zdev->fmb);
	return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };
	int rc;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);

	kmem_cache_free(zdev_fmb_cache, zdev->fmb);
	zdev->fmb = NULL;
	return rc;
}

#define ZPCI_PCIAS_CFGSPC	15

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = pcilg_instr(&data, req, offset);
	data = data << ((8 - len) * 8);
	data = le64_to_cpu(data);
	if (!rc)
		*val = (u32) data;
	else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data = cpu_to_le64(data);
	data = data >> ((8 - len) * 8);
	rc = pcistg_instr(data, req, offset);
	return rc;
}

void synchronize_irq(unsigned int irq)
{
	/*
	 * Not needed, the handler is protected by a lock and IRQs that occur
	 * after the handler is deleted are just NOPs.
	 */
}
EXPORT_SYMBOL_GPL(synchronize_irq);

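/*
 * There is no IRQ line to mask for PCI on s390: enable_irq() and
 * disable_irq() instead clear or set the per-vector MSI mask bit at the
 * device via zpci_msi_set_mask_bits().
 */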
void enable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 0);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 1);
}
EXPORT_SYMBOL_GPL(disable_irq);

void disable_irq_nosync(unsigned int irq)
{
	disable_irq(irq);
}
EXPORT_SYMBOL_GPL(disable_irq_nosync);

unsigned long probe_irq_on(void)
{
	return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_on);

int probe_irq_off(unsigned long val)
{
	return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long val)
{
	return val;
}
EXPORT_SYMBOL_GPL(probe_irq_mask);

void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	u64 addr;
	int idx;

	if ((bar & 7) != bar)
		return NULL;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
	return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx;

	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = 0;
	zpci_iomap_start[idx].bar = 0;
	spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	if (!zdev || devfn != ZPCI_DEVFN)
		return 0;
	return zpci_cfg_load(zdev, where, val, size);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	if (!zdev || devfn != ZPCI_DEVFN)
		return 0;
	return zpci_cfg_store(zdev, where, val, size);
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

/* store the last handled bit to implement fair scheduling of devices */
static DEFINE_PER_CPU(unsigned long, next_sbit);

static void zpci_irq_handler(void *dont, void *need)
{
	unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
	int rescan = 0, max = aisb_max;
	struct zdev_irq_map *imap;

	kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
	sbit = start;

scan:
	/* find summary_bit */
	for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
		clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
		last = sbit;

		/* find vector bit */
		imap = bucket->imap[sbit];
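		/*
		 * Walk the device's adapter interrupt bit vector (aibv):
		 * every set bit is one pending MSI vector; invoke its
		 * registered callback under imap->lock.
		 */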
		for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
			kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
			clear_bit(63 - mbit, &imap->aibv);

			spin_lock(&imap->lock);
			if (imap->cb[mbit].handler)
				imap->cb[mbit].handler(mbit,
						       imap->cb[mbit].data);
			spin_unlock(&imap->lock);
		}
	}

	if (rescan)
		goto out;

	/* scan the skipped bits */
	if (start > 0) {
		sbit = 0;
		max = start;
		start = 0;
		goto scan;
	}

	/* enable interrupts again */
	sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);

	/* check again to not lose initiative */
	rmb();
	max = aisb_max;
	sbit = find_first_bit_left(bucket->aisb, max);
	if (sbit != max) {
		atomic_inc(&irq_retries);
		rescan++;
		goto scan;
	}
out:
	/* store next device bit to scan */
	__get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
}

/* msi_vecs - number of requested interrupts; 0 places the function into the error state */
static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	unsigned int aisb, msi_nr;
	struct msi_desc *msi;
	int rc;

	/* store the number of used MSI vectors */
	zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);

	spin_lock(&bucket->lock);
	aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
	/* alloc map exhausted? */
	if (aisb == PAGE_SIZE) {
		spin_unlock(&bucket->lock);
		return -EIO;
	}
	set_bit(aisb, bucket->alloc);
	spin_unlock(&bucket->lock);

	zdev->aisb = aisb;
	if (aisb + 1 > aisb_max)
		aisb_max = aisb + 1;

	/* wire up IRQ shortcut pointer */
	bucket->imap[zdev->aisb] = zdev->irq_map;
	pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);

	/* TODO: irq number 0 won't be found if we return less than the
	 * requested MSIs. Ignore it for now and fix in common code.
	 */
	msi_nr = aisb << ZPCI_MSI_VEC_BITS;

	list_for_each_entry(msi, &pdev->msi_list, list) {
		rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
					aisb << ZPCI_MSI_VEC_BITS);
		if (rc)
			return rc;
		msi_nr++;
	}

	rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
	if (rc) {
		clear_bit(aisb, bucket->alloc);
		dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
		return rc;
	}
	return (zdev->irq_map->msi_vecs == msi_vecs) ?
		0 : zdev->irq_map->msi_vecs;
}

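/*
 * Reverse of zpci_setup_msi(): deregister the adapter interrupt first so
 * no new interrupts arrive, then tear down the per-vector MSI IRQs and
 * finally release the summary bit.
 */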
static void zpci_teardown_msi(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct msi_desc *msi;
	int aisb, rc;

	rc = zpci_unregister_airq(zdev);
	if (rc) {
		dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
		return;
	}

	msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
	aisb = irq_to_dev_nr(msi->irq);

	list_for_each_entry(msi, &pdev->msi_list, list)
		zpci_teardown_msi_irq(zdev, msi);

	clear_bit(aisb, bucket->alloc);
	if (aisb + 1 == aisb_max)
		aisb_max--;
}

int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
	if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
		return -EINVAL;
	return zpci_setup_msi(pdev, nvec);
}

void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	pr_info("%s: on pdev: %p\n", __func__, pdev);
	zpci_teardown_msi(pdev);
}

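/*
 * There is no memory-mapped PCI on s390: the resource addresses set up
 * below are iomap cookies returned by pci_iomap() (an iomap table index
 * shifted into the "address"), so BAR space must be accessed through the
 * ioread/iowrite primitives and never dereferenced directly.
 */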
static void zpci_map_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
		pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
			 i, pdev->resource[i].start, pdev->resource[i].end);
	}
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap(pdev, (void *) pdev->resource[i].start);
	}
}

struct zpci_dev *zpci_alloc_device(void)
{
	struct zpci_dev *zdev;

	/* Alloc memory for our private pci device data */
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* Alloc aibv & callback space */
	zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
	if (!zdev->irq_map)
		goto error;
	WARN_ON((u64) zdev->irq_map & 0xff);
	return zdev;

error:
	kfree(zdev);
	return ERR_PTR(-ENOMEM);
}

void zpci_free_device(struct zpci_dev *zdev)
{
	kmem_cache_free(zdev_irq_cache, zdev->irq_map);
	kfree(zdev);
}

/* Called on removal of pci_dev, leaves zpci and bus device */
static void zpci_remove_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);

	dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
	zdev->state = ZPCI_FN_STATE_CONFIGURED;
	zpci_dma_exit_device(zdev);
	zpci_fmb_disable_device(zdev);
	zpci_sysfs_remove_device(&pdev->dev);
	zpci_unmap_resources(pdev);
	list_del(&zdev->entry);		/* can be called from init */
	zdev->pdev = NULL;
}

static void zpci_scan_devices(void)
{
	struct zpci_dev *zdev;

	mutex_lock(&zpci_list_lock);
	list_for_each_entry(zdev, &zpci_list, entry)
		if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
			zpci_scan_device(zdev);
	mutex_unlock(&zpci_list_lock);
}

/*
 * Too late for any s390 specific setup, since interrupts must be set up
 * already which requires DMA setup too and the pci scan will access the
 * config space, which only works if the function handle is enabled.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct resource *res;
	u16 cmd;
	int i;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];

		if (res->flags & IORESOURCE_IO)
			return -EINVAL;

		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return 0;
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	zpci_remove_device(pdev);
	pdev->sysdata = NULL;
}

int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return zpci_sysfs_add_device(&pdev->dev);
}

int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
{
	int msi_nr = irq_to_msi_nr(irq);
	struct zdev_irq_map *imap;
	struct msi_desc *msi;

	msi = irq_get_msi_desc(irq);
	if (!msi)
		return -EIO;

	imap = get_imap(irq);
	spin_lock_init(&imap->lock);

	pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
	imap->cb[msi_nr].handler = handler;
	imap->cb[msi_nr].data = data;

	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	zpci_msi_set_mask_bits(msi, 1, 0);
	return 0;
}

void zpci_free_irq(unsigned int irq)
{
	struct zdev_irq_map *imap = get_imap(irq);
	int msi_nr = irq_to_msi_nr(irq);
	unsigned long flags;

	pr_debug("%s: for irq: %d\n", __func__, irq);

	spin_lock_irqsave(&imap->lock, flags);
	imap->cb[msi_nr].handler = NULL;
	imap->cb[msi_nr].data = NULL;
	spin_unlock_irqrestore(&imap->lock, flags);
}

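/*
 * The common kernel IRQ infrastructure is not used for PCI interrupts on
 * s390; request_irq() and free_irq() are provided here and simply install
 * or remove the callback that zpci_irq_handler() dispatches directly.
 */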
int request_irq(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n",
		 __func__, irq, handler, irqflags, devname);

	return zpci_request_irq(irq, handler, dev_id);
}
EXPORT_SYMBOL_GPL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
	zpci_free_irq(irq);
}
EXPORT_SYMBOL_GPL(free_irq);

static int __init zpci_irq_init(void)
{
	int cpu, rc;

	bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
	if (!bucket)
		return -ENOMEM;

	bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!bucket->aisb) {
		rc = -ENOMEM;
		goto out_aisb;
	}

	bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!bucket->alloc) {
		rc = -ENOMEM;
		goto out_alloc;
	}

	isc_register(PCI_ISC);
	zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
	if (IS_ERR(zpci_irq_si)) {
		rc = PTR_ERR(zpci_irq_si);
		zpci_irq_si = NULL;
		goto out_ai;
	}

	for_each_online_cpu(cpu)
		per_cpu(next_sbit, cpu) = 0;

	spin_lock_init(&bucket->lock);
	/* set summary to 1 to be called every time for the ISC */
	*zpci_irq_si = 1;
	sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	return 0;

out_ai:
	isc_unregister(PCI_ISC);
	free_page((unsigned long) bucket->alloc);
out_alloc:
	free_page((unsigned long) bucket->aisb);
out_aisb:
	kfree(bucket);
	return rc;
}

static void zpci_irq_exit(void)
{
	free_page((unsigned long) bucket->alloc);
	free_page((unsigned long) bucket->aisb);
	s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
	isc_unregister(PCI_ISC);
	kfree(bucket);
}

void zpci_debug_info(struct zpci_dev *zdev, struct seq_file *m)
{
	if (!zdev)
		return;

	seq_printf(m, "global irq retries: %u\n", atomic_read(&irq_retries));
	seq_printf(m, "aibv[0]:%016lx aibv[1]:%016lx aisb:%016lx\n",
		   get_imap(0)->aibv, get_imap(1)->aibv, *bucket->aisb);
}

static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
						unsigned long flags, int domain)
{
	struct resource *r;
	char *name;
	int rc;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return ERR_PTR(-ENOMEM);
	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->parent = &iomem_resource;
	name = kmalloc(18, GFP_KERNEL);
	if (!name) {
		kfree(r);
		return ERR_PTR(-ENOMEM);
	}
	sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
	r->name = name;

	rc = request_resource(&iomem_resource, r);
	if (rc)
		pr_debug("request resource %pR failed\n", r);
	return r;
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	int entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
	if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
}

static int zpci_create_device_bus(struct zpci_dev *zdev)
{
	struct resource *res;
	LIST_HEAD(resources);
	int i;

	/* allocate mapping entry for each used bar */
	for (i = 0; i < PCI_BAR_COUNT; i++) {
		unsigned long addr, size, flags;
		int entry;

		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);

		size = 1UL << zdev->bars[i].size;

		res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
		if (IS_ERR(res)) {
			zpci_free_iomap(zdev, entry);
			return PTR_ERR(res);
		}
		pci_add_resource(&resources, res);
	}

	zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
					zdev, &resources);
	if (!zdev->bus)
		return -EIO;

	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	return 0;
}

static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

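/*
 * Enabling a function makes its handle usable for config space access and
 * DMA: clp_enable_fh() enables the function, then the DMA translation
 * tables are set up; if DMA setup fails, the handle is disabled again.
 */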
int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;
	pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	rc = zpci_create_device_bus(zdev);
	if (rc)
		goto out_bus;

	mutex_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	if (hotplug_ops.create_slot)
		hotplug_ops.create_slot(zdev);
	mutex_unlock(&zpci_list_lock);

	if (zdev->state == ZPCI_FN_STATE_STANDBY)
		return 0;

	rc = zpci_enable_device(zdev);
	if (rc)
		goto out_start;
	return 0;

out_start:
	mutex_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	if (hotplug_ops.remove_slot)
		hotplug_ops.remove_slot(zdev);
	mutex_unlock(&zpci_list_lock);
out_bus:
	zpci_free_domain(zdev);
out:
	return rc;
}

void zpci_stop_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * Note: SCLP disables fh via set-pci-fn so don't
	 * do that here.
	 */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);

int zpci_scan_device(struct zpci_dev *zdev)
{
	zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
	if (!zdev->pdev) {
		pr_err("pci_scan_single_device failed for fid: 0x%x\n",
		       zdev->fid);
		goto out;
	}

	zpci_debug_init_device(zdev);
	zpci_fmb_enable_device(zdev);
	zpci_map_resources(zdev);
	pci_bus_add_devices(zdev->bus);

	/* now that pdev was added to the bus mark it as used */
	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out:
	zpci_dma_exit_device(zdev);
	clp_disable_fh(zdev);
	return -EIO;
}
EXPORT_SYMBOL_GPL(zpci_scan_device);

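/* size is log2 of the BAR size in bytes; return the BAR size in KiB */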
static inline int barsize(u8 size)
{
	return (size) ? (1 << size) >> 10 : 0;
}

static int zpci_mem_init(void)
{
	zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
					   L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
	if (!zdev_irq_cache)
		goto error_zdev;

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   16, 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	/* TODO: use realloc */
	zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
				   GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;
	return 0;

error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	kmem_cache_destroy(zdev_irq_cache);
error_zdev:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_irq_cache);
	kmem_cache_destroy(zdev_fmb_cache);
}

unsigned int pci_probe = 1;
EXPORT_SYMBOL_GPL(pci_probe);

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	}
	return str;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!pci_probe)
		return 0;

	if (!test_facility(2) || !test_facility(69)
	    || !test_facility(71) || !test_facility(72))
		return 0;

	pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
		test_facility(69), test_facility(70),
		test_facility(71));

	rc = zpci_debug_init();
	if (rc)
		return rc;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_msihash_init();
	if (rc)
		goto out_hash;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_find_pci_devices();
	if (rc)
		goto out_find;

	zpci_scan_devices();
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_msihash_exit();
out_hash:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
	return rc;
}
subsys_initcall(pci_base_init);