/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}
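/* Usage sketch for the batch helpers above, mirroring what the
 * dma_4v_* callers below actually do; 'prot', 'entry' and
 * 'phys_base' stand for values the caller already holds:
 *
 *	local_irq_save(flags);
 *	iommu_batch_start(dev, prot, entry);
 *	for (n = 0; n < npages; n++)
 *		if (iommu_batch_add(phys_base + (n * IO_PAGE_SIZE)) < 0)
 *			goto fail;
 *	if (iommu_batch_end() < 0)
 *		goto fail;
 *	local_irq_restore(flags);
 *
 * The batch buffers one PGLIST_NENTS page list per CPU, so the
 * whole sequence must run with interrupts disabled.
 */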
/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		/* Map the physical pages, but keep first_page virtual
		 * so the failure paths below can free_pages() it.
		 */
		long err = iommu_batch_add(__pa(first_page) + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
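	/* Worked example (illustrative): with 8KB IO pages, mapping
	 * sz = 0x2200 bytes starting at page offset 0x1f00 gives
	 * IO_PAGE_ALIGN(0x1f00 + 0x2200) = 0x6000, i.e. npages = 3,
	 * one page more than sz alone would suggest.
	 */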
	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
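		/* What follows for each segment: allocate a contiguous
		 * IOTSB range, program one TSB entry per IO page via
		 * the batch helpers, then try to merge with the
		 * previous output segment.
		 */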
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - the allocated dma_addr isn't contiguous with
			 *   the previous allocation,
			 * - the merged segment would exceed the device's
			 *   max segment size, or
			 * - the merged segment would span a segment boundary.
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;
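/* Failure backout: walk the list again and release every IOTSB
 * range allocated so far, stopping at the last output segment.
 * Only allocator state is unwound here; the TSB entries themselves
 * are left in place (see the XXX below).
 */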
iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4v_dma_ops = {
	.alloc_coherent		= dma_4v_alloc_coherent,
	.free_coherent		= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
};

static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
					 struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
						      struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];
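	/* Worked example (illustrative): the default virtual-dma of
	 * [0x80000000, 0x80000000] describes a 2GB DMA window based
	 * at bus address 0x80000000, so dma_mask = 0x7fffffff and,
	 * with 8KB IO pages, num_tsb_entries = 0x80000000 / 0x2000 =
	 * 262144, giving a 32KB allocation bitmap below.
	 */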
	/* Setup initial software IOMMU state.  */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;
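	/* Note: *head advances below in sizeof(struct
	 * pci_sun4v_msiq_entry) (64 byte) steps and wraps to zero at
	 * the end of the queue, matching the byte-offset convention
	 * checked in pci_sun4v_get_head() above.
	 */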
	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}
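/* Final step of MSI-EQ bring-up: derive a sun4v IRQ from the
 * (devhandle, devino) pair, reset the queue state to IDLE, then
 * mark the queue valid so the hypervisor may deliver into it.
 */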
static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
					struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

static int __devinit pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}
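/* Match device-tree nodes named "pci" that carry the
 * "SUNW,sun4v-pci" compatible property, i.e. the hypervisor's
 * virtual PCI root nodes.
 */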
static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);