// SPDX-License-Identifier: GPL-2.0
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-map-ops.h>
#include <asm/iommu-common.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"
#include "kernel.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major;
static unsigned long vpci_minor;

struct vpci_version {
	unsigned long major;
	unsigned long minor;
};

/* Ordered from largest major to lowest */
static struct vpci_version vpci_versions[] = {
	{ .major = 2, .minor = 0 },
	{ .major = 1, .minor = 1 },
};

static unsigned long vatu_major = 1;
static unsigned long vatu_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

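/* Mappings are staged in a per-CPU batch so they can be handed to the
 * hypervisor in bulk rather than one page at a time: iommu_batch_start()
 * begins a run at a given IOTSB entry, iommu_batch_add() appends physical
 * page addresses to the per-CPU pglist, and iommu_batch_flush() /
 * iommu_batch_end() pass the accumulated pages to the hypervisor.
 */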
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for. */
	unsigned long	prot;		/* IOMMU page protections */
	unsigned long	entry;		/* Index into IOTSB. */
	u64		*pglist;	/* List of physical pages */
	unsigned long	npages;		/* Number of pages in list. */
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	p->dev = dev;
	p->prot = prot;
	p->entry = entry;
	p->npages = 0;
}

static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
{
	return iommu->atu && mask > DMA_BIT_MASK(32);
}

/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	u64 *pglist = p->pglist;
	u64 index_count;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	unsigned long npages = p->npages;
	unsigned long iotsb_num;
	unsigned long ret;
	long num;

	/* VPCI maj=1, min=[0,1] only supports read and write */
	if (vpci_major < 2)
		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);

	while (npages != 0) {
		if (!iommu_use_atu(pbm->iommu, mask)) {
			num = pci_sun4v_iommu_map(devhandle,
						  HV_PCI_TSBID(0, entry),
						  npages,
						  prot,
						  __pa(pglist));
			if (unlikely(num < 0)) {
				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle,
						   HV_PCI_TSBID(0, entry),
						   npages, prot, __pa(pglist),
						   num);
				return -1;
			}
		} else {
			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
			ret = pci_sun4v_iotsb_map(devhandle,
						  iotsb_num,
						  index_count,
						  prot,
						  __pa(pglist),
						  &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle, iotsb_num,
						   index_count, prot,
						   __pa(pglist), ret);
				return -1;
			}
		}
		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

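/* Flush the current batch if the next mapping does not extend it
 * contiguously, then restart the batch at @entry.  The ~0UL sentinel set
 * by dma_4v_map_sg() marks a batch with nothing to flush yet.
 */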
static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p, mask);
	p->entry = entry;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p, mask);

	return 0;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_end(u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p, mask);
}

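/* Allocate size bytes of IO-page-aligned memory, reserve a contiguous range
 * of entries from either the legacy IOMMU table or the ATU table (depending
 * on the coherent DMA mask), and map the pages through the batching helpers
 * above.
 */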
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	u64 mask;
	unsigned long flags, order, first_page, npages, n;
	unsigned long prot = 0;
	struct iommu *iommu;
	struct iommu_map_table *tbl;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order > MAX_PAGE_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;
	mask = dev->coherent_dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &iommu->atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ | prot |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
				       unsigned long iotsb_num,
				       struct pci_bus *bus_dev)
{
	struct pci_dev *pdev;
	unsigned long err;
	unsigned int bus;
	unsigned int device;
	unsigned int fun;

	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
		if (pdev->subordinate) {
			/* No need to bind pci bridge */
			dma_4v_iotsb_bind(devhandle, iotsb_num,
					  pdev->subordinate);
		} else {
			bus = bus_dev->number;
			device = PCI_SLOT(pdev->devfn);
			fun = PCI_FUNC(pdev->devfn);
			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
						   HV_PCI_DEVICE_BUILD(bus,
								       device,
								       fun));

			/* If bind fails for one device it is going to fail
			 * for the rest of the devices as well, because we
			 * are sharing the IOTSB.  So in case of failure
			 * simply return the error.
			 */
			if (err)
				return err;
		}
	}

	return 0;
}

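/* Tear down a run of IOTTE mappings.  DMA addresses at or below 4GB belong
 * to the legacy IOMMU and are unmapped with pci_iommu_demap; higher
 * addresses belong to the ATU IOTSB and go through pci_iotsb_demap instead.
 */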
static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
			       dma_addr_t dvma, unsigned long iotsb_num,
			       unsigned long entry, unsigned long npages)
{
	unsigned long num, flags;
	unsigned long ret;

	local_irq_save(flags);
	do {
		if (dvma <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_demap(devhandle,
						    HV_PCI_TSBID(0, entry),
						    npages);
		} else {
			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
						    entry, npages, &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
						   ret);
			}
		}
		entry += num;
		npages -= num;
	} while (npages != 0);
	local_irq_restore(flags);
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long order, npages, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	if (!iommu_use_atu(iommu, dvma)) {
		tbl = &iommu->tbl;
		iotsb_num = 0; /* we don't care for legacy iommu */
	} else {
		tbl = &atu->tbl;
		iotsb_num = atu->iotsb->iotsb_num;
	}
	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_phys(struct device *dev, phys_addr_t phys,
				  size_t sz, enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long flags, npages, oaddr;
	unsigned long i, prot;
	dma_addr_t bus_addr, ret;
	long entry;

	if (unlikely(attrs & DMA_ATTR_MMIO))
		/*
		 * This check is included because older versions of the code
		 * lacked MMIO path support, and my ability to test this path
		 * is limited. However, from a software technical standpoint,
		 * there is no restriction, as the following code operates
		 * solely on physical addresses.
		 */
		goto bad;

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(phys_to_virt(phys));
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, phys += IO_PAGE_SIZE) {
		long err = iommu_batch_add(phys, mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_MAPPING_ERROR;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return DMA_MAPPING_ERROR;
}

static void dma_4v_unmap_phys(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long npages;
	unsigned long iotsb_num;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	if (bus_addr <= DMA_BIT_MASK(32)) {
		iotsb_num = 0; /* we don't care for legacy iommu */
		tbl = &iommu->tbl;
	} else {
		iotsb_num = atu->iotsb->iotsb_num;
		tbl = &atu->tbl;
	}
	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

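/* Map a scatterlist.  Each segment gets its own IOTSB range, but adjacent
 * segments are merged into one DMA segment when the allocated DMA addresses
 * are contiguous and the merge would not exceed the device's maximum segment
 * size or cross its segment boundary.
 */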
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return -EINVAL;
	atu = iommu->atu;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
					   tbl, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry, mask);

		/* Convert entry to a dma_addr_t */
		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr, mask);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end(mask);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return -EINVAL;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	struct atu *atu;
	unsigned long flags, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	local_irq_save(flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages;
		struct iommu_map_table *tbl;
		unsigned long shift = IO_PAGE_SHIFT;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		if (dma_handle <= DMA_BIT_MASK(32)) {
			iotsb_num = 0; /* we don't care for legacy iommu */
			tbl = &iommu->tbl;
		} else {
			iotsb_num = atu->iotsb->iotsb_num;
			tbl = &atu->tbl;
		}
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
				   entry, npages);
		iommu_tbl_range_free(tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	local_irq_restore(flags);
}

static int dma_4v_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (ali_sound_dma_hack(dev, device_mask))
		return 1;
	if (device_mask < iommu->dma_addr_mask)
		return 0;
	return 1;
}

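/* These ops are installed as the global sparc64 dma_ops when the first
 * sun4v PCI controller is probed (see pci_sun4v_probe() below).
 */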
static const struct dma_map_ops sun4v_dma_ops = {
	.alloc = dma_4v_alloc_coherent,
	.free = dma_4v_free_coherent,
	.map_phys = dma_4v_map_phys,
	.unmap_phys = dma_4v_unmap_phys,
	.map_sg = dma_4v_map_sg,
	.unmap_sg = dma_4v_unmap_sg,
	.dma_supported = dma_4v_supported,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

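/* Walk every entry of the legacy IOMMU and import mappings that firmware
 * (OBP) has already established: entries whose target page is not in the
 * kernel's available physical memory are kept and marked used in the map,
 * anything else is unmapped so it can be reallocated.
 */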
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu_map_table *iommu)
{
	struct iommu_pool *pool;
	unsigned long i, pool_nr, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
		pool = &(iommu->pools[pool_nr]);
		for (i = pool->start; i <= pool->end; i++) {
			unsigned long ret, io_attrs, ra;

			ret = pci_sun4v_iommu_getmap(devhandle,
						     HV_PCI_TSBID(0, i),
						     &io_attrs, &ra);
			if (ret == HV_EOK) {
				if (page_in_phys_avail(ra)) {
					pci_sun4v_iommu_demap(devhandle,
							      HV_PCI_TSBID(0,
							      i), 1);
				} else {
					cnt++;
					__set_bit(i, iommu->map);
				}
			}
		}
	}
	return cnt;
}

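/* Allocate and register the ATU IOTSB with the hypervisor.  The table needs
 * one 8-byte IOTTE per IO page covering the ATU DVMA range, and is then
 * bound to every non-bridge PCI device under this PBM.
 */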
static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	struct atu_iotsb *iotsb;
	void *table;
	u64 table_size;
	u64 iotsb_num;
	unsigned long order;
	unsigned long err;

	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
	if (!iotsb) {
		err = -ENOMEM;
		goto out_err;
	}
	atu->iotsb = iotsb;

	/* calculate size of IOTSB */
	table_size = (atu->size / IO_PAGE_SIZE) * 8;
	order = get_order(table_size);
	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!table) {
		err = -ENOMEM;
		goto table_failed;
	}
	iotsb->table = table;
	iotsb->ra = __pa(table);
	iotsb->dvma_size = atu->size;
	iotsb->dvma_base = atu->base;
	iotsb->table_size = table_size;
	iotsb->page_size = IO_PAGE_SIZE;

	/* configure and register IOTSB with HV */
	err = pci_sun4v_iotsb_conf(pbm->devhandle,
				   iotsb->ra,
				   iotsb->table_size,
				   iotsb->page_size,
				   iotsb->dvma_base,
				   &iotsb_num);
	if (err) {
		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}
	iotsb->iotsb_num = iotsb_num;

	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
	if (err) {
		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}

	return 0;

iotsb_conf_failed:
	free_pages((unsigned long)table, order);
table_failed:
	kfree(iotsb);
out_err:
	return err;
}

static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	unsigned long err;
	const u64 *ranges;
	u64 map_size, num_iotte;
	u64 dma_mask;
	const u32 *page_size;
	int len;

	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
				 &len);
	if (!ranges) {
		pr_err(PFX "No iommu-address-ranges\n");
		return -EINVAL;
	}

	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
				    NULL);
	if (!page_size) {
		pr_err(PFX "No iommu-pagesizes\n");
		return -EINVAL;
	}

	/* Four iommu-address-ranges are supported.  Each range is a
	 * {base, size} pair.  ranges[0] and ranges[1] describe the 32-bit
	 * address space, while ranges[2] and ranges[3] describe the 64-bit
	 * space, which we want to use for 64-bit addressing.  ranges[2] and
	 * ranges[3] have the same 'size', so either could be used for
	 * mapping.  However, that size is too large for the OS to allocate
	 * an IOTSB for, so we use a fixed 32G size (ATU_64_SPACE_SIZE),
	 * which is more than enough for all PCIe devices to share.
	 */
	atu->ranges = (struct atu_ranges *)ranges;
	atu->base = atu->ranges[3].base;
	atu->size = ATU_64_SPACE_SIZE;

	/* Create IOTSB */
	err = pci_sun4v_atu_alloc_iotsb(pbm);
	if (err) {
		pr_err(PFX "Error creating ATU IOTSB\n");
		return err;
	}

	/* Create ATU iommu map.
	 * One bit represents one iotte in IOTSB table.
	 */
	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
	num_iotte = atu->size / IO_PAGE_SIZE;
	map_size = num_iotte / 8;
	atu->tbl.table_map_base = atu->base;
	atu->dma_addr_mask = dma_mask;
	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
	if (!atu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);

	return 0;
}

static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->tbl.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);
	sz = probe_existing_entries(pbm, &iommu->tbl);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry. */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
			      struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	/* If atu_init fails it is not a complete failure; we can
	 * still continue using the legacy IOMMU.
	 */
	if (pbm->iommu->atu) {
		err = pci_sun4v_atu_init(pbm);
		if (err) {
			kfree(pbm->iommu->atu);
			pbm->iommu->atu = NULL;
			pr_err(PFX "ATU init failed, err=%d\n", err);
		}
	}

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

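/* Probe one sun4v PCI controller node.  On the first probe this negotiates
 * the VPCI (and, if available, ATU) hypervisor API groups, installs
 * sun4v_dma_ops as the global DMA ops, and allocates the per-CPU pglist
 * pages used by the mapping batch code.
 */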
static int pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	struct atu *atu;
	u32 devhandle;
	int i, err = -ENODEV;
	static bool hv_atu = true;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
			vpci_major = vpci_versions[i].major;
			vpci_minor = vpci_versions[i].minor;

			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
						   &vpci_minor);
			if (!err)
				break;
		}

		if (err) {
			pr_err(PFX "Could not register hvapi, err=%d\n", err);
			return err;
		}
		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
			vpci_major, vpci_minor);

		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
		if (err) {
			/* don't return an error if we fail to register the
			 * ATU group, but ATU hcalls won't be available.
			 */
			hv_atu = false;
		} else {
			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
				vatu_major, vatu_minor);
		}

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;
	iommu->atu = NULL;
	if (hv_atu) {
		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
		if (!atu)
			pr_err(PFX "Could not allocate atu\n");
		else
			iommu->atu = atu;
	}

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(iommu->atu);
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = pci_sun4v_match,
	},
	.probe = pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);