xref: /linux/arch/sparc/kernel/iommu.c (revision 6000fc4d6f3e55ad52cce8d76317187fe01af2aa)
1 /* iommu.c: Generic sparc64 IOMMU support.
2  *
3  * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
4  * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/delay.h>
10 #include <linux/device.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/errno.h>
13 #include <linux/iommu-helper.h>
14 
15 #ifdef CONFIG_PCI
16 #include <linux/pci.h>
17 #endif
18 
19 #include <asm/iommu.h>
20 
21 #include "iommu_common.h"
22 
23 #define STC_CTXMATCH_ADDR(STC, CTX)	\
24 	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
25 #define STC_FLUSHFLAG_INIT(STC) \
26 	(*((STC)->strbuf_flushflag) = 0UL)
27 #define STC_FLUSHFLAG_SET(STC) \
28 	(*((STC)->strbuf_flushflag) != 0UL)
29 
30 #define iommu_read(__reg) \
31 ({	u64 __ret; \
32 	__asm__ __volatile__("ldxa [%1] %2, %0" \
33 			     : "=r" (__ret) \
34 			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
35 			     : "memory"); \
36 	__ret; \
37 })
38 #define iommu_write(__reg, __val) \
39 	__asm__ __volatile__("stxa %0, [%1] %2" \
40 			     : /* no outputs */ \
41 			     : "r" (__val), "r" (__reg), \
42 			       "i" (ASI_PHYS_BYPASS_EC_E))
43 
44 /* Must be invoked under the IOMMU lock. */
45 static void iommu_flushall(struct iommu *iommu)
46 {
47 	if (iommu->iommu_flushinv) {
48 		iommu_write(iommu->iommu_flushinv, ~(u64)0);
49 	} else {
50 		unsigned long tag;
51 		int entry;
52 
53 		tag = iommu->iommu_tags;
54 		for (entry = 0; entry < 16; entry++) {
55 			iommu_write(tag, 0);
56 			tag += 8;
57 		}
58 
59 		/* Ensure completion of previous PIO writes. */
60 		(void) iommu_read(iommu->write_complete_reg);
61 	}
62 }
63 
64 #define IOPTE_CONSISTENT(CTX) \
65 	(IOPTE_VALID | IOPTE_CACHE | \
66 	 (((CTX) << 47) & IOPTE_CONTEXT))
67 
68 #define IOPTE_STREAMING(CTX) \
69 	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
70 
71 /* Existing mappings are never marked invalid; instead they
72  * are pointed at a dummy page.
73  */
74 #define IOPTE_IS_DUMMY(iommu, iopte)	\
75 	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
76 
77 static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
78 {
79 	unsigned long val = iopte_val(*iopte);
80 
81 	val &= ~IOPTE_PAGE;
82 	val |= iommu->dummy_page_pa;
83 
84 	iopte_val(*iopte) = val;
85 }
86 
87 /* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
88  * facility it must all be done in one pass while under the iommu lock.
89  *
90  * On sun4u platforms, we only flush the IOMMU once every time we've passed
91  * over the entire page table doing allocations.  Therefore we only ever advance
92  * the hint and cannot backtrack it.
93  */
94 unsigned long iommu_range_alloc(struct device *dev,
95 				struct iommu *iommu,
96 				unsigned long npages,
97 				unsigned long *handle)
98 {
99 	unsigned long n, end, start, limit, boundary_size;
100 	struct iommu_arena *arena = &iommu->arena;
101 	int pass = 0;
102 
103 	/* This allocator was derived from x86_64's bit string search */
104 
105 	/* Sanity check */
106 	if (unlikely(npages == 0)) {
107 		if (printk_ratelimit())
108 			WARN_ON(1);
109 		return DMA_ERROR_CODE;
110 	}
111 
112 	if (handle && *handle)
113 		start = *handle;
114 	else
115 		start = arena->hint;
116 
117 	limit = arena->limit;
118 
119 	/* The case below can happen if we have a small segment appended
120 	 * to a large one, or when the previous alloc was at the very end
121 	 * of the available space. If so, go back to the beginning and flush.
122 	 */
123 	if (start >= limit) {
124 		start = 0;
125 		if (iommu->flush_all)
126 			iommu->flush_all(iommu);
127 	}
128 
129  again:
130 
131 	if (dev)
132 		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
133 				      1 << IO_PAGE_SHIFT);
134 	else
135 		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
136 
137 	n = iommu_area_alloc(arena->map, limit, start, npages,
138 			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
139 			     boundary_size >> IO_PAGE_SHIFT, 0);
140 	if (n == -1) {
141 		if (likely(pass < 1)) {
142 			/* First failure, rescan from the beginning.  */
143 			start = 0;
144 			if (iommu->flush_all)
145 				iommu->flush_all(iommu);
146 			pass++;
147 			goto again;
148 		} else {
149 			/* Second failure, give up */
150 			return DMA_ERROR_CODE;
151 		}
152 	}
153 
154 	end = n + npages;
155 
156 	arena->hint = end;
157 
158 	/* Update handle for SG allocations */
159 	if (handle)
160 		*handle = end;
161 
162 	return n;
163 }
164 
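/* Return a previously allocated range of IOMMU pages to the arena.
 * Callers hold the IOMMU lock.
 */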
165 void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
166 {
167 	struct iommu_arena *arena = &iommu->arena;
168 	unsigned long entry;
169 
170 	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
171 
172 	iommu_area_free(arena->map, entry, npages);
173 }
174 
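/* Set up the software state for one IOMMU: the allocation arena bitmap,
 * the dummy page that inactive IO PTEs point at, and the IOMMU page
 * table (TSB) itself, all allocated on the given NUMA node.
 */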
175 int iommu_table_init(struct iommu *iommu, int tsbsize,
176 		     u32 dma_offset, u32 dma_addr_mask,
177 		     int numa_node)
178 {
179 	unsigned long i, order, sz, num_tsb_entries;
180 	struct page *page;
181 
182 	num_tsb_entries = tsbsize / sizeof(iopte_t);
183 
184 	/* Setup initial software IOMMU state. */
185 	spin_lock_init(&iommu->lock);
186 	iommu->ctx_lowest_free = 1;
187 	iommu->page_table_map_base = dma_offset;
188 	iommu->dma_addr_mask = dma_addr_mask;
189 
190 	/* Allocate and initialize the free area map.  */
191 	sz = num_tsb_entries / 8;
192 	sz = (sz + 7UL) & ~7UL;
193 	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
194 	if (!iommu->arena.map) {
195 		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
196 		return -ENOMEM;
197 	}
198 	memset(iommu->arena.map, 0, sz);
199 	iommu->arena.limit = num_tsb_entries;
200 
201 	if (tlb_type != hypervisor)
202 		iommu->flush_all = iommu_flushall;
203 
204 	/* Allocate and initialize the dummy page, which inactive
205 	 * IO PTEs are made to point at.
206 	 */
207 	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
208 	if (!page) {
209 		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
210 		goto out_free_map;
211 	}
212 	iommu->dummy_page = (unsigned long) page_address(page);
213 	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
214 	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
215 
216 	/* Now allocate and setup the IOMMU page table itself.  */
217 	order = get_order(tsbsize);
218 	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
219 	if (!page) {
220 		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
221 		goto out_free_dummy_page;
222 	}
223 	iommu->page_table = (iopte_t *)page_address(page);
224 
225 	for (i = 0; i < num_tsb_entries; i++)
226 		iopte_make_dummy(iommu, &iommu->page_table[i]);
227 
228 	return 0;
229 
230 out_free_dummy_page:
231 	free_page(iommu->dummy_page);
232 	iommu->dummy_page = 0UL;
233 
234 out_free_map:
235 	kfree(iommu->arena.map);
236 	iommu->arena.map = NULL;
237 
238 	return -ENOMEM;
239 }
240 
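/* Allocate 'npages' consecutive IOMMU page table entries and return a
 * pointer to the first IOPTE, or NULL if the arena is exhausted.
 */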
241 static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
242 				    unsigned long npages)
243 {
244 	unsigned long entry;
245 
246 	entry = iommu_range_alloc(dev, iommu, npages, NULL);
247 	if (unlikely(entry == DMA_ERROR_CODE))
248 		return NULL;
249 
250 	return iommu->page_table + entry;
251 }
252 
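/* Allocate a streaming buffer context number.  Context 0 means "no
 * context" and is also what we fall back to when the bitmap is full.
 */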
253 static int iommu_alloc_ctx(struct iommu *iommu)
254 {
255 	int lowest = iommu->ctx_lowest_free;
256 	int sz = IOMMU_NUM_CTXS - lowest;
257 	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
258 
259 	if (unlikely(n == sz)) {
260 		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
261 		if (unlikely(n == lowest)) {
262 			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
263 			n = 0;
264 		}
265 	}
266 	if (n)
267 		__set_bit(n, iommu->ctx_bitmap);
268 
269 	return n;
270 }
271 
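/* Release a context number; ctx 0 (no context) is ignored. */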
272 static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
273 {
274 	if (likely(ctx)) {
275 		__clear_bit(ctx, iommu->ctx_bitmap);
276 		if (ctx < iommu->ctx_lowest_free)
277 			iommu->ctx_lowest_free = ctx;
278 	}
279 }
280 
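/* Allocate a physically contiguous buffer and map it consistent
 * (non-streaming) in the IOMMU, returning both the CPU virtual
 * address and the DMA address.
 */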
281 static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
282 				   dma_addr_t *dma_addrp, gfp_t gfp)
283 {
284 	unsigned long flags, order, first_page;
285 	struct iommu *iommu;
286 	struct page *page;
287 	int npages, nid;
288 	iopte_t *iopte;
289 	void *ret;
290 
291 	size = IO_PAGE_ALIGN(size);
292 	order = get_order(size);
293 	if (order >= 10)
294 		return NULL;
295 
296 	nid = dev->archdata.numa_node;
297 	page = alloc_pages_node(nid, gfp, order);
298 	if (unlikely(!page))
299 		return NULL;
300 
301 	first_page = (unsigned long) page_address(page);
302 	memset((char *)first_page, 0, PAGE_SIZE << order);
303 
304 	iommu = dev->archdata.iommu;
305 
306 	spin_lock_irqsave(&iommu->lock, flags);
307 	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
308 	spin_unlock_irqrestore(&iommu->lock, flags);
309 
310 	if (unlikely(iopte == NULL)) {
311 		free_pages(first_page, order);
312 		return NULL;
313 	}
314 
315 	*dma_addrp = (iommu->page_table_map_base +
316 		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
317 	ret = (void *) first_page;
318 	npages = size >> IO_PAGE_SHIFT;
319 	first_page = __pa(first_page);
320 	while (npages--) {
321 		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
322 				     IOPTE_WRITE |
323 				     (first_page & IOPTE_PAGE));
324 		iopte++;
325 		first_page += IO_PAGE_SIZE;
326 	}
327 
328 	return ret;
329 }
330 
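/* Undo dma_4u_alloc_coherent(): release the IOMMU range and free the pages. */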
331 static void dma_4u_free_coherent(struct device *dev, size_t size,
332 				 void *cpu, dma_addr_t dvma)
333 {
334 	struct iommu *iommu;
335 	iopte_t *iopte;
336 	unsigned long flags, order, npages;
337 
338 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
339 	iommu = dev->archdata.iommu;
340 	iopte = iommu->page_table +
341 		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
342 
343 	spin_lock_irqsave(&iommu->lock, flags);
344 
345 	iommu_range_free(iommu, dvma, npages);
346 
347 	spin_unlock_irqrestore(&iommu->lock, flags);
348 
349 	order = get_order(size);
350 	if (order < 10)
351 		free_pages((unsigned long)cpu, order);
352 }
353 
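/* Map a single page for streaming DMA: allocate IOPTEs covering the
 * buffer, optionally assign a streaming buffer flush context, and
 * return the resulting DMA address.
 */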
354 static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
355 				  unsigned long offset, size_t sz,
356 				  enum dma_data_direction direction)
357 {
358 	struct iommu *iommu;
359 	struct strbuf *strbuf;
360 	iopte_t *base;
361 	unsigned long flags, npages, oaddr;
362 	unsigned long i, base_paddr, ctx;
363 	u32 bus_addr, ret;
364 	unsigned long iopte_protection;
365 
366 	iommu = dev->archdata.iommu;
367 	strbuf = dev->archdata.stc;
368 
369 	if (unlikely(direction == DMA_NONE))
370 		goto bad_no_ctx;
371 
372 	oaddr = (unsigned long)(page_address(page) + offset);
373 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
374 	npages >>= IO_PAGE_SHIFT;
375 
376 	spin_lock_irqsave(&iommu->lock, flags);
377 	base = alloc_npages(dev, iommu, npages);
378 	ctx = 0;
379 	if (iommu->iommu_ctxflush)
380 		ctx = iommu_alloc_ctx(iommu);
381 	spin_unlock_irqrestore(&iommu->lock, flags);
382 
383 	if (unlikely(!base))
384 		goto bad;
385 
386 	bus_addr = (iommu->page_table_map_base +
387 		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
388 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
389 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
390 	if (strbuf->strbuf_enabled)
391 		iopte_protection = IOPTE_STREAMING(ctx);
392 	else
393 		iopte_protection = IOPTE_CONSISTENT(ctx);
394 	if (direction != DMA_TO_DEVICE)
395 		iopte_protection |= IOPTE_WRITE;
396 
397 	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
398 		iopte_val(*base) = iopte_protection | base_paddr;
399 
400 	return ret;
401 
402 bad:
403 	iommu_free_ctx(iommu, ctx);
404 bad_no_ctx:
405 	if (printk_ratelimit())
406 		WARN_ON(1);
407 	return DMA_ERROR_CODE;
408 }
409 
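/* Flush the streaming buffer for a DMA range, either by context match
 * (when supported) or page by page, then wait for the flush-flag write
 * that signals completion.  The wait is skipped for DMA_TO_DEVICE,
 * which cannot have dirtied the streaming cache.
 */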
410 static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
411 			 u32 vaddr, unsigned long ctx, unsigned long npages,
412 			 enum dma_data_direction direction)
413 {
414 	int limit;
415 
416 	if (strbuf->strbuf_ctxflush &&
417 	    iommu->iommu_ctxflush) {
418 		unsigned long matchreg, flushreg;
419 		u64 val;
420 
421 		flushreg = strbuf->strbuf_ctxflush;
422 		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
423 
424 		iommu_write(flushreg, ctx);
425 		val = iommu_read(matchreg);
426 		val &= 0xffff;
427 		if (!val)
428 			goto do_flush_sync;
429 
430 		while (val) {
431 			if (val & 0x1)
432 				iommu_write(flushreg, ctx);
433 			val >>= 1;
434 		}
435 		val = iommu_read(matchreg);
436 		if (unlikely(val)) {
437 			printk(KERN_WARNING "strbuf_flush: ctx flush "
438 			       "timeout matchreg[%llx] ctx[%lx]\n",
439 			       val, ctx);
440 			goto do_page_flush;
441 		}
442 	} else {
443 		unsigned long i;
444 
445 	do_page_flush:
446 		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
447 			iommu_write(strbuf->strbuf_pflush, vaddr);
448 	}
449 
450 do_flush_sync:
451 	/* If the device could not possibly have put dirty data into
452 	 * the streaming cache, no flush-flag synchronization needs
453 	 * to be performed.
454 	 */
455 	if (direction == DMA_TO_DEVICE)
456 		return;
457 
458 	STC_FLUSHFLAG_INIT(strbuf);
459 	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
460 	(void) iommu_read(iommu->write_complete_reg);
461 
462 	limit = 100000;
463 	while (!STC_FLUSHFLAG_SET(strbuf)) {
464 		limit--;
465 		if (!limit)
466 			break;
467 		udelay(1);
468 		rmb();
469 	}
470 	if (!limit)
471 		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
472 		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
473 		       vaddr, ctx, npages);
474 }
475 
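/* Tear down a dma_4u_map_page() mapping: flush the streaming buffer if
 * needed, point the IOPTEs back at the dummy page, and release the range.
 */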
476 static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
477 			      size_t sz, enum dma_data_direction direction)
478 {
479 	struct iommu *iommu;
480 	struct strbuf *strbuf;
481 	iopte_t *base;
482 	unsigned long flags, npages, ctx, i;
483 
484 	if (unlikely(direction == DMA_NONE)) {
485 		if (printk_ratelimit())
486 			WARN_ON(1);
487 		return;
488 	}
489 
490 	iommu = dev->archdata.iommu;
491 	strbuf = dev->archdata.stc;
492 
493 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
494 	npages >>= IO_PAGE_SHIFT;
495 	base = iommu->page_table +
496 		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
497 	bus_addr &= IO_PAGE_MASK;
498 
499 	spin_lock_irqsave(&iommu->lock, flags);
500 
501 	/* Record the context, if any. */
502 	ctx = 0;
503 	if (iommu->iommu_ctxflush)
504 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
505 
506 	/* Step 1: Kick data out of streaming buffers if necessary. */
507 	if (strbuf->strbuf_enabled)
508 		strbuf_flush(strbuf, iommu, bus_addr, ctx,
509 			     npages, direction);
510 
511 	/* Step 2: Clear out TSB entries. */
512 	for (i = 0; i < npages; i++)
513 		iopte_make_dummy(iommu, base + i);
514 
515 	iommu_range_free(iommu, bus_addr, npages);
516 
517 	iommu_free_ctx(iommu, ctx);
518 
519 	spin_unlock_irqrestore(&iommu->lock, flags);
520 }
521 
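/* Map a scatterlist for streaming DMA.  Entries whose DMA addresses
 * turn out to be contiguous are merged into a single segment, subject
 * to the device's segment size and boundary limits.
 */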
522 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
523 			 int nelems, enum dma_data_direction direction)
524 {
525 	struct scatterlist *s, *outs, *segstart;
526 	unsigned long flags, handle, prot, ctx;
527 	dma_addr_t dma_next = 0, dma_addr;
528 	unsigned int max_seg_size;
529 	unsigned long seg_boundary_size;
530 	int outcount, incount, i;
531 	struct strbuf *strbuf;
532 	struct iommu *iommu;
533 	unsigned long base_shift;
534 
535 	BUG_ON(direction == DMA_NONE);
536 
537 	iommu = dev->archdata.iommu;
538 	strbuf = dev->archdata.stc;
539 	if (nelems == 0 || !iommu)
540 		return 0;
541 
542 	spin_lock_irqsave(&iommu->lock, flags);
543 
544 	ctx = 0;
545 	if (iommu->iommu_ctxflush)
546 		ctx = iommu_alloc_ctx(iommu);
547 
548 	if (strbuf->strbuf_enabled)
549 		prot = IOPTE_STREAMING(ctx);
550 	else
551 		prot = IOPTE_CONSISTENT(ctx);
552 	if (direction != DMA_TO_DEVICE)
553 		prot |= IOPTE_WRITE;
554 
555 	outs = s = segstart = &sglist[0];
556 	outcount = 1;
557 	incount = nelems;
558 	handle = 0;
559 
560 	/* Init first segment length for backout at failure */
561 	outs->dma_length = 0;
562 
563 	max_seg_size = dma_get_max_seg_size(dev);
564 	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
565 				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
566 	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
567 	for_each_sg(sglist, s, nelems, i) {
568 		unsigned long paddr, npages, entry, out_entry = 0, slen;
569 		iopte_t *base;
570 
571 		slen = s->length;
572 		/* Sanity check */
573 		if (slen == 0) {
574 			dma_next = 0;
575 			continue;
576 		}
577 		/* Allocate iommu entries for that segment */
578 		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
579 		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
580 		entry = iommu_range_alloc(dev, iommu, npages, &handle);
581 
582 		/* Handle failure */
583 		if (unlikely(entry == DMA_ERROR_CODE)) {
584 			if (printk_ratelimit())
585 				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
586 				       " npages %lx\n", iommu, paddr, npages);
587 			goto iommu_map_failed;
588 		}
589 
590 		base = iommu->page_table + entry;
591 
592 		/* Convert entry to a dma_addr_t */
593 		dma_addr = iommu->page_table_map_base +
594 			(entry << IO_PAGE_SHIFT);
595 		dma_addr |= (s->offset & ~IO_PAGE_MASK);
596 
597 		/* Insert into HW table */
598 		paddr &= IO_PAGE_MASK;
599 		while (npages--) {
600 			iopte_val(*base) = prot | paddr;
601 			base++;
602 			paddr += IO_PAGE_SIZE;
603 		}
604 
605 		/* If we are in an open segment, try merging */
606 		if (segstart != s) {
607 			/* We cannot merge if:
608 			 * - the allocated dma_addr isn't contiguous with the previous allocation
609 			 * - the merged length would exceed max_seg_size or cross a segment boundary */
610 			if ((dma_addr != dma_next) ||
611 			    (outs->dma_length + s->length > max_seg_size) ||
612 			    (is_span_boundary(out_entry, base_shift,
613 					      seg_boundary_size, outs, s))) {
614 				/* Can't merge: create a new segment */
615 				segstart = s;
616 				outcount++;
617 				outs = sg_next(outs);
618 			} else {
619 				outs->dma_length += s->length;
620 			}
621 		}
622 
623 		if (segstart == s) {
624 			/* This is a new segment, fill entries */
625 			outs->dma_address = dma_addr;
626 			outs->dma_length = slen;
627 			out_entry = entry;
628 		}
629 
630 		/* Calculate next page pointer for contiguous check */
631 		dma_next = dma_addr + slen;
632 	}
633 
634 	spin_unlock_irqrestore(&iommu->lock, flags);
635 
636 	if (outcount < incount) {
637 		outs = sg_next(outs);
638 		outs->dma_address = DMA_ERROR_CODE;
639 		outs->dma_length = 0;
640 	}
641 
642 	return outcount;
643 
644 iommu_map_failed:
645 	for_each_sg(sglist, s, nelems, i) {
646 		if (s->dma_length != 0) {
647 			unsigned long vaddr, npages, entry, j;
648 			iopte_t *base;
649 
650 			vaddr = s->dma_address & IO_PAGE_MASK;
651 			npages = iommu_num_pages(s->dma_address, s->dma_length,
652 						 IO_PAGE_SIZE);
653 			iommu_range_free(iommu, vaddr, npages);
654 
655 			entry = (vaddr - iommu->page_table_map_base)
656 				>> IO_PAGE_SHIFT;
657 			base = iommu->page_table + entry;
658 
659 			for (j = 0; j < npages; j++)
660 				iopte_make_dummy(iommu, base + j);
661 
662 			s->dma_address = DMA_ERROR_CODE;
663 			s->dma_length = 0;
664 		}
665 		if (s == outs)
666 			break;
667 	}
668 	spin_unlock_irqrestore(&iommu->lock, flags);
669 
670 	return 0;
671 }
672 
673 /* If contexts are being used, they are the same in all of the mappings
674  * we make for a particular SG.
675  */
676 static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
677 {
678 	unsigned long ctx = 0;
679 
680 	if (iommu->iommu_ctxflush) {
681 		iopte_t *base;
682 		u32 bus_addr;
683 
684 		bus_addr = sg->dma_address & IO_PAGE_MASK;
685 		base = iommu->page_table +
686 			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
687 
688 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
689 	}
690 	return ctx;
691 }
692 
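/* Unmap a scatterlist previously mapped by dma_4u_map_sg(). */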
693 static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
694 			    int nelems, enum dma_data_direction direction)
695 {
696 	unsigned long flags, ctx;
697 	struct scatterlist *sg;
698 	struct strbuf *strbuf;
699 	struct iommu *iommu;
700 
701 	BUG_ON(direction == DMA_NONE);
702 
703 	iommu = dev->archdata.iommu;
704 	strbuf = dev->archdata.stc;
705 
706 	ctx = fetch_sg_ctx(iommu, sglist);
707 
708 	spin_lock_irqsave(&iommu->lock, flags);
709 
710 	sg = sglist;
711 	while (nelems--) {
712 		dma_addr_t dma_handle = sg->dma_address;
713 		unsigned int len = sg->dma_length;
714 		unsigned long npages, entry;
715 		iopte_t *base;
716 		int i;
717 
718 		if (!len)
719 			break;
720 		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
721 		iommu_range_free(iommu, dma_handle, npages);
722 
723 		entry = ((dma_handle - iommu->page_table_map_base)
724 			 >> IO_PAGE_SHIFT);
725 		base = iommu->page_table + entry;
726 
727 		dma_handle &= IO_PAGE_MASK;
728 		if (strbuf->strbuf_enabled)
729 			strbuf_flush(strbuf, iommu, dma_handle, ctx,
730 				     npages, direction);
731 
732 		for (i = 0; i < npages; i++)
733 			iopte_make_dummy(iommu, base + i);
734 
735 		sg = sg_next(sg);
736 	}
737 
738 	iommu_free_ctx(iommu, ctx);
739 
740 	spin_unlock_irqrestore(&iommu->lock, flags);
741 }
742 
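/* Make device writes to a streaming mapping visible to the CPU by
 * flushing the streaming buffer for the mapped range.
 */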
743 static void dma_4u_sync_single_for_cpu(struct device *dev,
744 				       dma_addr_t bus_addr, size_t sz,
745 				       enum dma_data_direction direction)
746 {
747 	struct iommu *iommu;
748 	struct strbuf *strbuf;
749 	unsigned long flags, ctx, npages;
750 
751 	iommu = dev->archdata.iommu;
752 	strbuf = dev->archdata.stc;
753 
754 	if (!strbuf->strbuf_enabled)
755 		return;
756 
757 	spin_lock_irqsave(&iommu->lock, flags);
758 
759 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
760 	npages >>= IO_PAGE_SHIFT;
761 	bus_addr &= IO_PAGE_MASK;
762 
763 	/* Step 1: Record the context, if any. */
764 	ctx = 0;
765 	if (iommu->iommu_ctxflush &&
766 	    strbuf->strbuf_ctxflush) {
767 		iopte_t *iopte;
768 
769 		iopte = iommu->page_table +
770 			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
771 		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
772 	}
773 
774 	/* Step 2: Kick data out of streaming buffers. */
775 	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
776 
777 	spin_unlock_irqrestore(&iommu->lock, flags);
778 }
779 
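/* As above, but for a scatterlist: flush from the start of the first
 * segment to the end of the last segment with a non-zero dma_length.
 */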
780 static void dma_4u_sync_sg_for_cpu(struct device *dev,
781 				   struct scatterlist *sglist, int nelems,
782 				   enum dma_data_direction direction)
783 {
784 	struct iommu *iommu;
785 	struct strbuf *strbuf;
786 	unsigned long flags, ctx, npages, i;
787 	struct scatterlist *sg, *sgprv;
788 	u32 bus_addr;
789 
790 	iommu = dev->archdata.iommu;
791 	strbuf = dev->archdata.stc;
792 
793 	if (!strbuf->strbuf_enabled)
794 		return;
795 
796 	spin_lock_irqsave(&iommu->lock, flags);
797 
798 	/* Step 1: Record the context, if any. */
799 	ctx = 0;
800 	if (iommu->iommu_ctxflush &&
801 	    strbuf->strbuf_ctxflush) {
802 		iopte_t *iopte;
803 
804 		iopte = iommu->page_table +
805 			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
806 		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
807 	}
808 
809 	/* Step 2: Kick data out of streaming buffers. */
810 	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
811 	sgprv = NULL;
812 	for_each_sg(sglist, sg, nelems, i) {
813 		if (sg->dma_length == 0)
814 			break;
815 		sgprv = sg;
816 	}
817 
818 	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
819 		  - bus_addr) >> IO_PAGE_SHIFT;
820 	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
821 
822 	spin_unlock_irqrestore(&iommu->lock, flags);
823 }
824 
825 static const struct dma_ops sun4u_dma_ops = {
826 	.alloc_coherent		= dma_4u_alloc_coherent,
827 	.free_coherent		= dma_4u_free_coherent,
828 	.map_page		= dma_4u_map_page,
829 	.unmap_page		= dma_4u_unmap_page,
830 	.map_sg			= dma_4u_map_sg,
831 	.unmap_sg		= dma_4u_unmap_sg,
832 	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
833 	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
834 };
835 
836 const struct dma_ops *dma_ops = &sun4u_dma_ops;
837 EXPORT_SYMBOL(dma_ops);
838 
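/* Report whether the device's DMA mask can be satisfied by this IOMMU,
 * deferring to the PCI layer's check for PCI devices.
 */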
839 int dma_supported(struct device *dev, u64 device_mask)
840 {
841 	struct iommu *iommu = dev->archdata.iommu;
842 	u64 dma_addr_mask = iommu->dma_addr_mask;
843 
844 	if (device_mask >= (1UL << 32UL))
845 		return 0;
846 
847 	if ((device_mask & dma_addr_mask) == dma_addr_mask)
848 		return 1;
849 
850 #ifdef CONFIG_PCI
851 	if (dev->bus == &pci_bus_type)
852 		return pci_dma_supported(to_pci_dev(dev), device_mask);
853 #endif
854 
855 	return 0;
856 }
857 EXPORT_SYMBOL(dma_supported);
858 
859 int dma_set_mask(struct device *dev, u64 dma_mask)
860 {
861 #ifdef CONFIG_PCI
862 	if (dev->bus == &pci_bus_type)
863 		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
864 #endif
865 	return -EINVAL;
866 }
867 EXPORT_SYMBOL(dma_set_mask);
868