xref: /linux/drivers/iommu/omap-iommu.c (revision 2ba9268dd603d23e17643437b2246acb6844953b)
1 /*
2  * omap iommu: tlb and pagetable primitives
3  *
4  * Copyright (C) 2008-2010 Nokia Corporation
5  *
6  * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
7  *		Paul Mundt and Toshihiro Kobayashi
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 
14 #include <linux/err.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/interrupt.h>
18 #include <linux/ioport.h>
19 #include <linux/platform_device.h>
20 #include <linux/iommu.h>
21 #include <linux/omap-iommu.h>
22 #include <linux/mutex.h>
23 #include <linux/spinlock.h>
24 #include <linux/io.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/of.h>
27 #include <linux/of_iommu.h>
28 #include <linux/of_irq.h>
29 #include <linux/of_platform.h>
30 
31 #include <asm/cacheflush.h>
32 
33 #include <linux/platform_data/iommu-omap.h>
34 
35 #include "omap-iopgtable.h"
36 #include "omap-iommu.h"
37 
38 #define to_iommu(dev)							\
39 	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
40 
41 #define for_each_iotlb_cr(obj, n, __i, cr)				\
42 	for (__i = 0;							\
43 	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
44 	     __i++)
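/*
 * Note: each step of for_each_iotlb_cr() goes through __iotlb_read_cr(),
 * which repositions the TLB victim pointer, so callers that care about the
 * current lock state (e.g. __dump_tlb_entries()) save and restore MMU_LOCK
 * around the loop.
 */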
45 
46 /* bitmap of the page sizes currently supported */
47 #define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
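/*
 * These sizes map 1:1 onto the entry types handled in
 * iopgtable_store_entry_core(): 4K/64K become second-level small/large page
 * entries, while 1M/16M become first-level section/supersection entries.
 */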
48 
49 /**
50  * struct omap_iommu_domain - omap iommu domain
51  * @pgtable:	the page table
52  * @iommu_dev:	an omap iommu device attached to this domain. only a single
53  *		iommu device can be attached for now.
54  * @dev:	Device using this domain.
55  * @lock:	domain lock, should be taken when attaching/detaching
56  */
57 struct omap_iommu_domain {
58 	u32 *pgtable;
59 	struct omap_iommu *iommu_dev;
60 	struct device *dev;
61 	spinlock_t lock;
62 };
63 
64 #define MMU_LOCK_BASE_SHIFT	10
65 #define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
66 #define MMU_LOCK_BASE(x)	\
67 	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)
68 
69 #define MMU_LOCK_VICT_SHIFT	4
70 #define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
71 #define MMU_LOCK_VICT(x)	\
72 	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
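/*
 * Illustrative decode: an MMU_LOCK value of 0x0c20 gives
 * MMU_LOCK_BASE(0x0c20) == 3 (three preserved entries) and
 * MMU_LOCK_VICT(0x0c20) == 2 (next victim slot).
 */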
73 
74 struct iotlb_lock {
75 	short base;
76 	short vict;
77 };
78 
79 static struct platform_driver omap_iommu_driver;
80 static struct kmem_cache *iopte_cachep;
81 
82 /**
83  * omap_iommu_save_ctx - Save registers for pm off-mode support
84  * @dev:	client device
85  **/
86 void omap_iommu_save_ctx(struct device *dev)
87 {
88 	struct omap_iommu *obj = dev_to_omap_iommu(dev);
89 	u32 *p = obj->ctx;
90 	int i;
91 
92 	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
93 		p[i] = iommu_read_reg(obj, i * sizeof(u32));
94 		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
95 	}
96 }
97 EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
98 
99 /**
100  * omap_iommu_restore_ctx - Restore registers for pm off-mode support
101  * @dev:	client device
102  **/
103 void omap_iommu_restore_ctx(struct device *dev)
104 {
105 	struct omap_iommu *obj = dev_to_omap_iommu(dev);
106 	u32 *p = obj->ctx;
107 	int i;
108 
109 	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
110 		iommu_write_reg(obj, p[i], i * sizeof(u32));
111 		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
112 	}
113 }
114 EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
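/*
 * The save/restore helpers above are expected to be used as a pair by client
 * drivers (e.g. omap3isp, see the subsys_initcall note at the end of this
 * file) around power transitions that lose the MMU register context.
 */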
115 
116 static void __iommu_set_twl(struct omap_iommu *obj, bool on)
117 {
118 	u32 l = iommu_read_reg(obj, MMU_CNTL);
119 
120 	if (on)
121 		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
122 	else
123 		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
124 
125 	l &= ~MMU_CNTL_MASK;
126 	if (on)
127 		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
128 	else
129 		l |= (MMU_CNTL_MMU_EN);
130 
131 	iommu_write_reg(obj, l, MMU_CNTL);
132 }
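/*
 * In both cases above the MMU itself stays enabled; with the hardware table
 * walker on, the full TWL fault set is unmasked, otherwise only TLB misses
 * are reported, presumably so the TLB can then be managed by software.
 */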
133 
134 static int omap2_iommu_enable(struct omap_iommu *obj)
135 {
136 	u32 l, pa;
137 
138 	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd,  SZ_16K))
139 		return -EINVAL;
140 
141 	pa = virt_to_phys(obj->iopgd);
142 	if (!IS_ALIGNED(pa, SZ_16K))
143 		return -EINVAL;
144 
145 	l = iommu_read_reg(obj, MMU_REVISION);
146 	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
147 		 (l >> 4) & 0xf, l & 0xf);
148 
149 	iommu_write_reg(obj, pa, MMU_TTB);
150 
151 	if (obj->has_bus_err_back)
152 		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
153 
154 	__iommu_set_twl(obj, true);
155 
156 	return 0;
157 }
158 
159 static void omap2_iommu_disable(struct omap_iommu *obj)
160 {
161 	u32 l = iommu_read_reg(obj, MMU_CNTL);
162 
163 	l &= ~MMU_CNTL_MASK;
164 	iommu_write_reg(obj, l, MMU_CNTL);
165 
166 	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
167 }
168 
169 static int iommu_enable(struct omap_iommu *obj)
170 {
171 	int err;
172 	struct platform_device *pdev = to_platform_device(obj->dev);
173 	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
174 
175 	if (pdata && pdata->deassert_reset) {
176 		err = pdata->deassert_reset(pdev, pdata->reset_name);
177 		if (err) {
178 			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
179 			return err;
180 		}
181 	}
182 
183 	pm_runtime_get_sync(obj->dev);
184 
185 	err = omap2_iommu_enable(obj);
186 
187 	return err;
188 }
189 
190 static void iommu_disable(struct omap_iommu *obj)
191 {
192 	struct platform_device *pdev = to_platform_device(obj->dev);
193 	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
194 
195 	omap2_iommu_disable(obj);
196 
197 	pm_runtime_put_sync(obj->dev);
198 
199 	if (pdata && pdata->assert_reset)
200 		pdata->assert_reset(pdev, pdata->reset_name);
201 }
202 
203 /*
204  *	TLB operations
205  */
206 static inline int iotlb_cr_valid(struct cr_regs *cr)
207 {
208 	if (!cr)
209 		return -EINVAL;
210 
211 	return cr->cam & MMU_CAM_V;
212 }
213 
214 static u32 iotlb_cr_to_virt(struct cr_regs *cr)
215 {
216 	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
217 	u32 mask = get_cam_va_mask(cr->cam & page_size);
218 
219 	return cr->cam & mask;
220 }
221 
222 static u32 get_iopte_attr(struct iotlb_entry *e)
223 {
224 	u32 attr;
225 
226 	attr = e->mixed << 5;
227 	attr |= e->endian;
228 	attr |= e->elsz >> 3;
229 	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
230 			(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
231 	return attr;
232 }
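/*
 * get_iopte_attr() packs the mixed/endianness/element-size fields into the
 * low attribute bits for 4K/64K (second-level) entries and shifts them up by
 * 6 for 1M/16M (first-level) entries, apparently to match the differing bit
 * layout of the two descriptor levels.
 */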
233 
234 static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
235 {
236 	u32 status, fault_addr;
237 
238 	status = iommu_read_reg(obj, MMU_IRQSTATUS);
239 	status &= MMU_IRQ_MASK;
240 	if (!status) {
241 		*da = 0;
242 		return 0;
243 	}
244 
245 	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
246 	*da = fault_addr;
247 
248 	iommu_write_reg(obj, status, MMU_IRQSTATUS);
249 
250 	return status;
251 }
252 
253 static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
254 {
255 	u32 val;
256 
257 	val = iommu_read_reg(obj, MMU_LOCK);
258 
259 	l->base = MMU_LOCK_BASE(val);
260 	l->vict = MMU_LOCK_VICT(val);
261 
262 }
263 
264 static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
265 {
266 	u32 val;
267 
268 	val = (l->base << MMU_LOCK_BASE_SHIFT);
269 	val |= (l->vict << MMU_LOCK_VICT_SHIFT);
270 
271 	iommu_write_reg(obj, val, MMU_LOCK);
272 }
273 
274 static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
275 {
276 	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
277 	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
278 }
279 
280 static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
281 {
282 	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
283 	iommu_write_reg(obj, cr->ram, MMU_RAM);
284 
285 	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
286 	iommu_write_reg(obj, 1, MMU_LD_TLB);
287 }
288 
289 /* only used in iotlb iteration for-loop */
290 static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
291 {
292 	struct cr_regs cr;
293 	struct iotlb_lock l;
294 
295 	iotlb_lock_get(obj, &l);
296 	l.vict = n;
297 	iotlb_lock_set(obj, &l);
298 	iotlb_read_cr(obj, &cr);
299 
300 	return cr;
301 }
302 
303 #ifdef PREFETCH_IOTLB
304 static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
305 				      struct iotlb_entry *e)
306 {
307 	struct cr_regs *cr;
308 
309 	if (!e)
310 		return NULL;
311 
312 	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
313 		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
314 			e->da);
315 		return ERR_PTR(-EINVAL);
316 	}
317 
318 	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
319 	if (!cr)
320 		return ERR_PTR(-ENOMEM);
321 
322 	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
323 	cr->ram = e->pa | e->endian | e->elsz | e->mixed;
324 
325 	return cr;
326 }
327 
328 /**
329  * load_iotlb_entry - Set an iommu tlb entry
330  * @obj:	target iommu
331  * @e:		an iommu tlb entry info
332  **/
333 static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
334 {
335 	int err = 0;
336 	struct iotlb_lock l;
337 	struct cr_regs *cr;
338 
339 	if (!obj || !obj->nr_tlb_entries || !e)
340 		return -EINVAL;
341 
342 	pm_runtime_get_sync(obj->dev);
343 
344 	iotlb_lock_get(obj, &l);
345 	if (l.base == obj->nr_tlb_entries) {
346 		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
347 		err = -EBUSY;
348 		goto out;
349 	}
350 	if (!e->prsvd) {
351 		int i;
352 		struct cr_regs tmp;
353 
354 		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
355 			if (!iotlb_cr_valid(&tmp))
356 				break;
357 
358 		if (i == obj->nr_tlb_entries) {
359 			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
360 			err = -EBUSY;
361 			goto out;
362 		}
363 
364 		iotlb_lock_get(obj, &l);
365 	} else {
366 		l.vict = l.base;
367 		iotlb_lock_set(obj, &l);
368 	}
369 
370 	cr = iotlb_alloc_cr(obj, e);
371 	if (IS_ERR(cr)) {
372 		pm_runtime_put_sync(obj->dev);
373 		return PTR_ERR(cr);
374 	}
375 
376 	iotlb_load_cr(obj, cr);
377 	kfree(cr);
378 
379 	if (e->prsvd)
380 		l.base++;
381 	/* increment victim for next tlb load */
382 	if (++l.vict == obj->nr_tlb_entries)
383 		l.vict = l.base;
384 	iotlb_lock_set(obj, &l);
385 out:
386 	pm_runtime_put_sync(obj->dev);
387 	return err;
388 }
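/*
 * Preserved entries grow the locked region (l.base++ above) so they are
 * never selected as victims again; the victim pointer then wraps back to
 * l.base, i.e. it only cycles through the remaining unlocked entries.
 */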
389 
390 #else /* !PREFETCH_IOTLB */
391 
392 static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
393 {
394 	return 0;
395 }
396 
397 #endif /* !PREFETCH_IOTLB */
398 
399 static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
400 {
401 	return load_iotlb_entry(obj, e);
402 }
403 
404 /**
405  * flush_iotlb_page - Clear an iommu tlb entry
406  * @obj:	target iommu
407  * @da:		iommu device virtual address
408  *
409  * Clear an iommu tlb entry which includes 'da' address.
410  **/
411 static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
412 {
413 	int i;
414 	struct cr_regs cr;
415 
416 	pm_runtime_get_sync(obj->dev);
417 
418 	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
419 		u32 start;
420 		size_t bytes;
421 
422 		if (!iotlb_cr_valid(&cr))
423 			continue;
424 
425 		start = iotlb_cr_to_virt(&cr);
426 		bytes = iopgsz_to_bytes(cr.cam & 3);
427 
428 		if ((start <= da) && (da < start + bytes)) {
429 			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
430 				__func__, start, da, bytes);
431 			iotlb_load_cr(obj, &cr);
432 			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
433 			break;
434 		}
435 	}
436 	pm_runtime_put_sync(obj->dev);
437 
438 	if (i == obj->nr_tlb_entries)
439 		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
440 }
441 
442 /**
443  * flush_iotlb_all - Clear all iommu tlb entries
444  * @obj:	target iommu
445  **/
446 static void flush_iotlb_all(struct omap_iommu *obj)
447 {
448 	struct iotlb_lock l;
449 
450 	pm_runtime_get_sync(obj->dev);
451 
452 	l.base = 0;
453 	l.vict = 0;
454 	iotlb_lock_set(obj, &l);
455 
456 	iommu_write_reg(obj, 1, MMU_GFLUSH);
457 
458 	pm_runtime_put_sync(obj->dev);
459 }
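/*
 * Note that the global flush also resets the lock base and victim pointer to
 * zero, so entries previously preserved via load_iotlb_entry() are unlocked
 * and discarded along with everything else.
 */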
460 
461 #ifdef CONFIG_OMAP_IOMMU_DEBUG
462 
463 #define pr_reg(name)							\
464 	do {								\
465 		ssize_t bytes;						\
466 		const char *str = "%20s: %08x\n";			\
467 		const int maxcol = 32;					\
468 		bytes = snprintf(p, maxcol, str, __stringify(name),	\
469 				 iommu_read_reg(obj, MMU_##name));	\
470 		p += bytes;						\
471 		len -= bytes;						\
472 		if (len < maxcol)					\
473 			goto out;					\
474 	} while (0)
475 
476 static ssize_t
477 omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
478 {
479 	char *p = buf;
480 
481 	pr_reg(REVISION);
482 	pr_reg(IRQSTATUS);
483 	pr_reg(IRQENABLE);
484 	pr_reg(WALKING_ST);
485 	pr_reg(CNTL);
486 	pr_reg(FAULT_AD);
487 	pr_reg(TTB);
488 	pr_reg(LOCK);
489 	pr_reg(LD_TLB);
490 	pr_reg(CAM);
491 	pr_reg(RAM);
492 	pr_reg(GFLUSH);
493 	pr_reg(FLUSH_ENTRY);
494 	pr_reg(READ_CAM);
495 	pr_reg(READ_RAM);
496 	pr_reg(EMU_FAULT_AD);
497 out:
498 	return p - buf;
499 }
500 
501 ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
502 {
503 	if (!obj || !buf)
504 		return -EINVAL;
505 
506 	pm_runtime_get_sync(obj->dev);
507 
508 	bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
509 
510 	pm_runtime_put_sync(obj->dev);
511 
512 	return bytes;
513 }
514 
515 static int
516 __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
517 {
518 	int i;
519 	struct iotlb_lock saved;
520 	struct cr_regs tmp;
521 	struct cr_regs *p = crs;
522 
523 	pm_runtime_get_sync(obj->dev);
524 	iotlb_lock_get(obj, &saved);
525 
526 	for_each_iotlb_cr(obj, num, i, tmp) {
527 		if (!iotlb_cr_valid(&tmp))
528 			continue;
529 		*p++ = tmp;
530 	}
531 
532 	iotlb_lock_set(obj, &saved);
533 	pm_runtime_put_sync(obj->dev);
534 
535 	return  p - crs;
536 }
537 
538 /**
539  * iotlb_dump_cr - Dump an iommu tlb entry into buf
540  * @obj:	target iommu
541  * @cr:		contents of cam and ram register
542  * @buf:	output buffer
543  **/
544 static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
545 			     char *buf)
546 {
547 	char *p = buf;
548 
549 	/* FIXME: Needs more detailed analysis of cam/ram */
550 	p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
551 					(cr->cam & MMU_CAM_P) ? 1 : 0);
552 
553 	return p - buf;
554 }
555 
556 /**
557  * omap_dump_tlb_entries - dump cr arrays to given buffer
558  * @obj:	target iommu
559  * @buf:	output buffer
 * @bytes:	maximum number of bytes to write into @buf
560  **/
561 size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
562 {
563 	int i, num;
564 	struct cr_regs *cr;
565 	char *p = buf;
566 
567 	num = bytes / sizeof(*cr);
568 	num = min(obj->nr_tlb_entries, num);
569 
570 	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
571 	if (!cr)
572 		return 0;
573 
574 	num = __dump_tlb_entries(obj, cr, num);
575 	for (i = 0; i < num; i++)
576 		p += iotlb_dump_cr(obj, cr + i, p);
577 	kfree(cr);
578 
579 	return p - buf;
580 }
581 
582 #endif /* CONFIG_OMAP_IOMMU_DEBUG */
583 
584 /*
585  *	H/W pagetable operations
586  */
587 static void flush_iopgd_range(u32 *first, u32 *last)
588 {
589 	/* FIXME: L2 cache should be taken care of if it exists */
590 	do {
591 		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
592 		    : : "r" (first));
593 		first += L1_CACHE_BYTES / sizeof(*first);
594 	} while (first <= last);
595 }
596 
597 static void flush_iopte_range(u32 *first, u32 *last)
598 {
599 	/* FIXME: L2 cache should be taken care of if it exists */
600 	do {
601 		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
602 		    : : "r" (first));
603 		first += L1_CACHE_BYTES / sizeof(*first);
604 	} while (first <= last);
605 }
606 
607 static void iopte_free(u32 *iopte)
608 {
609 	/* Note: freed ioptes must be clean and ready for re-use */
610 	if (iopte)
611 		kmem_cache_free(iopte_cachep, iopte);
612 }
613 
614 static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
615 {
616 	u32 *iopte;
617 
618 	/* a table already exists */
619 	if (*iopgd)
620 		goto pte_ready;
621 
622 	/*
623 	 * do the allocation outside the page table lock
624 	 */
625 	spin_unlock(&obj->page_table_lock);
626 	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
627 	spin_lock(&obj->page_table_lock);
628 
629 	if (!*iopgd) {
630 		if (!iopte)
631 			return ERR_PTR(-ENOMEM);
632 
633 		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
634 		flush_iopgd_range(iopgd, iopgd);
635 
636 		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
637 	} else {
638 		/* We raced, free the redundant table */
639 		iopte_free(iopte);
640 	}
641 
642 pte_ready:
643 	iopte = iopte_offset(iopgd, da);
644 
645 	dev_vdbg(obj->dev,
646 		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
647 		 __func__, da, iopgd, *iopgd, iopte, *iopte);
648 
649 	return iopte;
650 }
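/*
 * Dropping page_table_lock around the allocation above means two mappers can
 * race to populate the same pgd slot; the loser frees its freshly allocated
 * table and both continue with the entry installed by the winner.
 */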
651 
652 static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
653 {
654 	u32 *iopgd = iopgd_offset(obj, da);
655 
656 	if ((da | pa) & ~IOSECTION_MASK) {
657 		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
658 			__func__, da, pa, IOSECTION_SIZE);
659 		return -EINVAL;
660 	}
661 
662 	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
663 	flush_iopgd_range(iopgd, iopgd);
664 	return 0;
665 }
666 
667 static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
668 {
669 	u32 *iopgd = iopgd_offset(obj, da);
670 	int i;
671 
672 	if ((da | pa) & ~IOSUPER_MASK) {
673 		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
674 			__func__, da, pa, IOSUPER_SIZE);
675 		return -EINVAL;
676 	}
677 
678 	for (i = 0; i < 16; i++)
679 		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
680 	flush_iopgd_range(iopgd, iopgd + 15);
681 	return 0;
682 }
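/*
 * A 16MB supersection occupies 16 consecutive 1MB first-level slots, hence
 * the identical descriptor is written 16 times above; iopte_alloc_large()
 * below uses the same replication for 64KB large pages at the second level.
 */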
683 
684 static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
685 {
686 	u32 *iopgd = iopgd_offset(obj, da);
687 	u32 *iopte = iopte_alloc(obj, iopgd, da);
688 
689 	if (IS_ERR(iopte))
690 		return PTR_ERR(iopte);
691 
692 	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
693 	flush_iopte_range(iopte, iopte);
694 
695 	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
696 		 __func__, da, pa, iopte, *iopte);
697 
698 	return 0;
699 }
700 
701 static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
702 {
703 	u32 *iopgd = iopgd_offset(obj, da);
704 	u32 *iopte = iopte_alloc(obj, iopgd, da);
705 	int i;
706 
707 	if ((da | pa) & ~IOLARGE_MASK) {
708 		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
709 			__func__, da, pa, IOLARGE_SIZE);
710 		return -EINVAL;
711 	}
712 
713 	if (IS_ERR(iopte))
714 		return PTR_ERR(iopte);
715 
716 	for (i = 0; i < 16; i++)
717 		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
718 	flush_iopte_range(iopte, iopte + 15);
719 	return 0;
720 }
721 
722 static int
723 iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
724 {
725 	int (*fn)(struct omap_iommu *, u32, u32, u32);
726 	u32 prot;
727 	int err;
728 
729 	if (!obj || !e)
730 		return -EINVAL;
731 
732 	switch (e->pgsz) {
733 	case MMU_CAM_PGSZ_16M:
734 		fn = iopgd_alloc_super;
735 		break;
736 	case MMU_CAM_PGSZ_1M:
737 		fn = iopgd_alloc_section;
738 		break;
739 	case MMU_CAM_PGSZ_64K:
740 		fn = iopte_alloc_large;
741 		break;
742 	case MMU_CAM_PGSZ_4K:
743 		fn = iopte_alloc_page;
744 		break;
745 	default:
746 		fn = NULL;
747 		BUG();
748 		break;
749 	}
750 
751 	prot = get_iopte_attr(e);
752 
753 	spin_lock(&obj->page_table_lock);
754 	err = fn(obj, e->da, e->pa, prot);
755 	spin_unlock(&obj->page_table_lock);
756 
757 	return err;
758 }
759 
760 /**
761  * omap_iopgtable_store_entry - Make an iommu pte entry
762  * @obj:	target iommu
763  * @e:		an iommu tlb entry info
764  **/
765 static int
766 omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
767 {
768 	int err;
769 
770 	flush_iotlb_page(obj, e->da);
771 	err = iopgtable_store_entry_core(obj, e);
772 	if (!err)
773 		prefetch_iotlb_entry(obj, e);
774 	return err;
775 }
776 
777 /**
778  * iopgtable_lookup_entry - Lookup an iommu pte entry
779  * @obj:	target iommu
780  * @da:		iommu device virtual address
781  * @ppgd:	iommu pgd entry pointer to be returned
782  * @ppte:	iommu pte entry pointer to be returned
783  **/
784 static void
785 iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
786 {
787 	u32 *iopgd, *iopte = NULL;
788 
789 	iopgd = iopgd_offset(obj, da);
790 	if (!*iopgd)
791 		goto out;
792 
793 	if (iopgd_is_table(*iopgd))
794 		iopte = iopte_offset(iopgd, da);
795 out:
796 	*ppgd = iopgd;
797 	*ppte = iopte;
798 }
799 
800 static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
801 {
802 	size_t bytes;
803 	u32 *iopgd = iopgd_offset(obj, da);
804 	int nent = 1;
805 
806 	if (!*iopgd)
807 		return 0;
808 
809 	if (iopgd_is_table(*iopgd)) {
810 		int i;
811 		u32 *iopte = iopte_offset(iopgd, da);
812 
813 		bytes = IOPTE_SIZE;
814 		if (*iopte & IOPTE_LARGE) {
815 			nent *= 16;
816 			/* rewind to the 1st entry */
817 			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
818 		}
819 		bytes *= nent;
820 		memset(iopte, 0, nent * sizeof(*iopte));
821 		flush_iopte_range(iopte, iopte + nent - 1);
822 
823 		/*
824 		 * walk the table to check whether it still holds any entries
825 		 */
826 		iopte = iopte_offset(iopgd, 0);
827 		for (i = 0; i < PTRS_PER_IOPTE; i++)
828 			if (iopte[i])
829 				goto out;
830 
831 		iopte_free(iopte);
832 		nent = 1; /* for the next L1 entry */
833 	} else {
834 		bytes = IOPGD_SIZE;
835 		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
836 			nent *= 16;
837 			/* rewind to the 1st entry */
838 			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
839 		}
840 		bytes *= nent;
841 	}
842 	memset(iopgd, 0, nent * sizeof(*iopgd));
843 	flush_iopgd_range(iopgd, iopgd + nent - 1);
844 out:
845 	return bytes;
846 }
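/*
 * The byte count returned above is the full size of the torn-down mapping
 * (small page, large page, section or supersection); it propagates through
 * iopgtable_clear_entry() and is what omap_iommu_unmap() reports back to the
 * IOMMU core.
 */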
847 
848 /**
849  * iopgtable_clear_entry - Remove an iommu pte entry
850  * @obj:	target iommu
851  * @da:		iommu device virtual address
852  **/
853 static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
854 {
855 	size_t bytes;
856 
857 	spin_lock(&obj->page_table_lock);
858 
859 	bytes = iopgtable_clear_entry_core(obj, da);
860 	flush_iotlb_page(obj, da);
861 
862 	spin_unlock(&obj->page_table_lock);
863 
864 	return bytes;
865 }
866 
867 static void iopgtable_clear_entry_all(struct omap_iommu *obj)
868 {
869 	int i;
870 
871 	spin_lock(&obj->page_table_lock);
872 
873 	for (i = 0; i < PTRS_PER_IOPGD; i++) {
874 		u32 da;
875 		u32 *iopgd;
876 
877 		da = i << IOPGD_SHIFT;
878 		iopgd = iopgd_offset(obj, da);
879 
880 		if (!*iopgd)
881 			continue;
882 
883 		if (iopgd_is_table(*iopgd))
884 			iopte_free(iopte_offset(iopgd, 0));
885 
886 		*iopgd = 0;
887 		flush_iopgd_range(iopgd, iopgd);
888 	}
889 
890 	flush_iotlb_all(obj);
891 
892 	spin_unlock(&obj->page_table_lock);
893 }
894 
895 /*
896  *	Device IOMMU generic operations
897  */
898 static irqreturn_t iommu_fault_handler(int irq, void *data)
899 {
900 	u32 da, errs;
901 	u32 *iopgd, *iopte;
902 	struct omap_iommu *obj = data;
903 	struct iommu_domain *domain = obj->domain;
904 	struct omap_iommu_domain *omap_domain = domain->priv;
905 
906 	if (!omap_domain->iommu_dev)
907 		return IRQ_NONE;
908 
909 	errs = iommu_report_fault(obj, &da);
910 	if (errs == 0)
911 		return IRQ_HANDLED;
912 
913 	/* Fault callback or TLB/PTE Dynamic loading */
914 	if (!report_iommu_fault(domain, obj->dev, da, 0))
915 		return IRQ_HANDLED;
916 
917 	iommu_disable(obj);
918 
919 	iopgd = iopgd_offset(obj, da);
920 
921 	if (!iopgd_is_table(*iopgd)) {
922 		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
923 				obj->name, errs, da, iopgd, *iopgd);
924 		return IRQ_NONE;
925 	}
926 
927 	iopte = iopte_offset(iopgd, da);
928 
929 	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
930 			obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
931 
932 	return IRQ_NONE;
933 }
934 
935 static int device_match_by_alias(struct device *dev, void *data)
936 {
937 	struct omap_iommu *obj = to_iommu(dev);
938 	const char *name = data;
939 
940 	pr_debug("%s: %s %s\n", __func__, obj->name, name);
941 
942 	return strcmp(obj->name, name) == 0;
943 }
944 
945 /**
946  * omap_iommu_attach() - attach iommu device to an iommu domain
947  * @name:	name of target omap iommu device
948  * @iopgd:	page table
949  **/
950 static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
951 {
952 	int err;
953 	struct device *dev;
954 	struct omap_iommu *obj;
955 
956 	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
957 				(void *)name,
958 				device_match_by_alias);
959 	if (!dev)
960 		return ERR_PTR(-ENODEV);
961 
962 	obj = to_iommu(dev);
963 
964 	spin_lock(&obj->iommu_lock);
965 
966 	obj->iopgd = iopgd;
967 	err = iommu_enable(obj);
968 	if (err)
969 		goto err_enable;
970 	flush_iotlb_all(obj);
971 
972 	spin_unlock(&obj->iommu_lock);
973 
974 	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
975 	return obj;
976 
977 err_enable:
978 	spin_unlock(&obj->iommu_lock);
979 	return ERR_PTR(err);
980 }
981 
982 /**
983  * omap_iommu_detach - release iommu device
984  * @obj:	target iommu
985  **/
986 static void omap_iommu_detach(struct omap_iommu *obj)
987 {
988 	if (!obj || IS_ERR(obj))
989 		return;
990 
991 	spin_lock(&obj->iommu_lock);
992 
993 	iommu_disable(obj);
994 	obj->iopgd = NULL;
995 
996 	spin_unlock(&obj->iommu_lock);
997 
998 	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
999 }
1000 
1001 /*
1002  *	OMAP Device MMU(IOMMU) detection
1003  */
1004 static int omap_iommu_probe(struct platform_device *pdev)
1005 {
1006 	int err = -ENODEV;
1007 	int irq;
1008 	struct omap_iommu *obj;
1009 	struct resource *res;
1010 	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
1011 	struct device_node *of = pdev->dev.of_node;
1012 
1013 	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
1014 	if (!obj)
1015 		return -ENOMEM;
1016 
1017 	if (of) {
1018 		obj->name = dev_name(&pdev->dev);
1019 		obj->nr_tlb_entries = 32;
1020 		err = of_property_read_u32(of, "ti,#tlb-entries",
1021 					   &obj->nr_tlb_entries);
1022 		if (err && err != -EINVAL)
1023 			return err;
1024 		if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
1025 			return -EINVAL;
1026 		if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
1027 			obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
1028 	} else {
1029 		obj->nr_tlb_entries = pdata->nr_tlb_entries;
1030 		obj->name = pdata->name;
1031 	}
1032 
1033 	obj->dev = &pdev->dev;
1034 	obj->ctx = (void *)obj + sizeof(*obj);
1035 
1036 	spin_lock_init(&obj->iommu_lock);
1037 	spin_lock_init(&obj->page_table_lock);
1038 
1039 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1040 	obj->regbase = devm_ioremap_resource(obj->dev, res);
1041 	if (IS_ERR(obj->regbase))
1042 		return PTR_ERR(obj->regbase);
1043 
1044 	irq = platform_get_irq(pdev, 0);
1045 	if (irq < 0)
1046 		return -ENODEV;
1047 
1048 	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
1049 			       dev_name(obj->dev), obj);
1050 	if (err < 0)
1051 		return err;
1052 	platform_set_drvdata(pdev, obj);
1053 
1054 	pm_runtime_irq_safe(obj->dev);
1055 	pm_runtime_enable(obj->dev);
1056 
1057 	omap_iommu_debugfs_add(obj);
1058 
1059 	dev_info(&pdev->dev, "%s registered\n", obj->name);
1060 	return 0;
1061 }
1062 
1063 static int omap_iommu_remove(struct platform_device *pdev)
1064 {
1065 	struct omap_iommu *obj = platform_get_drvdata(pdev);
1066 
1067 	iopgtable_clear_entry_all(obj);
1068 	omap_iommu_debugfs_remove(obj);
1069 
1070 	pm_runtime_disable(obj->dev);
1071 
1072 	dev_info(&pdev->dev, "%s removed\n", obj->name);
1073 	return 0;
1074 }
1075 
1076 static const struct of_device_id omap_iommu_of_match[] = {
1077 	{ .compatible = "ti,omap2-iommu" },
1078 	{ .compatible = "ti,omap4-iommu" },
1079 	{ .compatible = "ti,dra7-iommu"	},
1080 	{},
1081 };
1082 MODULE_DEVICE_TABLE(of, omap_iommu_of_match);
1083 
1084 static struct platform_driver omap_iommu_driver = {
1085 	.probe	= omap_iommu_probe,
1086 	.remove	= omap_iommu_remove,
1087 	.driver	= {
1088 		.name	= "omap-iommu",
1089 		.of_match_table = of_match_ptr(omap_iommu_of_match),
1090 	},
1091 };
1092 
1093 static void iopte_cachep_ctor(void *iopte)
1094 {
1095 	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
1096 }
1097 
1098 static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
1099 {
1100 	memset(e, 0, sizeof(*e));
1101 
1102 	e->da		= da;
1103 	e->pa		= pa;
1104 	e->valid	= MMU_CAM_V;
1105 	e->pgsz		= pgsz;
1106 	e->endian	= MMU_RAM_ENDIAN_LITTLE;
1107 	e->elsz		= MMU_RAM_ELSZ_8;
1108 	e->mixed	= 0;
1109 
1110 	return iopgsz_to_bytes(e->pgsz);
1111 }
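/*
 * iotlb_init_entry() returns the mapping size in bytes for the chosen page
 * size and fills in the fixed defaults (valid, little-endian, 8-bit element
 * size, non-mixed) used for every mapping created via omap_iommu_map().
 */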
1112 
1113 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
1114 			 phys_addr_t pa, size_t bytes, int prot)
1115 {
1116 	struct omap_iommu_domain *omap_domain = domain->priv;
1117 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
1118 	struct device *dev = oiommu->dev;
1119 	struct iotlb_entry e;
1120 	int omap_pgsz;
1121 	int ret;
1122 
1123 	omap_pgsz = bytes_to_iopgsz(bytes);
1124 	if (omap_pgsz < 0) {
1125 		dev_err(dev, "invalid size to map: %zu\n", bytes);
1126 		return -EINVAL;
1127 	}
1128 
1129 	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);
1130 
1131 	iotlb_init_entry(&e, da, pa, omap_pgsz);
1132 
1133 	ret = omap_iopgtable_store_entry(oiommu, &e);
1134 	if (ret)
1135 		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);
1136 
1137 	return ret;
1138 }
1139 
1140 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
1141 			    size_t size)
1142 {
1143 	struct omap_iommu_domain *omap_domain = domain->priv;
1144 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
1145 	struct device *dev = oiommu->dev;
1146 
1147 	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);
1148 
1149 	return iopgtable_clear_entry(oiommu, da);
1150 }
1151 
1152 static int
1153 omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
1154 {
1155 	struct omap_iommu_domain *omap_domain = domain->priv;
1156 	struct omap_iommu *oiommu;
1157 	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
1158 	int ret = 0;
1159 
1160 	if (!arch_data || !arch_data->name) {
1161 		dev_err(dev, "device doesn't have an associated iommu\n");
1162 		return -EINVAL;
1163 	}
1164 
1165 	spin_lock(&omap_domain->lock);
1166 
1167 	/* only a single device is supported per domain for now */
1168 	if (omap_domain->iommu_dev) {
1169 		dev_err(dev, "iommu domain is already attached\n");
1170 		ret = -EBUSY;
1171 		goto out;
1172 	}
1173 
1174 	/* get a handle to and enable the omap iommu */
1175 	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
1176 	if (IS_ERR(oiommu)) {
1177 		ret = PTR_ERR(oiommu);
1178 		dev_err(dev, "can't get omap iommu: %d\n", ret);
1179 		goto out;
1180 	}
1181 
1182 	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
1183 	omap_domain->dev = dev;
1184 	oiommu->domain = domain;
1185 
1186 out:
1187 	spin_unlock(&omap_domain->lock);
1188 	return ret;
1189 }
1190 
1191 static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
1192 			struct device *dev)
1193 {
1194 	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
1195 	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
1196 
1197 	/* only a single device is supported per domain for now */
1198 	if (omap_domain->iommu_dev != oiommu) {
1199 		dev_err(dev, "invalid iommu device\n");
1200 		return;
1201 	}
1202 
1203 	iopgtable_clear_entry_all(oiommu);
1204 
1205 	omap_iommu_detach(oiommu);
1206 
1207 	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
1208 	omap_domain->dev = NULL;
1209 	oiommu->domain = NULL;
1210 }
1211 
1212 static void omap_iommu_detach_dev(struct iommu_domain *domain,
1213 				 struct device *dev)
1214 {
1215 	struct omap_iommu_domain *omap_domain = domain->priv;
1216 
1217 	spin_lock(&omap_domain->lock);
1218 	_omap_iommu_detach_dev(omap_domain, dev);
1219 	spin_unlock(&omap_domain->lock);
1220 }
1221 
1222 static int omap_iommu_domain_init(struct iommu_domain *domain)
1223 {
1224 	struct omap_iommu_domain *omap_domain;
1225 
1226 	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
1227 	if (!omap_domain) {
1228 		pr_err("kzalloc failed\n");
1229 		goto out;
1230 	}
1231 
1232 	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
1233 	if (!omap_domain->pgtable) {
1234 		pr_err("kzalloc failed\n");
1235 		goto fail_nomem;
1236 	}
1237 
1238 	/*
1239 	 * should never fail, but please keep this around to ensure
1240 	 * we keep the hardware happy
1241 	 */
1242 	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
1243 
1244 	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
1245 	spin_lock_init(&omap_domain->lock);
1246 
1247 	domain->priv = omap_domain;
1248 
1249 	domain->geometry.aperture_start = 0;
1250 	domain->geometry.aperture_end   = (1ULL << 32) - 1;
1251 	domain->geometry.force_aperture = true;
1252 
1253 	return 0;
1254 
1255 fail_nomem:
1256 	kfree(omap_domain);
1257 out:
1258 	return -ENOMEM;
1259 }
1260 
1261 static void omap_iommu_domain_destroy(struct iommu_domain *domain)
1262 {
1263 	struct omap_iommu_domain *omap_domain = domain->priv;
1264 
1265 	domain->priv = NULL;
1266 
1267 	/*
1268 	 * An iommu device is still attached
1269 	 * (currently, only one device can be attached) ?
1270 	 */
1271 	if (omap_domain->iommu_dev)
1272 		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);
1273 
1274 	kfree(omap_domain->pgtable);
1275 	kfree(omap_domain);
1276 }
1277 
1278 static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
1279 					  dma_addr_t da)
1280 {
1281 	struct omap_iommu_domain *omap_domain = domain->priv;
1282 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
1283 	struct device *dev = oiommu->dev;
1284 	u32 *pgd, *pte;
1285 	phys_addr_t ret = 0;
1286 
1287 	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
1288 
1289 	if (pte) {
1290 		if (iopte_is_small(*pte))
1291 			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
1292 		else if (iopte_is_large(*pte))
1293 			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
1294 		else
1295 			dev_err(dev, "bogus pte 0x%x, da 0x%llx\n", *pte,
1296 							(unsigned long long)da);
1297 	} else {
1298 		if (iopgd_is_section(*pgd))
1299 			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
1300 		else if (iopgd_is_super(*pgd))
1301 			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
1302 		else
1303 			dev_err(dev, "bogus pgd 0x%x, da 0x%llx\n", *pgd,
1304 							(unsigned long long)da);
1305 	}
1306 
1307 	return ret;
1308 }
1309 
1310 static int omap_iommu_add_device(struct device *dev)
1311 {
1312 	struct omap_iommu_arch_data *arch_data;
1313 	struct device_node *np;
1314 	struct platform_device *pdev;
1315 
1316 	/*
1317 	 * Allocate the archdata iommu structure for DT-based devices.
1318 	 *
1319 	 * TODO: Simplify this when removing non-DT support completely from the
1320 	 * IOMMU users.
1321 	 */
1322 	if (!dev->of_node)
1323 		return 0;
1324 
1325 	np = of_parse_phandle(dev->of_node, "iommus", 0);
1326 	if (!np)
1327 		return 0;
1328 
1329 	pdev = of_find_device_by_node(np);
1330 	if (WARN_ON(!pdev)) {
1331 		of_node_put(np);
1332 		return -EINVAL;
1333 	}
1334 
1335 	arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
1336 	if (!arch_data) {
1337 		of_node_put(np);
1338 		return -ENOMEM;
1339 	}
1340 
1341 	arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL);
1342 	dev->archdata.iommu = arch_data;
1343 
1344 	of_node_put(np);
1345 
1346 	return 0;
1347 }
1348 
1349 static void omap_iommu_remove_device(struct device *dev)
1350 {
1351 	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
1352 
1353 	if (!dev->of_node || !arch_data)
1354 		return;
1355 
1356 	kfree(arch_data->name);
1357 	kfree(arch_data);
1358 }
1359 
1360 static const struct iommu_ops omap_iommu_ops = {
1361 	.domain_init	= omap_iommu_domain_init,
1362 	.domain_destroy	= omap_iommu_domain_destroy,
1363 	.attach_dev	= omap_iommu_attach_dev,
1364 	.detach_dev	= omap_iommu_detach_dev,
1365 	.map		= omap_iommu_map,
1366 	.unmap		= omap_iommu_unmap,
1367 	.map_sg		= default_iommu_map_sg,
1368 	.iova_to_phys	= omap_iommu_iova_to_phys,
1369 	.add_device	= omap_iommu_add_device,
1370 	.remove_device	= omap_iommu_remove_device,
1371 	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
1372 };
1373 
1374 static int __init omap_iommu_init(void)
1375 {
1376 	struct kmem_cache *p;
1377 	const unsigned long flags = SLAB_HWCACHE_ALIGN;
1378 	size_t align = 1 << 10; /* L2 pagetable alignment */
1379 	struct device_node *np;
1380 
1381 	np = of_find_matching_node(NULL, omap_iommu_of_match);
1382 	if (!np)
1383 		return 0;
1384 
1385 	of_node_put(np);
1386 
1387 	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
1388 			      iopte_cachep_ctor);
1389 	if (!p)
1390 		return -ENOMEM;
1391 	iopte_cachep = p;
1392 
1393 	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
1394 
1395 	omap_iommu_debugfs_init();
1396 
1397 	return platform_driver_register(&omap_iommu_driver);
1398 }
1399 /* must be ready before omap3isp is probed */
1400 subsys_initcall(omap_iommu_init);
1401 
1402 static void __exit omap_iommu_exit(void)
1403 {
1404 	kmem_cache_destroy(iopte_cachep);
1405 
1406 	platform_driver_unregister(&omap_iommu_driver);
1407 
1408 	omap_iommu_debugfs_exit();
1409 }
1410 module_exit(omap_iommu_exit);
1411 
1412 MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
1413 MODULE_ALIAS("platform:omap-iommu");
1414 MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
1415 MODULE_LICENSE("GPL v2");
1416