// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif
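
/*
 * Without the ARM DMA-IOMMU glue layer, the stubs above keep the
 * arm_iommu_*() call sites below compiling: mapping creation yields NULL,
 * attaching fails with -ENODEV, and release/detach become no-ops.
 */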

#define IPMMU_CTX_MAX		8U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		48U

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Register definitions
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000		/* R-Car Gen2/3 */
#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */

#define IMBUSCR				0x000c		/* R-Car Gen2 only */
#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */

#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */

#define IMSTR				0x0020		/* R-Car Gen2/3 */
#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */

#define IMMAIR0				0x0028		/* R-Car Gen2/3 */

#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034		/* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */
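
/*
 * Each uTLB owns a 16-byte register slot; uTLBs 32-47 (R-Car Gen3 only) live
 * in a second bank at 0x0600. For example, IMUCTR(4) = 0x0340 and
 * IMUCTR(33) = 0x0610, with the matching IMUASID at offset 8 into the slot.
 */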

/* -----------------------------------------------------------------------------
 * Root device handling
 */
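
/*
 * On R-Car Gen3 the IPMMU is split into a main instance (IPMMU-MM) that owns
 * the translation contexts, and cache instances in front of the bus masters
 * that reference it through the "renesas,ipmmu-main" DT property. The
 * helpers below locate that root instance among the probed devices.
 */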

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

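/*
 * Context registers are banked: each context occupies ctx_offset_stride
 * bytes starting at ctx_offset_base. With the 0x40 stride used by all
 * supported SoCs, e.g. IMTTBCR (0x08) of context 2 is at offset 0x88.
 */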
static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	return mmu->features->ctx_offset_base +
	       context_id * mmu->features->ctx_offset_stride + reg;
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg, u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

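/*
 * Write a context register in both the leaf instance (when the domain sits
 * behind a cache IPMMU distinct from the root) and the root instance. The
 * driver uses this for IMCTR, which must be kept in sync between the two.
 */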
static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
					    "TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

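/*
 * IMCTR_FLUSH is self-clearing: the hardware drops it once the invalidation
 * has completed, which is what ipmmu_tlb_sync() polls for above (for at most
 * ~100us, one udelay(1) per iteration).
 */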
static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to? */
	ipmmu_imuasid_write(mmu, utlb, 0);
	/* TODO: Do we need to flush the microTLB? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
				      IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_imuctr_write(mmu, utlb, 0);
	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

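/*
 * The hardware has no range-based invalidation, so flushing a walk or leaf
 * range simply degrades to a full TLB invalidate of the context.
 */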
static void ipmmu_tlb_flush(unsigned long iova, size_t size,
				size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
	.tlb_flush_leaf = ipmmu_tlb_flush,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else {
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is a TLB flush really needed?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			/* Free the containing domain, not the embedded io_domain. */
			kfree(to_vmsa_domain(io_domain));
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else {
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
	}

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size, struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));

	return 0;
}

static const struct soc_device_attribute soc_rcar_gen3[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", },
	{ .soc_id = "r8a7796", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77970", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", .revision = "ES3.*" },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

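/*
 * No slave devices have been whitelisted at this revision, so even on the
 * whitelisted Gen3 SoCs above ipmmu_slave_whitelist() currently rejects
 * every device.
 */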
static const char * const rcar_gen3_slave_whitelist[] = {
};

static bool ipmmu_slave_whitelist(struct device *dev)
{
	unsigned int i;

	/*
	 * For R-Car Gen3 SoCs, use the whitelist to opt in slave devices.
	 * For other SoCs, this check passes unconditionally.
	 */
	if (!soc_device_match(soc_rcar_gen3))
		return true;

	/* Check whether this R-Car Gen3 SoC can use the IPMMU correctly */
	if (!soc_device_match(soc_rcar_gen3_whitelist))
		return false;

	/* Check whether this slave device can work with the IPMMU */
	for (i = 0; i < ARRAY_SIZE(rcar_gen3_slave_whitelist); i++) {
		if (!strcmp(dev_name(dev), rcar_gen3_slave_whitelist[i]))
			return true;
	}

	/* Otherwise, do not allow use of the IPMMU */
	return false;
}

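/*
 * For reference, a bus master opts in from DT with a one-cell specifier
 * naming its uTLB (node name illustrative):
 *
 *	iommus = <&ipmmu_ds0 16>;
 *
 * spec->args[0] carries that uTLB number; it is stored in fwspec->ids[] and
 * later programmed by ipmmu_utlb_enable()/ipmmu_utlb_disable().
 */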
static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_slave_whitelist(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() may be called multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	int ret;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	/*
	 * Only let through devices that have been verified in xlate()
	 */
	if (!mmu)
		return ERR_PTR(-ENODEV);

	return &mmu->iommu;
}

static void ipmmu_probe_finalize(struct device *dev)
{
	int ret = 0;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		ret = ipmmu_init_arm_mapping(dev);

	if (ret)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void ipmmu_release_device(struct device *dev)
{
	arm_iommu_detach_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_flush_iotlb_all,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.probe_device = ipmmu_probe_device,
	.release_device = ipmmu_release_device,
	.probe_finalize = ipmmu_probe_finalize,
	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
			? generic_device_group : ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};
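
/*
 * Compared with the Gen2 defaults above: eight contexts and 48 uTLBs, a
 * two-bit IMTTBCR SL0 field, no IMBUSCR setup, context 0 reserved, and no
 * cache snooping on page-table walks (cache_snoop disabled).
 */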

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		/* Terminator */
	},
};

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine whether this IPMMU instance is a root device: it is if it
	 * lacks the has_cache_leaf_nodes feature or the renesas,ipmmu-main
	 * property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Defer probing until the root device has been registered.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
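/*
 * Context registers and uTLB routing are lost across system suspend: reset
 * the root instance, reprogram every still-allocated domain context, then
 * re-enable the uTLB assignments recorded in utlb_ctx[].
 */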
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	struct device_node *np;
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	np = of_find_matching_node(NULL, ipmmu_of_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}
subsys_initcall(ipmmu_init);