xref: /linux/drivers/iommu/tegra-smmu.c (revision 6c8c1406a6d6a3f2e61ac590f5c0994231bc6be7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
	struct list_head list;
	struct tegra_smmu *smmu;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
	unsigned int swgroup;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	spinlock_t lock;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define  SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define  SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define  SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define  SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define  SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define  SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define  SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define  SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define  SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define  SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK		(~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x)	((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x)	((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x)	((unsigned long)((x) >> SMMU_PTE_SHIFT))

#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)

static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

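/*
 * Editor's note: a worked example (values are illustrative, not from the
 * original source) of how a 32-bit IOVA decomposes under this two-level
 * layout:
 *
 *   iova = 0x12345678
 *   iova_pd_index(iova)       = (0x12345678 >> 22) & 0x3ff = 0x048
 *   iova_pt_index(iova)       = (0x12345678 >> 12) & 0x3ff = 0x345
 *   SMMU_OFFSET_IN_PAGE(iova) =  0x12345678 & 0xfff        = 0x678
 *
 * Each of the 1024 PDEs covers 1024 4 KiB pages (4 MiB), so the directory
 * spans the full 4 GiB aperture set up in tegra_smmu_domain_alloc(). A PDE
 * pointing at a page table is built as, e.g.:
 *
 *   SMMU_MK_PDE(0x80001000, SMMU_PDE_ATTR | SMMU_PDE_NEXT) = 0xf0080001
 */
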
static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

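/*
 * Editor's note: register writes to the SMMU can be posted; reading back
 * SMMU_PTB_ASID here forces all preceding writes out to the hardware before
 * the caller proceeds.
 */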
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_PTB_ASID);
}

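/*
 * Editor's note: the ASID bitmap is not self-locking; both helpers below are
 * only called with smmu->lock held (see tegra_smmu_as_prepare() and
 * tegra_smmu_as_unprepare()).
 */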
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids)
		return -ENOSPC;

	set_bit(id, smmu->asids);
	*idp = id;

	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	clear_bit(id, smmu->asids);
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	spin_lock_init(&as->lock);

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s: swgroup %u not found\n", __func__, swgroup);
		/* No point in moving ahead if the group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->regs.smmu.reg);
		value |= BIT(client->regs.smmu.bit);
		smmu_writel(smmu, value, client->regs.smmu.reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->regs.smmu.reg);
		value &= ~BIT(client->regs.smmu.bit);
		smmu_writel(smmu, value, client->regs.smmu.reg);
	}
}

static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err = 0;

	mutex_lock(&smmu->lock);

	if (as->use_count > 0) {
		as->use_count++;
		goto unlock;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma)) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	mutex_unlock(&smmu->lock);

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
	mutex_unlock(&smmu->lock);

	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	mutex_lock(&smmu->lock);

	if (--as->use_count > 0) {
		mutex_unlock(&smmu->lock);
		return;
	}

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;

	mutex_unlock(&smmu->lock);
}

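/*
 * Editor's note: each fwspec ID is one SWGROUP. tegra_smmu_as_prepare() is
 * called once per ID, so as->use_count ends up counting attached SWGROUPs
 * and the error path below can unwind exactly the IDs already enabled.
 */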
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned int index;
	int err;

	if (!fwspec)
		return -ENOENT;

	for (index = 0; index < fwspec->num_ids; index++) {
		err = tegra_smmu_as_prepare(smmu, as);
		if (err)
			goto disable;

		tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
	}

	if (index == 0)
		return -ENODEV;

	return 0;

disable:
	while (index--) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}

	return err;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct tegra_smmu *smmu = as->smmu;
	unsigned int index;

	if (!fwspec)
		return;

	for (index = 0; index < fwspec->num_ids; index++) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

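/*
 * Editor's note: unlike tegra_smmu_pte_lookup(), this variant installs a new
 * page table (backed by the caller-provided @page) when none exists yet for
 * the PDE covering @iova. On mapping failure it frees @page and returns NULL.
 */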
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap, struct page *page)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		dma_addr_t dma;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

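/*
 * Editor's note: as->count[] tracks the number of live PTEs in each page
 * table so that tegra_smmu_pte_put_use() can tear the table down and clear
 * its PDE once the last mapping in that 4 MiB region goes away.
 */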
static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

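/*
 * Editor's note: the update sequence mirrors tegra_smmu_set_pde(): write the
 * PTE through the CPU mapping, sync that one word for the device, then flush
 * the page table cache and the TLB entries for the group containing @iova.
 */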
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

static struct page *as_get_pde_page(struct tegra_smmu_as *as,
				    unsigned long iova, gfp_t gfp,
				    unsigned long *flags)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/* at first check whether allocation needs to be done at all */
	if (page)
		return page;

	/*
	 * In order to prevent exhaustion of the atomic memory pool, we
	 * allocate the page in a sleeping context if the GFP flags permit.
	 * Hence the spinlock needs to be unlocked and re-locked around the
	 * allocation.
	 */
	if (!(gfp & __GFP_ATOMIC))
		spin_unlock_irqrestore(&as->lock, *flags);

	page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);

	if (!(gfp & __GFP_ATOMIC))
		spin_lock_irqsave(&as->lock, *flags);

	/*
	 * In the case of a blocking allocation, a concurrent mapping may have
	 * won the race to install the PDE. If so, the page we just allocated
	 * is not needed and is freed, and a failed allocation is not fatal
	 * because the winner's page is used instead.
	 */
	if (as->pts[pde]) {
		if (page)
			__free_page(page);

		page = as->pts[pde];
	}

	return page;
}

static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
		 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
		 unsigned long *flags)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	struct page *page;
	u32 pte_attrs;
	u32 *pte;

	page = as_get_pde_page(as, iova, gfp, flags);
	if (!page)
		return -ENOMEM;

	pte = as_get_pte(as, iova, &pte_dma, page);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   SMMU_PHYS_PFN(paddr) | pte_attrs);

	return 0;
}

static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
		   size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&as->lock, flags);
	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
	spin_unlock_irqrestore(&as->lock, flags);

	return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);
	size = __tegra_smmu_unmap(domain, iova, size, gather);
	spin_unlock_irqrestore(&as->lock, flags);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}

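/*
 * Editor's note: this resolves an "iommus" phandle (which points at the
 * memory controller node) to the MC's driver data. It returns NULL rather
 * than an error when the MC has not been probed yet, in which case the
 * caller simply skips configuring that device.
 */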
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc) {
		put_device(&pdev->dev);
		return NULL;
	}

	return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		iommu_fwspec_free(dev);
		return err;
	}

	return 0;
}

static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);

			if (err < 0) {
				of_node_put(args.np);
				return ERR_PTR(err);
			}
		}

		of_node_put(args.np);
		index++;
	}

	smmu = dev_iommu_priv_get(dev);
	if (!smmu)
		return ERR_PTR(-ENODEV);

	return &smmu->iommu;
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

static void tegra_smmu_group_release(void *iommu_data)
{
	struct tegra_smmu_group *group = iommu_data;
	struct tegra_smmu *smmu = group->smmu;

	mutex_lock(&smmu->lock);
	list_del(&group->list);
	mutex_unlock(&smmu->lock);
}

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	const struct tegra_smmu_group_soc *soc;
	unsigned int swgroup = fwspec->ids[0];
	struct tegra_smmu_group *group;
	struct iommu_group *grp;

	/* Find the group_soc associated with this swgroup */
	soc = tegra_smmu_find_group(smmu, swgroup);

	mutex_lock(&smmu->lock);

	/* Find an existing iommu_group associated with the swgroup or group_soc */
	list_for_each_entry(group, &smmu->groups, list)
		if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
			grp = iommu_group_ref_get(group->group);
			mutex_unlock(&smmu->lock);
			return grp;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->swgroup = swgroup;
	group->smmu = smmu;
	group->soc = soc;

	if (dev_is_pci(dev))
		group->group = pci_device_group(dev);
	else
		group->group = generic_device_group(dev);

	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
	if (soc)
		iommu_group_set_name(group->group, soc->name);
	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}

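/*
 * Editor's note: an illustrative (not from the original source) device tree
 * reference; a client names its SWGROUP with a single cell on the memory
 * controller, and that cell arrives here as args->args[0]:
 *
 *	iommus = <&mc TEGRA_SWGROUP_DC>;
 */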
static int tegra_smmu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
	u32 id = args->args[0];

	/*
	 * Note: this releases the reference to &iommu_pdev->dev, which is
	 * mc->dev. Although some functions in tegra_smmu_ops may keep using
	 * the private data beyond this point, that is still safe because the
	 * SMMU parent device is the same as the MC, so the reference count
	 * isn't strictly necessary.
	 */
	put_device(&iommu_pdev->dev);

	dev_iommu_priv_set(dev, mc->smmu);

	return iommu_fwspec_add_ids(dev, &id, 1);
}

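/*
 * Editor's note: pgsize_bitmap below is SZ_4K, so the IOMMU core only ever
 * calls .map/.unmap in 4 KiB steps; the 4 MiB PDE level serves purely as a
 * directory and is never used for large-page mappings.
 */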
static const struct iommu_ops tegra_smmu_ops = {
	.domain_alloc = tegra_smmu_domain_alloc,
	.probe_device = tegra_smmu_probe_device,
	.device_group = tegra_smmu_device_group,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= tegra_smmu_attach_dev,
		.detach_dev	= tegra_smmu_detach_dev,
		.map		= tegra_smmu_map,
		.unmap		= tegra_smmu_unmap,
		.iova_to_phys	= tegra_smmu_iova_to_phys,
		.free		= tegra_smmu_domain_free,
	}
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->regs.smmu.reg);

		if (value & BIT(client->regs.smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	/*
	 * debugfs_create_dir() never returns NULL (it returns an ERR_PTR on
	 * failure, which debugfs_create_file() tolerates), so there is no
	 * point in checking the result here.
	 */
	smmu->debugfs = debugfs_create_dir("smmu", NULL);

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

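/*
 * Editor's note: this is not a platform driver probe; it is called by the
 * memory controller driver (the SMMU shares the MC's register space via
 * mc->regs below), which is also why the instance is parked in mc->smmu.
 */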
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However, iommu_device_register() will attempt to add all
	 * devices to the IOMMU before we get that far. In order not to rely
	 * on global variables to track the IOMMU instance, we set it here so
	 * that it can be looked up from the .probe_device() callback via the
	 * IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	smmu->asids = devm_bitmap_zalloc(dev, soc->num_asids, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask =
		BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}
1160