1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * IOMMU API for ARM architected SMMU implementations.
4  *
5  * Copyright (C) 2013 ARM Limited
6  *
7  * Author: Will Deacon <will.deacon@arm.com>
8  *
9  * This driver currently supports:
10  *	- SMMUv1 and v2 implementations
11  *	- Stream-matching and stream-indexing
12  *	- v7/v8 long-descriptor format
13  *	- Non-secure access to the SMMU
14  *	- Context fault reporting
15  *	- Extended Stream ID (16 bit)
16  */
17 
18 #define pr_fmt(fmt) "arm-smmu: " fmt
19 
20 #include <linux/acpi.h>
21 #include <linux/acpi_iort.h>
22 #include <linux/bitfield.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/err.h>
26 #include <linux/interrupt.h>
27 #include <linux/io.h>
28 #include <linux/iopoll.h>
29 #include <linux/module.h>
30 #include <linux/of.h>
31 #include <linux/of_address.h>
32 #include <linux/pci.h>
33 #include <linux/platform_device.h>
34 #include <linux/pm_runtime.h>
35 #include <linux/ratelimit.h>
36 #include <linux/slab.h>
37 
38 #include <linux/fsl/mc.h>
39 
40 #include "arm-smmu.h"
41 #include "../../dma-iommu.h"
42 
43 /*
44  * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
45  * global register space are still, in fact, using a hypervisor to mediate it
46  * by trapping and emulating register accesses. Sadly, some deployed versions
47  * of said trapping code have bugs wherein they go horribly wrong for stores
48  * using r31 (i.e. XZR/WZR) as the source register.
49  */
50 #define QCOM_DUMMY_VAL -1
51 
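/*
 * Fixed IOVA window advertised to the IOMMU core as a software MSI
 * region (see arm_smmu_get_resv_regions()), so that MSI doorbells can
 * be mapped for devices translating through the SMMU.
 */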
52 #define MSI_IOVA_BASE			0x8000000
53 #define MSI_IOVA_LENGTH			0x100000
54 
55 static int force_stage;
56 module_param(force_stage, int, S_IRUGO);
57 MODULE_PARM_DESC(force_stage,
58 	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
59 static bool disable_bypass =
60 	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
61 module_param(disable_bypass, bool, S_IRUGO);
62 MODULE_PARM_DESC(disable_bypass,
63 	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
64 
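/*
 * Reset value for an S2CR entry: fault incoming transactions when
 * bypass is disabled, otherwise let them bypass translation.
 */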
65 #define s2cr_init_val (struct arm_smmu_s2cr){				\
66 	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
67 }
68 
69 static bool using_legacy_binding, using_generic_binding;
70 
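/*
 * Runtime-PM helpers: keep the SMMU resumed around register accesses
 * when runtime PM is enabled for it; otherwise these are no-ops.
 */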
71 static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
72 {
73 	if (pm_runtime_enabled(smmu->dev))
74 		return pm_runtime_resume_and_get(smmu->dev);
75 
76 	return 0;
77 }
78 
79 static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
80 {
81 	if (pm_runtime_enabled(smmu->dev))
82 		pm_runtime_put_autosuspend(smmu->dev);
83 }
84 
85 static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
86 {
87 	/*
88 	 * Setup an autosuspend delay to avoid bouncing runpm state.
89 	 * Otherwise, if a driver for a suspended consumer device
90 	 * unmaps buffers, it will runpm resume/suspend for each one.
91 	 *
92 	 * For example, when used by a GPU device, when an application
93 	 * or game exits, it can trigger unmapping 100s or 1000s of
94 	 * buffers.  With a runpm cycle for each buffer, that adds up
95 	 * to 5-10sec worth of reprogramming the context bank, while
96 	 * the system appears to be locked up to the user.
97 	 */
98 	pm_runtime_set_autosuspend_delay(smmu->dev, 20);
99 	pm_runtime_use_autosuspend(smmu->dev);
100 }
101 
102 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
103 {
104 	return container_of(dom, struct arm_smmu_domain, domain);
105 }
106 
107 static struct platform_driver arm_smmu_driver;
108 static struct iommu_ops arm_smmu_ops;
109 
110 #ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
111 static struct device_node *dev_get_dev_node(struct device *dev)
112 {
113 	if (dev_is_pci(dev)) {
114 		struct pci_bus *bus = to_pci_dev(dev)->bus;
115 
116 		while (!pci_is_root_bus(bus))
117 			bus = bus->parent;
118 		return of_node_get(bus->bridge->parent->of_node);
119 	}
120 
121 	return of_node_get(dev->of_node);
122 }
123 
124 static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
125 {
126 	*((__be32 *)data) = cpu_to_be32(alias);
127 	return 0; /* Continue walking */
128 }
129 
130 static int __find_legacy_master_phandle(struct device *dev, void *data)
131 {
132 	struct of_phandle_iterator *it = *(void **)data;
133 	struct device_node *np = it->node;
134 	int err;
135 
136 	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
137 			    "#stream-id-cells", -1)
138 		if (it->node == np) {
139 			*(void **)data = dev;
140 			return 1;
141 		}
142 	it->node = np;
143 	return err == -ENOENT ? 0 : err;
144 }
145 
146 static int arm_smmu_register_legacy_master(struct device *dev,
147 					   struct arm_smmu_device **smmu)
148 {
149 	struct device *smmu_dev;
150 	struct device_node *np;
151 	struct of_phandle_iterator it;
152 	void *data = &it;
153 	u32 *sids;
154 	__be32 pci_sid;
155 	int err;
156 
157 	np = dev_get_dev_node(dev);
158 	if (!np || !of_property_present(np, "#stream-id-cells")) {
159 		of_node_put(np);
160 		return -ENODEV;
161 	}
162 
163 	it.node = np;
164 	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
165 				     __find_legacy_master_phandle);
166 	smmu_dev = data;
167 	of_node_put(np);
168 	if (err == 0)
169 		return -ENODEV;
170 	if (err < 0)
171 		return err;
172 
173 	if (dev_is_pci(dev)) {
174 		/* "mmu-masters" assumes Stream ID == Requester ID */
175 		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
176 				       &pci_sid);
177 		it.cur = &pci_sid;
178 		it.cur_count = 1;
179 	}
180 
181 	err = iommu_fwspec_init(dev, NULL);
182 	if (err)
183 		return err;
184 
185 	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
186 	if (!sids)
187 		return -ENOMEM;
188 
189 	*smmu = dev_get_drvdata(smmu_dev);
190 	of_phandle_iterator_args(&it, sids, it.cur_count);
191 	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
192 	kfree(sids);
193 	return err;
194 }
195 #else
196 static int arm_smmu_register_legacy_master(struct device *dev,
197 					   struct arm_smmu_device **smmu)
198 {
199 	return -ENODEV;
200 }
201 #endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
202 
203 static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
204 {
205 	clear_bit(idx, map);
206 }
207 
208 /* Wait for any pending TLB invalidations to complete */
209 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
210 				int sync, int status)
211 {
212 	unsigned int spin_cnt, delay;
213 	u32 reg;
214 
215 	if (smmu->impl && unlikely(smmu->impl->tlb_sync))
216 		return smmu->impl->tlb_sync(smmu, page, sync, status);
217 
218 	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
219 	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
220 		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
221 			reg = arm_smmu_readl(smmu, page, status);
222 			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
223 				return;
224 			cpu_relax();
225 		}
226 		udelay(delay);
227 	}
228 	dev_err_ratelimited(smmu->dev,
229 			    "TLB sync timed out -- SMMU may be deadlocked\n");
230 }
231 
232 static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
233 {
234 	unsigned long flags;
235 
236 	spin_lock_irqsave(&smmu->global_sync_lock, flags);
237 	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
238 			    ARM_SMMU_GR0_sTLBGSTATUS);
239 	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
240 }
241 
242 static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
243 {
244 	struct arm_smmu_device *smmu = smmu_domain->smmu;
245 	unsigned long flags;
246 
247 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
248 	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
249 			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
250 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
251 }
252 
253 static void arm_smmu_tlb_inv_context_s1(void *cookie)
254 {
255 	struct arm_smmu_domain *smmu_domain = cookie;
256 	/*
257 	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
258 	 * current CPU are visible beforehand.
259 	 */
260 	wmb();
261 	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
262 			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
263 	arm_smmu_tlb_sync_context(smmu_domain);
264 }
265 
266 static void arm_smmu_tlb_inv_context_s2(void *cookie)
267 {
268 	struct arm_smmu_domain *smmu_domain = cookie;
269 	struct arm_smmu_device *smmu = smmu_domain->smmu;
270 
271 	/* See above */
272 	wmb();
273 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
274 	arm_smmu_tlb_sync_global(smmu);
275 }
276 
277 static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
278 				      size_t granule, void *cookie, int reg)
279 {
280 	struct arm_smmu_domain *smmu_domain = cookie;
281 	struct arm_smmu_device *smmu = smmu_domain->smmu;
282 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
283 	int idx = cfg->cbndx;
284 
285 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
286 		wmb();
287 
288 	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
289 		iova = (iova >> 12) << 12;
290 		iova |= cfg->asid;
291 		do {
292 			arm_smmu_cb_write(smmu, idx, reg, iova);
293 			iova += granule;
294 		} while (size -= granule);
295 	} else {
296 		iova >>= 12;
297 		iova |= (u64)cfg->asid << 48;
298 		do {
299 			arm_smmu_cb_writeq(smmu, idx, reg, iova);
300 			iova += granule >> 12;
301 		} while (size -= granule);
302 	}
303 }
304 
305 static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
306 				      size_t granule, void *cookie, int reg)
307 {
308 	struct arm_smmu_domain *smmu_domain = cookie;
309 	struct arm_smmu_device *smmu = smmu_domain->smmu;
310 	int idx = smmu_domain->cfg.cbndx;
311 
312 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
313 		wmb();
314 
315 	iova >>= 12;
316 	do {
317 		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
318 			arm_smmu_cb_writeq(smmu, idx, reg, iova);
319 		else
320 			arm_smmu_cb_write(smmu, idx, reg, iova);
321 		iova += granule >> 12;
322 	} while (size -= granule);
323 }
324 
325 static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
326 				     size_t granule, void *cookie)
327 {
328 	struct arm_smmu_domain *smmu_domain = cookie;
329 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
330 
331 	if (cfg->flush_walk_prefer_tlbiasid) {
332 		arm_smmu_tlb_inv_context_s1(cookie);
333 	} else {
334 		arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
335 					  ARM_SMMU_CB_S1_TLBIVA);
336 		arm_smmu_tlb_sync_context(cookie);
337 	}
338 }
339 
340 static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
341 				     unsigned long iova, size_t granule,
342 				     void *cookie)
343 {
344 	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
345 				  ARM_SMMU_CB_S1_TLBIVAL);
346 }
347 
348 static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
349 				     size_t granule, void *cookie)
350 {
351 	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
352 				  ARM_SMMU_CB_S2_TLBIIPAS2);
353 	arm_smmu_tlb_sync_context(cookie);
354 }
355 
356 static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
357 				     unsigned long iova, size_t granule,
358 				     void *cookie)
359 {
360 	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
361 				  ARM_SMMU_CB_S2_TLBIIPAS2L);
362 }
363 
364 static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
365 					size_t granule, void *cookie)
366 {
367 	arm_smmu_tlb_inv_context_s2(cookie);
368 }
369 /*
370  * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
371  * almost negligible, but the benefit of getting the first one in as far ahead
372  * of the sync as possible is significant, hence we don't just make this a
373  * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
374  * think.
375  */
376 static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
377 					unsigned long iova, size_t granule,
378 					void *cookie)
379 {
380 	struct arm_smmu_domain *smmu_domain = cookie;
381 	struct arm_smmu_device *smmu = smmu_domain->smmu;
382 
383 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
384 		wmb();
385 
386 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
387 }
388 
389 static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
390 	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
391 	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
392 	.tlb_add_page	= arm_smmu_tlb_add_page_s1,
393 };
394 
395 static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
396 	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
397 	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
398 	.tlb_add_page	= arm_smmu_tlb_add_page_s2,
399 };
400 
401 static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
402 	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
403 	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2_v1,
404 	.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
405 };
406 
407 
408 void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx,
409 				      struct arm_smmu_context_fault_info *cfi)
410 {
411 	cfi->iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
412 	cfi->fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
413 	cfi->fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
414 	cfi->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
415 }
416 
417 void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx,
418 				       const struct arm_smmu_context_fault_info *cfi)
419 {
420 	dev_err(smmu->dev,
421 		"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
422 		cfi->fsr, cfi->iova, cfi->fsynr, cfi->cbfrsynra, idx);
423 
424 	dev_err(smmu->dev, "FSR    = %08x [%s%sFormat=%u%s%s%s%s%s%s%s%s], SID=0x%x\n",
425 		cfi->fsr,
426 		(cfi->fsr & ARM_SMMU_CB_FSR_MULTI)  ? "MULTI " : "",
427 		(cfi->fsr & ARM_SMMU_CB_FSR_SS)     ? "SS " : "",
428 		(u32)FIELD_GET(ARM_SMMU_CB_FSR_FORMAT, cfi->fsr),
429 		(cfi->fsr & ARM_SMMU_CB_FSR_UUT)    ? " UUT" : "",
430 		(cfi->fsr & ARM_SMMU_CB_FSR_ASF)    ? " ASF" : "",
431 		(cfi->fsr & ARM_SMMU_CB_FSR_TLBLKF) ? " TLBLKF" : "",
432 		(cfi->fsr & ARM_SMMU_CB_FSR_TLBMCF) ? " TLBMCF" : "",
433 		(cfi->fsr & ARM_SMMU_CB_FSR_EF)     ? " EF" : "",
434 		(cfi->fsr & ARM_SMMU_CB_FSR_PF)     ? " PF" : "",
435 		(cfi->fsr & ARM_SMMU_CB_FSR_AFF)    ? " AFF" : "",
436 		(cfi->fsr & ARM_SMMU_CB_FSR_TF)     ? " TF" : "",
437 		cfi->cbfrsynra);
438 
439 	dev_err(smmu->dev, "FSYNR0 = %08x [S1CBNDX=%u%s%s%s%s%s%s PLVL=%u]\n",
440 		cfi->fsynr,
441 		(u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_S1CBNDX, cfi->fsynr),
442 		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_AFR) ? " AFR" : "",
443 		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_PTWF) ? " PTWF" : "",
444 		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_NSATTR) ? " NSATTR" : "",
445 		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_IND) ? " IND" : "",
446 		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_PNU) ? " PNU" : "",
447 		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_WNR) ? " WNR" : "",
448 		(u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_PLVL, cfi->fsynr));
449 }
450 
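/*
 * Context fault IRQ handler: read the fault syndrome, give any fault
 * handler registered on the domain a chance via report_iommu_fault(),
 * log it (rate-limited) if it goes unhandled, then clear the FSR.
 */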
451 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
452 {
453 	struct arm_smmu_context_fault_info cfi;
454 	struct arm_smmu_domain *smmu_domain = dev;
455 	struct arm_smmu_device *smmu = smmu_domain->smmu;
456 	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
457 				      DEFAULT_RATELIMIT_BURST);
458 	int idx = smmu_domain->cfg.cbndx;
459 	int ret;
460 
461 	arm_smmu_read_context_fault_info(smmu, idx, &cfi);
462 
463 	if (!(cfi.fsr & ARM_SMMU_CB_FSR_FAULT))
464 		return IRQ_NONE;
465 
466 	ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova,
467 		cfi.fsynr & ARM_SMMU_CB_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
468 
469 	if (ret == -ENOSYS && __ratelimit(&rs))
470 		arm_smmu_print_context_fault_info(smmu, idx, &cfi);
471 
472 	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
473 	return IRQ_HANDLED;
474 }
475 
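/*
 * Global fault IRQ handler: decode sGFSR/sGFSYNRn, warn specifically
 * about Unidentified Stream Faults when bypass is disabled by default
 * (a device emitted a Stream ID with no matching entry), then clear
 * the global fault status.
 */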
476 static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
477 {
478 	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
479 	struct arm_smmu_device *smmu = dev;
480 	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
481 				      DEFAULT_RATELIMIT_BURST);
482 
483 	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
484 	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
485 	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
486 	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
487 
488 	if (!gfsr)
489 		return IRQ_NONE;
490 
491 	if (__ratelimit(&rs)) {
492 		if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
493 		    (gfsr & ARM_SMMU_sGFSR_USF))
494 			dev_err(smmu->dev,
495 				"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
496 				(u16)gfsynr1);
497 		else
498 			dev_err(smmu->dev,
499 				"Unexpected global fault, this could be serious\n");
500 		dev_err(smmu->dev,
501 			"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
502 			gfsr, gfsynr0, gfsynr1, gfsynr2);
503 	}
504 
505 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
506 	return IRQ_HANDLED;
507 }
508 
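/*
 * Capture the per-context-bank register values (TCR, TTBRs, MAIRs)
 * derived from the io-pgtable configuration; they are written out to
 * the hardware by arm_smmu_write_context_bank().
 */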
509 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
510 				       struct io_pgtable_cfg *pgtbl_cfg)
511 {
512 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
513 	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
514 	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
515 
516 	cb->cfg = cfg;
517 
518 	/* TCR */
519 	if (stage1) {
520 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
521 			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
522 		} else {
523 			cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
524 			cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
525 			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
526 				cb->tcr[1] |= ARM_SMMU_TCR2_AS;
527 			else
528 				cb->tcr[0] |= ARM_SMMU_TCR_EAE;
529 		}
530 	} else {
531 		cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
532 	}
533 
534 	/* TTBRs */
535 	if (stage1) {
536 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
537 			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
538 			cb->ttbr[1] = 0;
539 		} else {
540 			cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
541 						 cfg->asid);
542 			cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
543 						 cfg->asid);
544 
545 			if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
546 				cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
547 			else
548 				cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
549 		}
550 	} else {
551 		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
552 	}
553 
554 	/* MAIRs (stage-1 only) */
555 	if (stage1) {
556 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
557 			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
558 			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
559 		} else {
560 			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
561 			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
562 		}
563 	}
564 }
565 
566 void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
567 {
568 	u32 reg;
569 	bool stage1;
570 	struct arm_smmu_cb *cb = &smmu->cbs[idx];
571 	struct arm_smmu_cfg *cfg = cb->cfg;
572 
573 	/* Unassigned context banks only need disabling */
574 	if (!cfg) {
575 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
576 		return;
577 	}
578 
579 	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
580 
581 	/* CBA2R */
582 	if (smmu->version > ARM_SMMU_V1) {
583 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
584 			reg = ARM_SMMU_CBA2R_VA64;
585 		else
586 			reg = 0;
587 		/* 16-bit VMIDs live in CBA2R */
588 		if (smmu->features & ARM_SMMU_FEAT_VMID16)
589 			reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
590 
591 		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
592 	}
593 
594 	/* CBAR */
595 	reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
596 	if (smmu->version < ARM_SMMU_V2)
597 		reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
598 
599 	/*
600 	 * Use the weakest shareability/memory types, so they are
601 	 * overridden by the ttbcr/pte.
602 	 */
603 	if (stage1) {
604 		reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
605 				  ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
606 		       FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
607 				  ARM_SMMU_CBAR_S1_MEMATTR_WB);
608 	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
609 		/* 8-bit VMIDs live in CBAR */
610 		reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
611 	}
612 	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
613 
614 	/*
615 	 * TCR
616 	 * We must write this before the TTBRs, since it determines the
617 	 * access behaviour of some fields (in particular, ASID[15:8]).
618 	 */
619 	if (stage1 && smmu->version > ARM_SMMU_V1)
620 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
621 	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
622 
623 	/* TTBRs */
624 	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
625 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
626 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
627 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
628 	} else {
629 		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
630 		if (stage1)
631 			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
632 					   cb->ttbr[1]);
633 	}
634 
635 	/* MAIRs (stage-1 only) */
636 	if (stage1) {
637 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
638 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
639 	}
640 
641 	/* SCTLR */
642 	reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
643 	      ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
644 	if (stage1)
645 		reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
646 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
647 		reg |= ARM_SMMU_SCTLR_E;
648 
649 	if (smmu->impl && smmu->impl->write_sctlr)
650 		smmu->impl->write_sctlr(smmu, idx, reg);
651 	else
652 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
653 }
654 
655 static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
656 				       struct arm_smmu_device *smmu,
657 				       struct device *dev, unsigned int start)
658 {
659 	if (smmu->impl && smmu->impl->alloc_context_bank)
660 		return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
661 
662 	return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
663 }
664 
665 static int arm_smmu_init_domain_context(struct arm_smmu_domain *smmu_domain,
666 					struct arm_smmu_device *smmu,
667 					struct device *dev)
668 {
669 	int irq, start, ret = 0;
670 	unsigned long ias, oas;
671 	struct io_pgtable_ops *pgtbl_ops;
672 	struct io_pgtable_cfg pgtbl_cfg;
673 	enum io_pgtable_fmt fmt;
674 	struct iommu_domain *domain = &smmu_domain->domain;
675 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
676 	irqreturn_t (*context_fault)(int irq, void *dev);
677 
678 	mutex_lock(&smmu_domain->init_mutex);
679 	if (smmu_domain->smmu)
680 		goto out_unlock;
681 
682 	/*
683 	 * Mapping the requested stage onto what we support is surprisingly
684 	 * complicated, mainly because the spec allows S1+S2 SMMUs without
685 	 * support for nested translation. That means we end up with the
686 	 * following table:
687 	 *
688 	 * Requested        Supported        Actual
689 	 *     S1               N              S1
690 	 *     S1             S1+S2            S1
691 	 *     S1               S2             S2
692 	 *     S1               S1             S1
693 	 *     N                N              N
694 	 *     N              S1+S2            S2
695 	 *     N                S2             S2
696 	 *     N                S1             S1
697 	 *
698 	 * Note that you can't actually request stage-2 mappings.
699 	 */
700 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
701 		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
702 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
703 		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
704 
705 	/*
706 	 * Choosing a suitable context format is even more fiddly. Until we
707 	 * grow some way for the caller to express a preference, and/or move
708 	 * the decision into the io-pgtable code where it arguably belongs,
709 	 * just aim for the closest thing to the rest of the system, and hope
710 	 * that the hardware isn't esoteric enough that we can't assume AArch64
711 	 * support to be a superset of AArch32 support...
712 	 */
713 	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
714 		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
715 	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
716 	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
717 	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
718 	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
719 		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
720 	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
721 	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
722 			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
723 			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
724 		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
725 
726 	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
727 		ret = -EINVAL;
728 		goto out_unlock;
729 	}
730 
731 	switch (smmu_domain->stage) {
732 	case ARM_SMMU_DOMAIN_S1:
733 		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
734 		start = smmu->num_s2_context_banks;
735 		ias = smmu->va_size;
736 		oas = smmu->ipa_size;
737 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
738 			fmt = ARM_64_LPAE_S1;
739 		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
740 			fmt = ARM_32_LPAE_S1;
741 			ias = min(ias, 32UL);
742 			oas = min(oas, 40UL);
743 		} else {
744 			fmt = ARM_V7S;
745 			ias = min(ias, 32UL);
746 			oas = min(oas, 32UL);
747 		}
748 		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
749 		break;
750 	case ARM_SMMU_DOMAIN_NESTED:
751 		/*
752 		 * We will likely want to change this if/when KVM gets
753 		 * involved.
754 		 */
755 	case ARM_SMMU_DOMAIN_S2:
756 		cfg->cbar = CBAR_TYPE_S2_TRANS;
757 		start = 0;
758 		ias = smmu->ipa_size;
759 		oas = smmu->pa_size;
760 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
761 			fmt = ARM_64_LPAE_S2;
762 		} else {
763 			fmt = ARM_32_LPAE_S2;
764 			ias = min(ias, 40UL);
765 			oas = min(oas, 40UL);
766 		}
767 		if (smmu->version == ARM_SMMU_V2)
768 			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
769 		else
770 			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
771 		break;
772 	default:
773 		ret = -EINVAL;
774 		goto out_unlock;
775 	}
776 
777 	ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
778 	if (ret < 0) {
779 		goto out_unlock;
780 	}
781 
782 	smmu_domain->smmu = smmu;
783 
784 	cfg->cbndx = ret;
785 	if (smmu->version < ARM_SMMU_V2) {
786 		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
787 		cfg->irptndx %= smmu->num_context_irqs;
788 	} else {
789 		cfg->irptndx = cfg->cbndx;
790 	}
791 
792 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
793 		cfg->vmid = cfg->cbndx + 1;
794 	else
795 		cfg->asid = cfg->cbndx;
796 
797 	pgtbl_cfg = (struct io_pgtable_cfg) {
798 		.pgsize_bitmap	= smmu->pgsize_bitmap,
799 		.ias		= ias,
800 		.oas		= oas,
801 		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
802 		.tlb		= smmu_domain->flush_ops,
803 		.iommu_dev	= smmu->dev,
804 	};
805 
806 	if (smmu->impl && smmu->impl->init_context) {
807 		ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
808 		if (ret)
809 			goto out_clear_smmu;
810 	}
811 
812 	if (smmu_domain->pgtbl_quirks)
813 		pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;
814 
815 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
816 	if (!pgtbl_ops) {
817 		ret = -ENOMEM;
818 		goto out_clear_smmu;
819 	}
820 
821 	/* Update the domain's page sizes to reflect the page table format */
822 	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
823 
824 	if (pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
825 		domain->geometry.aperture_start = ~0UL << ias;
826 		domain->geometry.aperture_end = ~0UL;
827 	} else {
828 		domain->geometry.aperture_end = (1UL << ias) - 1;
829 	}
830 
831 	domain->geometry.force_aperture = true;
832 
833 	/* Initialise the context bank with our page table cfg */
834 	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
835 	arm_smmu_write_context_bank(smmu, cfg->cbndx);
836 
837 	/*
838 	 * Request context fault interrupt. Do this last to avoid the
839 	 * handler seeing a half-initialised domain state.
840 	 */
841 	irq = smmu->irqs[cfg->irptndx];
842 
843 	if (smmu->impl && smmu->impl->context_fault)
844 		context_fault = smmu->impl->context_fault;
845 	else
846 		context_fault = arm_smmu_context_fault;
847 
848 	if (smmu->impl && smmu->impl->context_fault_needs_threaded_irq)
849 		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
850 						context_fault,
851 						IRQF_ONESHOT | IRQF_SHARED,
852 						"arm-smmu-context-fault",
853 						smmu_domain);
854 	else
855 		ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
856 				       "arm-smmu-context-fault", smmu_domain);
857 
858 	if (ret < 0) {
859 		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
860 			cfg->irptndx, irq);
861 		cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
862 	}
863 
864 	mutex_unlock(&smmu_domain->init_mutex);
865 
866 	/* Publish page table ops for map/unmap */
867 	smmu_domain->pgtbl_ops = pgtbl_ops;
868 	return 0;
869 
870 out_clear_smmu:
871 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
872 	smmu_domain->smmu = NULL;
873 out_unlock:
874 	mutex_unlock(&smmu_domain->init_mutex);
875 	return ret;
876 }
877 
878 static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
879 {
880 	struct arm_smmu_device *smmu = smmu_domain->smmu;
881 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
882 	int ret, irq;
883 
884 	if (!smmu)
885 		return;
886 
887 	ret = arm_smmu_rpm_get(smmu);
888 	if (ret < 0)
889 		return;
890 
891 	/*
892 	 * Disable the context bank and free the page tables before freeing
893 	 * it.
894 	 */
895 	smmu->cbs[cfg->cbndx].cfg = NULL;
896 	arm_smmu_write_context_bank(smmu, cfg->cbndx);
897 
898 	if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
899 		irq = smmu->irqs[cfg->irptndx];
900 		devm_free_irq(smmu->dev, irq, smmu_domain);
901 	}
902 
903 	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
904 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
905 
906 	arm_smmu_rpm_put(smmu);
907 }
908 
909 static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
910 {
911 	struct arm_smmu_domain *smmu_domain;
912 
913 	/*
914 	 * Allocate the domain and initialise some of its data structures.
915 	 * We can't really do anything meaningful until we've added a
916 	 * master.
917 	 */
918 	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
919 	if (!smmu_domain)
920 		return NULL;
921 
922 	mutex_init(&smmu_domain->init_mutex);
923 	spin_lock_init(&smmu_domain->cb_lock);
924 
925 	return &smmu_domain->domain;
926 }
927 
928 static void arm_smmu_domain_free(struct iommu_domain *domain)
929 {
930 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
931 
932 	/*
933 	 * Free the domain resources. We assume that all devices have
934 	 * already been detached.
935 	 */
936 	arm_smmu_destroy_domain_context(smmu_domain);
937 	kfree(smmu_domain);
938 }
939 
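/*
 * Program SMR[idx] from the cached entry. With EXIDS the valid bit is
 * carried by the corresponding S2CR (EXIDVALID) instead, so SMR_VALID
 * is only set here for the non-EXIDS case.
 */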
940 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
941 {
942 	struct arm_smmu_smr *smr = smmu->smrs + idx;
943 	u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
944 		  FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
945 
946 	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
947 		reg |= ARM_SMMU_SMR_VALID;
948 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
949 }
950 
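/*
 * Program S2CR[idx] (type, context bank index and privilege config),
 * deferring to the implementation hook if one is provided.
 */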
951 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
952 {
953 	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
954 	u32 reg;
955 
956 	if (smmu->impl && smmu->impl->write_s2cr) {
957 		smmu->impl->write_s2cr(smmu, idx);
958 		return;
959 	}
960 
961 	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
962 	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
963 	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
964 
965 	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
966 	    smmu->smrs[idx].valid)
967 		reg |= ARM_SMMU_S2CR_EXIDVALID;
968 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
969 }
970 
971 static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
972 {
973 	arm_smmu_write_s2cr(smmu, idx);
974 	if (smmu->smrs)
975 		arm_smmu_write_smr(smmu, idx);
976 }
977 
978 /*
979  * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
980  * should be called after sCR0 is written.
981  */
982 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
983 {
984 	u32 smr;
985 	int i;
986 
987 	if (!smmu->smrs)
988 		return;
989 	/*
990 	 * If we've had to accommodate firmware memory regions, we may
991 	 * have live SMRs by now; tread carefully...
992 	 *
993 	 * Somewhat perversely, not having a free SMR for this test implies we
994 	 * can get away without it anyway, as we'll only be able to 'allocate'
995 	 * these SMRs for the ID/mask values we're already trusting to be OK.
996 	 */
997 	for (i = 0; i < smmu->num_mapping_groups; i++)
998 		if (!smmu->smrs[i].valid)
999 			goto smr_ok;
1000 	return;
1001 smr_ok:
1002 	/*
1003 	 * SMR.ID bits may not be preserved if the corresponding MASK
1004 	 * bits are set, so check each one separately. We can reject
1005 	 * masters later if they try to claim IDs outside these masks.
1006 	 */
1007 	smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
1008 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
1009 	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
1010 	smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
1011 
1012 	smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
1013 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
1014 	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
1015 	smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
1016 }
1017 
1018 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
1019 {
1020 	struct arm_smmu_smr *smrs = smmu->smrs;
1021 	int i, free_idx = -ENOSPC;
1022 
1023 	/* Stream indexing is blissfully easy */
1024 	if (!smrs)
1025 		return id;
1026 
1027 	/* Validating SMRs is... less so */
1028 	for (i = 0; i < smmu->num_mapping_groups; ++i) {
1029 		if (!smrs[i].valid) {
1030 			/*
1031 			 * Note the first free entry we come across, which
1032 			 * we'll claim in the end if nothing else matches.
1033 			 */
1034 			if (free_idx < 0)
1035 				free_idx = i;
1036 			continue;
1037 		}
1038 		/*
1039 		 * If the new entry is _entirely_ matched by an existing entry,
1040 		 * then reuse that, with the guarantee that there also cannot
1041 		 * be any subsequent conflicting entries. In normal use we'd
1042 		 * expect simply identical entries for this case, but there's
1043 		 * no harm in accommodating the generalisation.
1044 		 */
1045 		if ((mask & smrs[i].mask) == mask &&
1046 		    !((id ^ smrs[i].id) & ~smrs[i].mask))
1047 			return i;
1048 		/*
1049 		 * If the new entry has any other overlap with an existing one,
1050 		 * though, then there always exists at least one stream ID
1051 		 * which would cause a conflict, and we can't allow that risk.
1052 		 */
1053 		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1054 			return -EINVAL;
1055 	}
1056 
1057 	return free_idx;
1058 }
1059 
1060 static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1061 {
1062 	if (--smmu->s2crs[idx].count)
1063 		return false;
1064 
1065 	smmu->s2crs[idx] = s2cr_init_val;
1066 	if (smmu->smrs)
1067 		smmu->smrs[idx].valid = false;
1068 
1069 	return true;
1070 }
1071 
1072 static int arm_smmu_master_alloc_smes(struct device *dev)
1073 {
1074 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1075 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1076 	struct arm_smmu_device *smmu = cfg->smmu;
1077 	struct arm_smmu_smr *smrs = smmu->smrs;
1078 	int i, idx, ret;
1079 
1080 	mutex_lock(&smmu->stream_map_mutex);
1081 	/* Figure out a viable stream map entry allocation */
1082 	for_each_cfg_sme(cfg, fwspec, i, idx) {
1083 		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1084 		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1085 
1086 		if (idx != INVALID_SMENDX) {
1087 			ret = -EEXIST;
1088 			goto out_err;
1089 		}
1090 
1091 		ret = arm_smmu_find_sme(smmu, sid, mask);
1092 		if (ret < 0)
1093 			goto out_err;
1094 
1095 		idx = ret;
1096 		if (smrs && smmu->s2crs[idx].count == 0) {
1097 			smrs[idx].id = sid;
1098 			smrs[idx].mask = mask;
1099 			smrs[idx].valid = true;
1100 		}
1101 		smmu->s2crs[idx].count++;
1102 		cfg->smendx[i] = (s16)idx;
1103 	}
1104 
1105 	/* It worked! Now, poke the actual hardware */
1106 	for_each_cfg_sme(cfg, fwspec, i, idx)
1107 		arm_smmu_write_sme(smmu, idx);
1108 
1109 	mutex_unlock(&smmu->stream_map_mutex);
1110 	return 0;
1111 
1112 out_err:
1113 	while (i--) {
1114 		arm_smmu_free_sme(smmu, cfg->smendx[i]);
1115 		cfg->smendx[i] = INVALID_SMENDX;
1116 	}
1117 	mutex_unlock(&smmu->stream_map_mutex);
1118 	return ret;
1119 }
1120 
1121 static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
1122 				      struct iommu_fwspec *fwspec)
1123 {
1124 	struct arm_smmu_device *smmu = cfg->smmu;
1125 	int i, idx;
1126 
1127 	mutex_lock(&smmu->stream_map_mutex);
1128 	for_each_cfg_sme(cfg, fwspec, i, idx) {
1129 		if (arm_smmu_free_sme(smmu, idx))
1130 			arm_smmu_write_sme(smmu, idx);
1131 		cfg->smendx[i] = INVALID_SMENDX;
1132 	}
1133 	mutex_unlock(&smmu->stream_map_mutex);
1134 }
1135 
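/*
 * Point each of the master's stream mapping entries at the given S2CR
 * type and context bank, skipping entries that already match.
 */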
1136 static void arm_smmu_master_install_s2crs(struct arm_smmu_master_cfg *cfg,
1137 					  enum arm_smmu_s2cr_type type,
1138 					  u8 cbndx, struct iommu_fwspec *fwspec)
1139 {
1140 	struct arm_smmu_device *smmu = cfg->smmu;
1141 	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1142 	int i, idx;
1143 
1144 	for_each_cfg_sme(cfg, fwspec, i, idx) {
1145 		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
1146 			continue;
1147 
1148 		s2cr[idx].type = type;
1149 		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1150 		s2cr[idx].cbndx = cbndx;
1151 		arm_smmu_write_s2cr(smmu, idx);
1152 	}
1153 }
1154 
1155 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1156 {
1157 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1158 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1159 	struct arm_smmu_master_cfg *cfg;
1160 	struct arm_smmu_device *smmu;
1161 	int ret;
1162 
1163 	/*
1164 	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1165 	 * domains between of_xlate() and probe_device() - we have no way to cope
1166 	 * with that, so until ARM gets converted to rely on groups and default
1167 	 * domains, just say no (but more politely than by dereferencing NULL).
1168 	 * This should be at least a WARN_ON once that's sorted.
1169 	 */
1170 	cfg = dev_iommu_priv_get(dev);
1171 	if (!cfg)
1172 		return -ENODEV;
1173 
1174 	smmu = cfg->smmu;
1175 
1176 	ret = arm_smmu_rpm_get(smmu);
1177 	if (ret < 0)
1178 		return ret;
1179 
1180 	/* Ensure that the domain is finalised */
1181 	ret = arm_smmu_init_domain_context(smmu_domain, smmu, dev);
1182 	if (ret < 0)
1183 		goto rpm_put;
1184 
1185 	/*
1186 	 * Sanity check the domain. We don't support domains across
1187 	 * different SMMUs.
1188 	 */
1189 	if (smmu_domain->smmu != smmu) {
1190 		ret = -EINVAL;
1191 		goto rpm_put;
1192 	}
1193 
1194 	/* Looks ok, so add the device to the domain */
1195 	arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_TRANS,
1196 				      smmu_domain->cfg.cbndx, fwspec);
1197 	arm_smmu_rpm_use_autosuspend(smmu);
1198 rpm_put:
1199 	arm_smmu_rpm_put(smmu);
1200 	return ret;
1201 }
1202 
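/*
 * Common path for the identity and blocked domains: steer the master's
 * streams directly to the requested S2CR type (bypass or fault), which
 * does not use a context bank.
 */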
1203 static int arm_smmu_attach_dev_type(struct device *dev,
1204 				    enum arm_smmu_s2cr_type type)
1205 {
1206 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1207 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1208 	struct arm_smmu_device *smmu;
1209 	int ret;
1210 
1211 	if (!cfg)
1212 		return -ENODEV;
1213 	smmu = cfg->smmu;
1214 
1215 	ret = arm_smmu_rpm_get(smmu);
1216 	if (ret < 0)
1217 		return ret;
1218 
1219 	arm_smmu_master_install_s2crs(cfg, type, 0, fwspec);
1220 	arm_smmu_rpm_use_autosuspend(smmu);
1221 	arm_smmu_rpm_put(smmu);
1222 	return 0;
1223 }
1224 
1225 static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
1226 					struct device *dev)
1227 {
1228 	return arm_smmu_attach_dev_type(dev, S2CR_TYPE_BYPASS);
1229 }
1230 
1231 static const struct iommu_domain_ops arm_smmu_identity_ops = {
1232 	.attach_dev = arm_smmu_attach_dev_identity,
1233 };
1234 
1235 static struct iommu_domain arm_smmu_identity_domain = {
1236 	.type = IOMMU_DOMAIN_IDENTITY,
1237 	.ops = &arm_smmu_identity_ops,
1238 };
1239 
1240 static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
1241 				       struct device *dev)
1242 {
1243 	return arm_smmu_attach_dev_type(dev, S2CR_TYPE_FAULT);
1244 }
1245 
1246 static const struct iommu_domain_ops arm_smmu_blocked_ops = {
1247 	.attach_dev = arm_smmu_attach_dev_blocked,
1248 };
1249 
1250 static struct iommu_domain arm_smmu_blocked_domain = {
1251 	.type = IOMMU_DOMAIN_BLOCKED,
1252 	.ops = &arm_smmu_blocked_ops,
1253 };
1254 
1255 static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
1256 			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
1257 			      int prot, gfp_t gfp, size_t *mapped)
1258 {
1259 	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1260 	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1261 	int ret;
1262 
1263 	if (!ops)
1264 		return -ENODEV;
1265 
1266 	arm_smmu_rpm_get(smmu);
1267 	ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
1268 	arm_smmu_rpm_put(smmu);
1269 
1270 	return ret;
1271 }
1272 
1273 static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
1274 				   size_t pgsize, size_t pgcount,
1275 				   struct iommu_iotlb_gather *iotlb_gather)
1276 {
1277 	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1278 	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1279 	size_t ret;
1280 
1281 	if (!ops)
1282 		return 0;
1283 
1284 	arm_smmu_rpm_get(smmu);
1285 	ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);
1286 	arm_smmu_rpm_put(smmu);
1287 
1288 	return ret;
1289 }
1290 
1291 static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1292 {
1293 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1294 	struct arm_smmu_device *smmu = smmu_domain->smmu;
1295 
1296 	if (smmu_domain->flush_ops) {
1297 		arm_smmu_rpm_get(smmu);
1298 		smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
1299 		arm_smmu_rpm_put(smmu);
1300 	}
1301 }
1302 
1303 static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1304 				struct iommu_iotlb_gather *gather)
1305 {
1306 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1307 	struct arm_smmu_device *smmu = smmu_domain->smmu;
1308 
1309 	if (!smmu)
1310 		return;
1311 
1312 	arm_smmu_rpm_get(smmu);
1313 	if (smmu->version == ARM_SMMU_V2 ||
1314 	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1315 		arm_smmu_tlb_sync_context(smmu_domain);
1316 	else
1317 		arm_smmu_tlb_sync_global(smmu);
1318 	arm_smmu_rpm_put(smmu);
1319 }
1320 
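/*
 * Translate an IOVA with the hardware ATS1PR operation, polling ATSR
 * for completion; on timeout, fall back to a software walk of the
 * io-pgtable.
 */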
1321 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1322 					      dma_addr_t iova)
1323 {
1324 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1325 	struct arm_smmu_device *smmu = smmu_domain->smmu;
1326 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1327 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1328 	struct device *dev = smmu->dev;
1329 	void __iomem *reg;
1330 	u32 tmp;
1331 	u64 phys;
1332 	unsigned long va, flags;
1333 	int ret, idx = cfg->cbndx;
1334 	phys_addr_t addr = 0;
1335 
1336 	ret = arm_smmu_rpm_get(smmu);
1337 	if (ret < 0)
1338 		return 0;
1339 
1340 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
1341 	va = iova & ~0xfffUL;
1342 	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1343 		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1344 	else
1345 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1346 
1347 	reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1348 	if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_CB_ATSR_ACTIVE),
1349 				      5, 50)) {
1350 		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1351 		dev_err(dev,
1352 			"iova to phys timed out on %pad. Falling back to software table walk.\n",
1353 			&iova);
1354 		arm_smmu_rpm_put(smmu);
1355 		return ops->iova_to_phys(ops, iova);
1356 	}
1357 
1358 	phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
1359 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1360 	if (phys & ARM_SMMU_CB_PAR_F) {
1361 		dev_err(dev, "translation fault!\n");
1362 		dev_err(dev, "PAR = 0x%llx\n", phys);
1363 		goto out;
1364 	}
1365 
1366 	addr = (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1367 out:
1368 	arm_smmu_rpm_put(smmu);
1369 
1370 	return addr;
1371 }
1372 
1373 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1374 					dma_addr_t iova)
1375 {
1376 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1377 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1378 
1379 	if (!ops)
1380 		return 0;
1381 
1382 	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1383 			smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1384 		return arm_smmu_iova_to_phys_hard(domain, iova);
1385 
1386 	return ops->iova_to_phys(ops, iova);
1387 }
1388 
1389 static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
1390 {
1391 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1392 
1393 	switch (cap) {
1394 	case IOMMU_CAP_CACHE_COHERENCY:
1395 		/*
1396 		 * It's overwhelmingly the case in practice that when the pagetable
1397 		 * walk interface is connected to a coherent interconnect, all the
1398 		 * translation interfaces are too. Furthermore if the device is
1399 		 * natively coherent, then its translation interface must also be.
1400 		 */
1401 		return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
1402 			device_get_dma_attr(dev) == DEV_DMA_COHERENT;
1403 	case IOMMU_CAP_NOEXEC:
1404 	case IOMMU_CAP_DEFERRED_FLUSH:
1405 		return true;
1406 	default:
1407 		return false;
1408 	}
1409 }
1410 
1411 static
1412 struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
1413 {
1414 	struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
1415 							  fwnode);
1416 	put_device(dev);
1417 	return dev ? dev_get_drvdata(dev) : NULL;
1418 }
1419 
1420 static struct iommu_device *arm_smmu_probe_device(struct device *dev)
1421 {
1422 	struct arm_smmu_device *smmu = NULL;
1423 	struct arm_smmu_master_cfg *cfg;
1424 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1425 	int i, ret;
1426 
1427 	if (using_legacy_binding) {
1428 		ret = arm_smmu_register_legacy_master(dev, &smmu);
1429 
1430 		/*
1431 		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1432 		 * will allocate/initialise a new one. Thus we need to update fwspec for
1433 		 * later use.
1434 		 */
1435 		fwspec = dev_iommu_fwspec_get(dev);
1436 		if (ret)
1437 			goto out_free;
1438 	} else {
1439 		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
1440 
1441 		/*
1442 		 * Defer probe if the relevant SMMU instance hasn't finished
1443 		 * probing yet. This is a fragile hack and we'd ideally
1444 		 * avoid this race in the core code. Until that's ironed
1445 		 * out, however, this is the most pragmatic option on the
1446 		 * table.
1447 		 */
1448 		if (!smmu)
1449 			return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER,
1450 						"smmu dev has not bound yet\n"));
1451 	}
1452 
1453 	ret = -EINVAL;
1454 	for (i = 0; i < fwspec->num_ids; i++) {
1455 		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1456 		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1457 
1458 		if (sid & ~smmu->streamid_mask) {
1459 			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
1460 				sid, smmu->streamid_mask);
1461 			goto out_free;
1462 		}
1463 		if (mask & ~smmu->smr_mask_mask) {
1464 			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
1465 				mask, smmu->smr_mask_mask);
1466 			goto out_free;
1467 		}
1468 	}
1469 
1470 	ret = -ENOMEM;
1471 	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1472 		      GFP_KERNEL);
1473 	if (!cfg)
1474 		goto out_free;
1475 
1476 	cfg->smmu = smmu;
1477 	dev_iommu_priv_set(dev, cfg);
1478 	while (i--)
1479 		cfg->smendx[i] = INVALID_SMENDX;
1480 
1481 	ret = arm_smmu_rpm_get(smmu);
1482 	if (ret < 0)
1483 		goto out_cfg_free;
1484 
1485 	ret = arm_smmu_master_alloc_smes(dev);
1486 	arm_smmu_rpm_put(smmu);
1487 
1488 	if (ret)
1489 		goto out_cfg_free;
1490 
1491 	device_link_add(dev, smmu->dev,
1492 			DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1493 
1494 	return &smmu->iommu;
1495 
1496 out_cfg_free:
1497 	kfree(cfg);
1498 out_free:
1499 	iommu_fwspec_free(dev);
1500 	return ERR_PTR(ret);
1501 }
1502 
1503 static void arm_smmu_release_device(struct device *dev)
1504 {
1505 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1506 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1507 	int ret;
1508 
1509 	ret = arm_smmu_rpm_get(cfg->smmu);
1510 	if (ret < 0)
1511 		return;
1512 
1513 	arm_smmu_master_free_smes(cfg, fwspec);
1514 
1515 	arm_smmu_rpm_put(cfg->smmu);
1516 
1517 	kfree(cfg);
1518 }
1519 
1520 static void arm_smmu_probe_finalize(struct device *dev)
1521 {
1522 	struct arm_smmu_master_cfg *cfg;
1523 	struct arm_smmu_device *smmu;
1524 
1525 	cfg = dev_iommu_priv_get(dev);
1526 	smmu = cfg->smmu;
1527 
1528 	if (smmu->impl && smmu->impl->probe_finalize)
1529 		smmu->impl->probe_finalize(smmu, dev);
1530 }
1531 
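/*
 * Masters sharing any stream mapping entry must share an IOMMU group:
 * reuse a group already recorded against the S2CRs if possible,
 * otherwise fall back to the bus-default group and record it.
 */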
1532 static struct iommu_group *arm_smmu_device_group(struct device *dev)
1533 {
1534 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1535 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1536 	struct arm_smmu_device *smmu = cfg->smmu;
1537 	struct iommu_group *group = NULL;
1538 	int i, idx;
1539 
1540 	mutex_lock(&smmu->stream_map_mutex);
1541 	for_each_cfg_sme(cfg, fwspec, i, idx) {
1542 		if (group && smmu->s2crs[idx].group &&
1543 		    group != smmu->s2crs[idx].group) {
1544 			mutex_unlock(&smmu->stream_map_mutex);
1545 			return ERR_PTR(-EINVAL);
1546 		}
1547 
1548 		group = smmu->s2crs[idx].group;
1549 	}
1550 
1551 	if (group) {
1552 		mutex_unlock(&smmu->stream_map_mutex);
1553 		return iommu_group_ref_get(group);
1554 	}
1555 
1556 	if (dev_is_pci(dev))
1557 		group = pci_device_group(dev);
1558 	else if (dev_is_fsl_mc(dev))
1559 		group = fsl_mc_device_group(dev);
1560 	else
1561 		group = generic_device_group(dev);
1562 
1563 	/* Remember group for faster lookups */
1564 	if (!IS_ERR(group))
1565 		for_each_cfg_sme(cfg, fwspec, i, idx)
1566 			smmu->s2crs[idx].group = group;
1567 
1568 	mutex_unlock(&smmu->stream_map_mutex);
1569 	return group;
1570 }
1571 
1572 static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
1573 		unsigned long quirks)
1574 {
1575 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1576 	int ret = 0;
1577 
1578 	mutex_lock(&smmu_domain->init_mutex);
1579 	if (smmu_domain->smmu)
1580 		ret = -EPERM;
1581 	else
1582 		smmu_domain->pgtbl_quirks = quirks;
1583 	mutex_unlock(&smmu_domain->init_mutex);
1584 
1585 	return ret;
1586 }
1587 
1588 static int arm_smmu_of_xlate(struct device *dev,
1589 			     const struct of_phandle_args *args)
1590 {
1591 	u32 mask, fwid = 0;
1592 
1593 	if (args->args_count > 0)
1594 		fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
1595 
1596 	if (args->args_count > 1)
1597 		fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
1598 	else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1599 		fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
1600 
1601 	return iommu_fwspec_add_ids(dev, &fwid, 1);
1602 }
1603 
1604 static void arm_smmu_get_resv_regions(struct device *dev,
1605 				      struct list_head *head)
1606 {
1607 	struct iommu_resv_region *region;
1608 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1609 
1610 	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1611 					 prot, IOMMU_RESV_SW_MSI, GFP_KERNEL);
1612 	if (!region)
1613 		return;
1614 
1615 	list_add_tail(&region->list, head);
1616 
1617 	iommu_dma_get_resv_regions(dev, head);
1618 }
1619 
1620 static int arm_smmu_def_domain_type(struct device *dev)
1621 {
1622 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1623 	const struct arm_smmu_impl *impl = cfg->smmu->impl;
1624 
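	/*
	 * With the deprecated "mmu-masters" binding in use, default masters
	 * to an identity domain rather than a DMA domain.
	 */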
1625 	if (using_legacy_binding)
1626 		return IOMMU_DOMAIN_IDENTITY;
1627 
1628 	if (impl && impl->def_domain_type)
1629 		return impl->def_domain_type(dev);
1630 
1631 	return 0;
1632 }
1633 
1634 static struct iommu_ops arm_smmu_ops = {
1635 	.identity_domain	= &arm_smmu_identity_domain,
1636 	.blocked_domain		= &arm_smmu_blocked_domain,
1637 	.capable		= arm_smmu_capable,
1638 	.domain_alloc_paging	= arm_smmu_domain_alloc_paging,
1639 	.probe_device		= arm_smmu_probe_device,
1640 	.release_device		= arm_smmu_release_device,
1641 	.probe_finalize		= arm_smmu_probe_finalize,
1642 	.device_group		= arm_smmu_device_group,
1643 	.of_xlate		= arm_smmu_of_xlate,
1644 	.get_resv_regions	= arm_smmu_get_resv_regions,
1645 	.def_domain_type	= arm_smmu_def_domain_type,
1646 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
1647 	.owner			= THIS_MODULE,
1648 	.default_domain_ops = &(const struct iommu_domain_ops) {
1649 		.attach_dev		= arm_smmu_attach_dev,
1650 		.map_pages		= arm_smmu_map_pages,
1651 		.unmap_pages		= arm_smmu_unmap_pages,
1652 		.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
1653 		.iotlb_sync		= arm_smmu_iotlb_sync,
1654 		.iova_to_phys		= arm_smmu_iova_to_phys,
1655 		.set_pgtable_quirks	= arm_smmu_set_pgtable_quirks,
1656 		.free			= arm_smmu_domain_free,
1657 	}
1658 };
1659 
1660 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1661 {
1662 	int i;
1663 	u32 reg;
1664 
1665 	/* clear global FSR */
1666 	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1667 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
1668 
1669 	/*
1670 	 * Reset stream mapping groups: Initial values mark all SMRn as
1671 	 * invalid and all S2CRn as bypass unless overridden.
1672 	 */
1673 	for (i = 0; i < smmu->num_mapping_groups; ++i)
1674 		arm_smmu_write_sme(smmu, i);
1675 
1676 	/* Make sure all context banks are disabled and clear CB_FSR  */
1677 	for (i = 0; i < smmu->num_context_banks; ++i) {
1678 		arm_smmu_write_context_bank(smmu, i);
1679 		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_CB_FSR_FAULT);
1680 	}
1681 
1682 	/* Invalidate the TLB, just in case */
1683 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1684 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
1685 
1686 	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
1687 
1688 	/* Enable fault reporting */
1689 	reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
1690 		ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);
1691 
1692 	/* Disable TLB broadcasting. */
1693 	reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);
1694 
1695 	/* Enable client access, handling unmatched streams as appropriate */
1696 	reg &= ~ARM_SMMU_sCR0_CLIENTPD;
1697 	if (disable_bypass)
1698 		reg |= ARM_SMMU_sCR0_USFCFG;
1699 	else
1700 		reg &= ~ARM_SMMU_sCR0_USFCFG;
1701 
1702 	/* Disable forced broadcasting */
1703 	reg &= ~ARM_SMMU_sCR0_FB;
1704 
1705 	/* Don't upgrade barriers */
1706 	reg &= ~(ARM_SMMU_sCR0_BSU);
1707 
1708 	if (smmu->features & ARM_SMMU_FEAT_VMID16)
1709 		reg |= ARM_SMMU_sCR0_VMID16EN;
1710 
1711 	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1712 		reg |= ARM_SMMU_sCR0_EXIDENABLE;
1713 
1714 	if (smmu->impl && smmu->impl->reset)
1715 		smmu->impl->reset(smmu);
1716 
1717 	/* Push the button */
1718 	arm_smmu_tlb_sync_global(smmu);
1719 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
1720 }
1721 
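/*
 * Convert an ID register address-size field (e.g. ID2.IAS/OAS/UBS) into an
 * address width in bits.
 */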
1722 static int arm_smmu_id_size_to_bits(int size)
1723 {
1724 	switch (size) {
1725 	case 0:
1726 		return 32;
1727 	case 1:
1728 		return 36;
1729 	case 2:
1730 		return 40;
1731 	case 3:
1732 		return 42;
1733 	case 4:
1734 		return 44;
1735 	case 5:
1736 	default:
1737 		return 48;
1738 	}
1739 }
1740 
1741 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1742 {
1743 	unsigned int size;
1744 	u32 id;
1745 	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
1746 	int i, ret;
1747 
1748 	dev_notice(smmu->dev, "probing hardware configuration...\n");
1749 	dev_notice(smmu->dev, "SMMUv%d with:\n",
1750 			smmu->version == ARM_SMMU_V2 ? 2 : 1);
1751 
1752 	/* ID0 */
1753 	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
1754 
1755 	/* Restrict available stages based on module parameter */
1756 	if (force_stage == 1)
1757 		id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
1758 	else if (force_stage == 2)
1759 		id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
1760 
1761 	if (id & ARM_SMMU_ID0_S1TS) {
1762 		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1763 		dev_notice(smmu->dev, "\tstage 1 translation\n");
1764 	}
1765 
1766 	if (id & ARM_SMMU_ID0_S2TS) {
1767 		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1768 		dev_notice(smmu->dev, "\tstage 2 translation\n");
1769 	}
1770 
1771 	if (id & ARM_SMMU_ID0_NTS) {
1772 		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1773 		dev_notice(smmu->dev, "\tnested translation\n");
1774 	}
1775 
1776 	if (!(smmu->features &
1777 		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
1778 		dev_err(smmu->dev, "\tno translation support!\n");
1779 		return -ENODEV;
1780 	}
1781 
1782 	if ((id & ARM_SMMU_ID0_S1TS) &&
1783 	    ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
1784 		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1785 		dev_notice(smmu->dev, "\taddress translation ops\n");
1786 	}
1787 
1788 	/*
1789 	 * In order for DMA API calls to work properly, we must defer to what
1790 	 * the FW says about coherency, regardless of what the hardware claims.
1791 	 * Fortunately, this also opens up a workaround for systems where the
1792 	 * ID register value has ended up configured incorrectly.
1793 	 */
1794 	cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
1795 	if (cttw_fw || cttw_reg)
1796 		dev_notice(smmu->dev, "\t%scoherent table walk\n",
1797 			   cttw_fw ? "" : "non-");
1798 	if (cttw_fw != cttw_reg)
1799 		dev_notice(smmu->dev,
1800 			   "\t(IDR0.CTTW overridden by FW configuration)\n");
1801 
1802 	/* Max. number of entries we have for stream matching/indexing */
1803 	if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
1804 		smmu->features |= ARM_SMMU_FEAT_EXIDS;
1805 		size = 1 << 16;
1806 	} else {
1807 		size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
1808 	}
1809 	smmu->streamid_mask = size - 1;
1810 	if (id & ARM_SMMU_ID0_SMS) {
1811 		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1812 		size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
1813 		if (size == 0) {
1814 			dev_err(smmu->dev,
1815 				"stream-matching supported, but no SMRs present!\n");
1816 			return -ENODEV;
1817 		}
1818 
1819 		/* Zero-initialised to mark as invalid */
1820 		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1821 					  GFP_KERNEL);
1822 		if (!smmu->smrs)
1823 			return -ENOMEM;
1824 
1825 		dev_notice(smmu->dev,
1826 			   "\tstream matching with %u register groups", size);
1827 	}
1828 	/* s2cr->type == 0 means translation, so initialise explicitly */
1829 	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1830 					 GFP_KERNEL);
1831 	if (!smmu->s2crs)
1832 		return -ENOMEM;
1833 	for (i = 0; i < size; i++)
1834 		smmu->s2crs[i] = s2cr_init_val;
1835 
1836 	smmu->num_mapping_groups = size;
1837 	mutex_init(&smmu->stream_map_mutex);
1838 	spin_lock_init(&smmu->global_sync_lock);
1839 
1840 	if (smmu->version < ARM_SMMU_V2 ||
1841 	    !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
1842 		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1843 		if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
1844 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1845 	}
1846 
1847 	/* ID1 */
1848 	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
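	/* ID1.PAGESIZE selects between 4KB and 64KB register pages */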
1849 	smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
1850 
1851 	/* Check for size mismatch of SMMU address space from mapped region */
1852 	size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
1853 	if (smmu->numpage != 2 * size << smmu->pgshift)
1854 		dev_warn(smmu->dev,
1855 			"SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1856 			2 * size << smmu->pgshift, smmu->numpage);
1857 	/* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
1858 	smmu->numpage = size;
1859 
1860 	smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
1861 	smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
1862 	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1863 		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1864 		return -ENODEV;
1865 	}
1866 	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1867 		   smmu->num_context_banks, smmu->num_s2_context_banks);
1868 	smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1869 				 sizeof(*smmu->cbs), GFP_KERNEL);
1870 	if (!smmu->cbs)
1871 		return -ENOMEM;
1872 
1873 	/* ID2 */
1874 	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
1875 	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
1876 	smmu->ipa_size = size;
1877 
1878 	/* The output mask is also applied for bypass */
1879 	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
1880 	smmu->pa_size = size;
1881 
1882 	if (id & ARM_SMMU_ID2_VMID16)
1883 		smmu->features |= ARM_SMMU_FEAT_VMID16;
1884 
1885 	/*
1886 	 * What the page table walker can address actually depends on which
1887 	 * descriptor format is in use, but since a) we don't know that yet,
1888 	 * and b) it can vary per context bank, this will have to do...
1889 	 */
1890 	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1891 		dev_warn(smmu->dev,
1892 			 "failed to set DMA mask for table walker\n");
1893 
1894 	if (smmu->version < ARM_SMMU_V2) {
1895 		smmu->va_size = smmu->ipa_size;
1896 		if (smmu->version == ARM_SMMU_V1_64K)
1897 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1898 	} else {
1899 		size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
1900 		smmu->va_size = arm_smmu_id_size_to_bits(size);
1901 		if (id & ARM_SMMU_ID2_PTFS_4K)
1902 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
1903 		if (id & ARM_SMMU_ID2_PTFS_16K)
1904 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
1905 		if (id & ARM_SMMU_ID2_PTFS_64K)
1906 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1907 	}
1908 
1909 	if (smmu->impl && smmu->impl->cfg_probe) {
1910 		ret = smmu->impl->cfg_probe(smmu);
1911 		if (ret)
1912 			return ret;
1913 	}
1914 
1915 	/* Now we've corralled the various formats, what'll it do? */
1916 	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1917 		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1918 	if (smmu->features &
1919 	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
1920 		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
1921 	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1922 		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
1923 	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1924 		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
1925 
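	/*
	 * Merge this SMMU's page sizes into the global ops bitmap; the first
	 * instance probed seeds it.
	 */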
1926 	if (arm_smmu_ops.pgsize_bitmap == -1UL)
1927 		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1928 	else
1929 		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1930 	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1931 		   smmu->pgsize_bitmap);
1932 
1933 
1934 	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1935 		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1936 			   smmu->va_size, smmu->ipa_size);
1937 
1938 	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1939 		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1940 			   smmu->ipa_size, smmu->pa_size);
1941 
1942 	return 0;
1943 }
1944 
1945 struct arm_smmu_match_data {
1946 	enum arm_smmu_arch_version version;
1947 	enum arm_smmu_implementation model;
1948 };
1949 
1950 #define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
1951 static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
1952 
1953 ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1954 ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
1955 ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
1956 ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
1957 ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
1958 ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
1959 
1960 static const struct of_device_id arm_smmu_of_match[] = {
1961 	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1962 	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1963 	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1964 	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1965 	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1966 	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1967 	{ .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1968 	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1969 	{ },
1970 };
1971 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1972 
1973 #ifdef CONFIG_ACPI
1974 static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1975 {
1976 	int ret = 0;
1977 
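	/* Map the IORT SMMU model to the driver's version/implementation */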
1978 	switch (model) {
1979 	case ACPI_IORT_SMMU_V1:
1980 	case ACPI_IORT_SMMU_CORELINK_MMU400:
1981 		smmu->version = ARM_SMMU_V1;
1982 		smmu->model = GENERIC_SMMU;
1983 		break;
1984 	case ACPI_IORT_SMMU_CORELINK_MMU401:
1985 		smmu->version = ARM_SMMU_V1_64K;
1986 		smmu->model = GENERIC_SMMU;
1987 		break;
1988 	case ACPI_IORT_SMMU_V2:
1989 		smmu->version = ARM_SMMU_V2;
1990 		smmu->model = GENERIC_SMMU;
1991 		break;
1992 	case ACPI_IORT_SMMU_CORELINK_MMU500:
1993 		smmu->version = ARM_SMMU_V2;
1994 		smmu->model = ARM_MMU500;
1995 		break;
1996 	case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
1997 		smmu->version = ARM_SMMU_V2;
1998 		smmu->model = CAVIUM_SMMUV2;
1999 		break;
2000 	default:
2001 		ret = -ENODEV;
2002 	}
2003 
2004 	return ret;
2005 }
2006 
2007 static int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
2008 				      u32 *global_irqs, u32 *pmu_irqs)
2009 {
2010 	struct device *dev = smmu->dev;
2011 	struct acpi_iort_node *node =
2012 		*(struct acpi_iort_node **)dev_get_platdata(dev);
2013 	struct acpi_iort_smmu *iort_smmu;
2014 	int ret;
2015 
2016 	/* Retrieve SMMU1/2 specific data */
2017 	iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2018 
2019 	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2020 	if (ret < 0)
2021 		return ret;
2022 
2023 	/* Ignore the configuration access interrupt */
2024 	*global_irqs = 1;
2025 	*pmu_irqs = 0;
2026 
2027 	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2028 		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2029 
2030 	return 0;
2031 }
2032 #else
2033 static inline int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
2034 					     u32 *global_irqs, u32 *pmu_irqs)
2035 {
2036 	return -ENODEV;
2037 }
2038 #endif
2039 
2040 static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
2041 				    u32 *global_irqs, u32 *pmu_irqs)
2042 {
2043 	const struct arm_smmu_match_data *data;
2044 	struct device *dev = smmu->dev;
2045 	bool legacy_binding;
2046 
2047 	if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs))
2048 		return dev_err_probe(dev, -ENODEV,
2049 				     "missing #global-interrupts property\n");
2050 	*pmu_irqs = 0;
2051 
2052 	data = of_device_get_match_data(dev);
2053 	smmu->version = data->version;
2054 	smmu->model = data->model;
2055 
2056 	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2057 	if (legacy_binding && !using_generic_binding) {
2058 		if (!using_legacy_binding) {
2059 			pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
2060 				  IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
2061 		}
2062 		using_legacy_binding = true;
2063 	} else if (!legacy_binding && !using_legacy_binding) {
2064 		using_generic_binding = true;
2065 	} else {
2066 		dev_err(dev, "not probing due to mismatched DT properties\n");
2067 		return -ENODEV;
2068 	}
2069 
2070 	if (of_dma_is_coherent(dev->of_node))
2071 		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2072 
2073 	return 0;
2074 }
2075 
2076 static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
2077 {
2078 	struct list_head rmr_list;
2079 	struct iommu_resv_region *e;
2080 	int idx, cnt = 0;
2081 	u32 reg;
2082 
2083 	INIT_LIST_HEAD(&rmr_list);
2084 	iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2085 
2086 	/*
2087 	 * Rather than trying to look at existing mappings that
2088 	 * are set up by the firmware and then invalidate the ones
2089 	 * that do not have matching RMR entries, just disable the
2090 	 * SMMU until it gets enabled again in the reset routine.
2091 	 */
2092 	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
2093 	reg |= ARM_SMMU_sCR0_CLIENTPD;
2094 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
2095 
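	/*
	 * Reserve a bypass SME for every RMR stream ID so that DMA set up by
	 * firmware keeps working until the device is attached to a proper
	 * domain later.
	 */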
2096 	list_for_each_entry(e, &rmr_list, list) {
2097 		struct iommu_iort_rmr_data *rmr;
2098 		int i;
2099 
2100 		rmr = container_of(e, struct iommu_iort_rmr_data, rr);
2101 		for (i = 0; i < rmr->num_sids; i++) {
2102 			idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
2103 			if (idx < 0)
2104 				continue;
2105 
2106 			if (smmu->s2crs[idx].count == 0) {
2107 				smmu->smrs[idx].id = rmr->sids[i];
2108 				smmu->smrs[idx].mask = 0;
2109 				smmu->smrs[idx].valid = true;
2110 			}
2111 			smmu->s2crs[idx].count++;
2112 			smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
2113 			smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2114 
2115 			cnt++;
2116 		}
2117 	}
2118 
2119 	dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
2120 		   cnt == 1 ? "" : "s");
2121 	iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2122 }
2123 
2124 static int arm_smmu_device_probe(struct platform_device *pdev)
2125 {
2126 	struct resource *res;
2127 	struct arm_smmu_device *smmu;
2128 	struct device *dev = &pdev->dev;
2129 	int num_irqs, i, err;
2130 	u32 global_irqs, pmu_irqs;
2131 	irqreturn_t (*global_fault)(int irq, void *dev);
2132 
2133 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2134 	if (!smmu) {
2135 		dev_err(dev, "failed to allocate arm_smmu_device\n");
2136 		return -ENOMEM;
2137 	}
2138 	smmu->dev = dev;
2139 
2140 	if (dev->of_node)
2141 		err = arm_smmu_device_dt_probe(smmu, &global_irqs, &pmu_irqs);
2142 	else
2143 		err = arm_smmu_device_acpi_probe(smmu, &global_irqs, &pmu_irqs);
2144 	if (err)
2145 		return err;
2146 
2147 	smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2148 	if (IS_ERR(smmu->base))
2149 		return PTR_ERR(smmu->base);
2150 	smmu->ioaddr = res->start;
2151 
2152 	/*
2153 	 * The resource size should effectively match the value of SMMU_TOP;
2154 	 * stash that temporarily until we know PAGESIZE to validate it with.
2155 	 */
2156 	smmu->numpage = resource_size(res);
2157 
2158 	smmu = arm_smmu_impl_init(smmu);
2159 	if (IS_ERR(smmu))
2160 		return PTR_ERR(smmu);
2161 
2162 	num_irqs = platform_irq_count(pdev);
2163 
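	/*
	 * Interrupts are listed as global, then PMU, then context; whatever
	 * remains after the global and PMU IRQs belongs to the context banks.
	 */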
2164 	smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
2165 	if (smmu->num_context_irqs <= 0)
2166 		return dev_err_probe(dev, -ENODEV,
2167 				"found %d interrupts but expected at least %d\n",
2168 				num_irqs, global_irqs + pmu_irqs + 1);
2169 
2170 	smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
2171 				  sizeof(*smmu->irqs), GFP_KERNEL);
2172 	if (!smmu->irqs)
2173 		return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
2174 				     smmu->num_context_irqs);
2175 
2176 	for (i = 0; i < smmu->num_context_irqs; i++) {
2177 		int irq = platform_get_irq(pdev, global_irqs + pmu_irqs + i);
2178 
2179 		if (irq < 0)
2180 			return irq;
2181 		smmu->irqs[i] = irq;
2182 	}
2183 
2184 	err = devm_clk_bulk_get_all(dev, &smmu->clks);
2185 	if (err < 0) {
2186 		dev_err(dev, "failed to get clocks %d\n", err);
2187 		return err;
2188 	}
2189 	smmu->num_clks = err;
2190 
2191 	err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2192 	if (err)
2193 		return err;
2194 
2195 	err = arm_smmu_device_cfg_probe(smmu);
2196 	if (err)
2197 		return err;
2198 
2199 	if (smmu->version == ARM_SMMU_V2) {
2200 		if (smmu->num_context_banks > smmu->num_context_irqs) {
2201 			dev_err(dev,
2202 			      "found only %d context irq(s) but %d required\n",
2203 			      smmu->num_context_irqs, smmu->num_context_banks);
2204 			return -ENODEV;
2205 		}
2206 
2207 		/* Ignore superfluous interrupts */
2208 		smmu->num_context_irqs = smmu->num_context_banks;
2209 	}
2210 
2211 	if (smmu->impl && smmu->impl->global_fault)
2212 		global_fault = smmu->impl->global_fault;
2213 	else
2214 		global_fault = arm_smmu_global_fault;
2215 
2216 	for (i = 0; i < global_irqs; i++) {
2217 		int irq = platform_get_irq(pdev, i);
2218 
2219 		if (irq < 0)
2220 			return irq;
2221 
2222 		err = devm_request_irq(dev, irq, global_fault, IRQF_SHARED,
2223 				       "arm-smmu global fault", smmu);
2224 		if (err)
2225 			return dev_err_probe(dev, err,
2226 					"failed to request global IRQ %d (%u)\n",
2227 					i, irq);
2228 	}
2229 
2230 	err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2231 				     "smmu.%pa", &smmu->ioaddr);
2232 	if (err) {
2233 		dev_err(dev, "Failed to register iommu in sysfs\n");
2234 		return err;
2235 	}
2236 
2237 	err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
2238 				    using_legacy_binding ? NULL : dev);
2239 	if (err) {
2240 		dev_err(dev, "Failed to register iommu\n");
2241 		iommu_device_sysfs_remove(&smmu->iommu);
2242 		return err;
2243 	}
2244 
2245 	platform_set_drvdata(pdev, smmu);
2246 
2247 	/* Check for RMRs and install bypass SMRs if any */
2248 	arm_smmu_rmr_install_bypass_smr(smmu);
2249 
2250 	arm_smmu_device_reset(smmu);
2251 	arm_smmu_test_smr_masks(smmu);
2252 
2253 	/*
2254 	 * We want to avoid touching dev->power.lock in fastpaths unless
2255 	 * it's really going to do something useful - pm_runtime_enabled()
2256 	 * can serve as an ideal proxy for that decision. So, conditionally
2257 	 * enable pm_runtime.
2258 	 */
2259 	if (dev->pm_domain) {
2260 		pm_runtime_set_active(dev);
2261 		pm_runtime_enable(dev);
2262 	}
2263 
2264 	return 0;
2265 }
2266 
2267 static void arm_smmu_device_shutdown(struct platform_device *pdev)
2268 {
2269 	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
2270 
2271 	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2272 		dev_notice(&pdev->dev, "disabling translation\n");
2273 
2274 	arm_smmu_rpm_get(smmu);
2275 	/* Turn the thing off */
2276 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
2277 	arm_smmu_rpm_put(smmu);
2278 
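	/*
	 * Balance the clk_bulk_prepare_enable() from probe: with runtime PM
	 * enabled, pm_runtime_force_suspend() performs the disable via the
	 * runtime suspend callback.
	 */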
2279 	if (pm_runtime_enabled(smmu->dev))
2280 		pm_runtime_force_suspend(smmu->dev);
2281 	else
2282 		clk_bulk_disable(smmu->num_clks, smmu->clks);
2283 
2284 	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2285 }
2286 
2287 static void arm_smmu_device_remove(struct platform_device *pdev)
2288 {
2289 	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
2290 
2291 	iommu_device_unregister(&smmu->iommu);
2292 	iommu_device_sysfs_remove(&smmu->iommu);
2293 
2294 	arm_smmu_device_shutdown(pdev);
2295 }
2296 
2297 static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
2298 {
2299 	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2300 	int ret;
2301 
2302 	ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2303 	if (ret)
2304 		return ret;
2305 
2306 	arm_smmu_device_reset(smmu);
2307 
2308 	return 0;
2309 }
2310 
2311 static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
2312 {
2313 	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2314 
2315 	clk_bulk_disable(smmu->num_clks, smmu->clks);
2316 
2317 	return 0;
2318 }
2319 
2320 static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2321 {
2322 	int ret;
2323 	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2324 
2325 	ret = clk_bulk_prepare(smmu->num_clks, smmu->clks);
2326 	if (ret)
2327 		return ret;
2328 
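	/*
	 * Leave a runtime-suspended SMMU as it is; the clocks stay prepared
	 * but will only be enabled again by the next runtime resume.
	 */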
2329 	if (pm_runtime_suspended(dev))
2330 		return 0;
2331 
2332 	ret = arm_smmu_runtime_resume(dev);
2333 	if (ret)
2334 		clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2335 
2336 	return ret;
2337 }
2338 
2339 static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2340 {
2341 	int ret = 0;
2342 	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2343 
2344 	if (pm_runtime_suspended(dev))
2345 		goto clk_unprepare;
2346 
2347 	ret = arm_smmu_runtime_suspend(dev);
2348 	if (ret)
2349 		return ret;
2350 
2351 clk_unprepare:
2352 	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2353 	return ret;
2354 }
2355 
2356 static const struct dev_pm_ops arm_smmu_pm_ops = {
2357 	SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2358 	SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2359 			   arm_smmu_runtime_resume, NULL)
2360 };
2361 
2362 static struct platform_driver arm_smmu_driver = {
2363 	.driver	= {
2364 		.name			= "arm-smmu",
2365 		.of_match_table		= arm_smmu_of_match,
2366 		.pm			= &arm_smmu_pm_ops,
2367 		.suppress_bind_attrs    = true,
2368 	},
2369 	.probe	= arm_smmu_device_probe,
2370 	.remove = arm_smmu_device_remove,
2371 	.shutdown = arm_smmu_device_shutdown,
2372 };
2373 module_platform_driver(arm_smmu_driver);
2374 
2375 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2376 MODULE_AUTHOR("Will Deacon <will@kernel.org>");
2377 MODULE_ALIAS("platform:arm-smmu");
2378 MODULE_LICENSE("GPL v2");
2379