xref: /linux/drivers/iommu/arm/arm-smmu/arm-smmu.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * IOMMU API for ARM architected SMMU implementations.
4  *
5  * Copyright (C) 2013 ARM Limited
6  *
7  * Author: Will Deacon <will.deacon@arm.com>
8  *
9  * This driver currently supports:
10  *	- SMMUv1 and v2 implementations
11  *	- Stream-matching and stream-indexing
12  *	- v7/v8 long-descriptor format
13  *	- Non-secure access to the SMMU
14  *	- Context fault reporting
15  *	- Extended Stream ID (16 bit)
16  */
17 
18 #define pr_fmt(fmt) "arm-smmu: " fmt
19 
20 #include <linux/acpi.h>
21 #include <linux/acpi_iort.h>
22 #include <linux/bitfield.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/err.h>
26 #include <linux/interrupt.h>
27 #include <linux/io.h>
28 #include <linux/iopoll.h>
29 #include <linux/module.h>
30 #include <linux/of.h>
31 #include <linux/of_address.h>
32 #include <linux/pci.h>
33 #include <linux/platform_device.h>
34 #include <linux/pm_runtime.h>
35 #include <linux/ratelimit.h>
36 #include <linux/slab.h>
37 
38 #include <linux/fsl/mc.h>
39 
40 #include "arm-smmu.h"
41 #include "../../dma-iommu.h"
42 
43 /*
44  * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
45  * global register space are still, in fact, using a hypervisor to mediate it
46  * by trapping and emulating register accesses. Sadly, some deployed versions
47  * of said trapping code have bugs wherein they go horribly wrong for stores
48  * using r31 (i.e. XZR/WZR) as the source register.
49  */
50 #define QCOM_DUMMY_VAL -1
51 
52 #define MSI_IOVA_BASE			0x8000000
53 #define MSI_IOVA_LENGTH			0x100000
54 
55 static int force_stage;
56 module_param(force_stage, int, S_IRUGO);
57 MODULE_PARM_DESC(force_stage,
58 	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
59 static bool disable_bypass =
60 	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
61 module_param(disable_bypass, bool, S_IRUGO);
62 MODULE_PARM_DESC(disable_bypass,
63 	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
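/*
 * For example, with the driver built in, these can typically be set on the
 * kernel command line as "arm-smmu.force_stage=2" or
 * "arm-smmu.disable_bypass=0" (the latter is also suggested by the global
 * fault handler below when an unknown Stream ID is blocked).
 */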
64 
65 #define s2cr_init_val (struct arm_smmu_s2cr){				\
66 	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
67 }
68 
69 static bool using_legacy_binding, using_generic_binding;
70 
71 static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
72 {
73 	if (pm_runtime_enabled(smmu->dev))
74 		return pm_runtime_resume_and_get(smmu->dev);
75 
76 	return 0;
77 }
78 
79 static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
80 {
81 	if (pm_runtime_enabled(smmu->dev))
82 		pm_runtime_put_autosuspend(smmu->dev);
83 }
84 
85 static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
86 {
87 	/*
88 	 * Setup an autosuspend delay to avoid bouncing runpm state.
89 	 * Otherwise, if a driver for a suspended consumer device
90 	 * unmaps buffers, it will runpm resume/suspend for each one.
91 	 *
92 	 * For example, when used by a GPU device, when an application
93 	 * or game exits, it can trigger unmapping 100s or 1000s of
94 	 * buffers.  With a runpm cycle for each buffer, that adds up
95 	 * to 5-10sec worth of reprogramming the context bank, while
96 	 * the system appears to be locked up to the user.
97 	 */
98 	pm_runtime_set_autosuspend_delay(smmu->dev, 20);
99 	pm_runtime_use_autosuspend(smmu->dev);
100 }
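/*
 * Callers in this file bracket register access with these helpers, along
 * the lines of:
 *
 *	ret = arm_smmu_rpm_get(smmu);
 *	if (ret < 0)
 *		return ret;
 *	... poke SMMU registers ...
 *	arm_smmu_rpm_put(smmu);
 */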
101 
102 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
103 {
104 	return container_of(dom, struct arm_smmu_domain, domain);
105 }
106 
107 static struct platform_driver arm_smmu_driver;
108 static struct iommu_ops arm_smmu_ops;
109 
110 #ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
111 static struct device_node *dev_get_dev_node(struct device *dev)
112 {
113 	if (dev_is_pci(dev)) {
114 		struct pci_bus *bus = to_pci_dev(dev)->bus;
115 
116 		while (!pci_is_root_bus(bus))
117 			bus = bus->parent;
118 		return of_node_get(bus->bridge->parent->of_node);
119 	}
120 
121 	return of_node_get(dev->of_node);
122 }
123 
124 static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
125 {
126 	*((__be32 *)data) = cpu_to_be32(alias);
127 	return 0; /* Continue walking */
128 }
129 
130 static int __find_legacy_master_phandle(struct device *dev, void *data)
131 {
132 	struct of_phandle_iterator *it = *(void **)data;
133 	struct device_node *np = it->node;
134 	int err;
135 
136 	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
137 			    "#stream-id-cells", -1)
138 		if (it->node == np) {
139 			*(void **)data = dev;
140 			return 1;
141 		}
142 	it->node = np;
143 	return err == -ENOENT ? 0 : err;
144 }
145 
146 static int arm_smmu_register_legacy_master(struct device *dev,
147 					   struct arm_smmu_device **smmu)
148 {
149 	struct device *smmu_dev;
150 	struct device_node *np;
151 	struct of_phandle_iterator it;
152 	void *data = &it;
153 	u32 *sids;
154 	__be32 pci_sid;
155 	int err;
156 
157 	np = dev_get_dev_node(dev);
158 	if (!np || !of_property_present(np, "#stream-id-cells")) {
159 		of_node_put(np);
160 		return -ENODEV;
161 	}
162 
163 	it.node = np;
164 	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
165 				     __find_legacy_master_phandle);
166 	smmu_dev = data;
167 	of_node_put(np);
168 	if (err == 0)
169 		return -ENODEV;
170 	if (err < 0)
171 		return err;
172 
173 	if (dev_is_pci(dev)) {
174 		/* "mmu-masters" assumes Stream ID == Requester ID */
175 		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
176 				       &pci_sid);
177 		it.cur = &pci_sid;
178 		it.cur_count = 1;
179 	}
180 
181 	err = iommu_fwspec_init(dev, NULL);
182 	if (err)
183 		return err;
184 
185 	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
186 	if (!sids)
187 		return -ENOMEM;
188 
189 	*smmu = dev_get_drvdata(smmu_dev);
190 	of_phandle_iterator_args(&it, sids, it.cur_count);
191 	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
192 	kfree(sids);
193 	return err;
194 }
195 #else
196 static int arm_smmu_register_legacy_master(struct device *dev,
197 					   struct arm_smmu_device **smmu)
198 {
199 	return -ENODEV;
200 }
201 #endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
202 
203 static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
204 {
205 	clear_bit(idx, map);
206 }
207 
208 /* Wait for any pending TLB invalidations to complete */
209 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
210 				int sync, int status)
211 {
212 	unsigned int spin_cnt, delay;
213 	u32 reg;
214 
215 	if (smmu->impl && unlikely(smmu->impl->tlb_sync))
216 		return smmu->impl->tlb_sync(smmu, page, sync, status);
217 
218 	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
219 	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
220 		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
221 			reg = arm_smmu_readl(smmu, page, status);
222 			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
223 				return;
224 			cpu_relax();
225 		}
226 		udelay(delay);
227 	}
228 	dev_err_ratelimited(smmu->dev,
229 			    "TLB sync timed out -- SMMU may be deadlocked\n");
230 }
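/*
 * The loop above busy-waits on the status register for a short while and
 * then backs off with exponentially growing udelay()s, so a sync that never
 * completes costs a bounded amount of CPU time before the ratelimited
 * error message fires.
 */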
231 
232 static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
233 {
234 	unsigned long flags;
235 
236 	spin_lock_irqsave(&smmu->global_sync_lock, flags);
237 	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
238 			    ARM_SMMU_GR0_sTLBGSTATUS);
239 	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
240 }
241 
242 static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
243 {
244 	struct arm_smmu_device *smmu = smmu_domain->smmu;
245 	unsigned long flags;
246 
247 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
248 	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
249 			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
250 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
251 }
252 
253 static void arm_smmu_tlb_inv_context_s1(void *cookie)
254 {
255 	struct arm_smmu_domain *smmu_domain = cookie;
256 	/*
257 	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
258 	 * current CPU are visible beforehand.
259 	 */
260 	wmb();
261 	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
262 			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
263 	arm_smmu_tlb_sync_context(smmu_domain);
264 }
265 
266 static void arm_smmu_tlb_inv_context_s2(void *cookie)
267 {
268 	struct arm_smmu_domain *smmu_domain = cookie;
269 	struct arm_smmu_device *smmu = smmu_domain->smmu;
270 
271 	/* See above */
272 	wmb();
273 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
274 	arm_smmu_tlb_sync_global(smmu);
275 }
276 
277 static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
278 				      size_t granule, void *cookie, int reg)
279 {
280 	struct arm_smmu_domain *smmu_domain = cookie;
281 	struct arm_smmu_device *smmu = smmu_domain->smmu;
282 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
283 	int idx = cfg->cbndx;
284 
285 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
286 		wmb();
287 
288 	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
289 		iova = (iova >> 12) << 12;
290 		iova |= cfg->asid;
291 		do {
292 			arm_smmu_cb_write(smmu, idx, reg, iova);
293 			iova += granule;
294 		} while (size -= granule);
295 	} else {
296 		iova >>= 12;
297 		iova |= (u64)cfg->asid << 48;
298 		do {
299 			arm_smmu_cb_writeq(smmu, idx, reg, iova);
300 			iova += granule >> 12;
301 		} while (size -= granule);
302 	}
303 }
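/*
 * The stage-1 operand encoding above follows the TLBIVA/TLBIVAL register
 * layout: for AArch32-format contexts the ASID sits in the low bits next to
 * a 4K-aligned VA, while for AArch64-format contexts a page number is used
 * with the ASID packed into bits [63:48].
 */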
304 
305 static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
306 				      size_t granule, void *cookie, int reg)
307 {
308 	struct arm_smmu_domain *smmu_domain = cookie;
309 	struct arm_smmu_device *smmu = smmu_domain->smmu;
310 	int idx = smmu_domain->cfg.cbndx;
311 
312 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
313 		wmb();
314 
315 	iova >>= 12;
316 	do {
317 		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
318 			arm_smmu_cb_writeq(smmu, idx, reg, iova);
319 		else
320 			arm_smmu_cb_write(smmu, idx, reg, iova);
321 		iova += granule >> 12;
322 	} while (size -= granule);
323 }
324 
325 static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
326 				     size_t granule, void *cookie)
327 {
328 	struct arm_smmu_domain *smmu_domain = cookie;
329 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
330 
331 	if (cfg->flush_walk_prefer_tlbiasid) {
332 		arm_smmu_tlb_inv_context_s1(cookie);
333 	} else {
334 		arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
335 					  ARM_SMMU_CB_S1_TLBIVA);
336 		arm_smmu_tlb_sync_context(cookie);
337 	}
338 }
339 
340 static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
341 				     unsigned long iova, size_t granule,
342 				     void *cookie)
343 {
344 	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
345 				  ARM_SMMU_CB_S1_TLBIVAL);
346 }
347 
348 static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
349 				     size_t granule, void *cookie)
350 {
351 	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
352 				  ARM_SMMU_CB_S2_TLBIIPAS2);
353 	arm_smmu_tlb_sync_context(cookie);
354 }
355 
356 static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
357 				     unsigned long iova, size_t granule,
358 				     void *cookie)
359 {
360 	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
361 				  ARM_SMMU_CB_S2_TLBIIPAS2L);
362 }
363 
364 static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
365 					size_t granule, void *cookie)
366 {
367 	arm_smmu_tlb_inv_context_s2(cookie);
368 }
369 /*
370  * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
371  * almost negligible, but the benefit of getting the first one in as far ahead
372  * of the sync as possible is significant, hence we don't just make this a
373  * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
374  * think.
375  */
376 static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
377 					unsigned long iova, size_t granule,
378 					void *cookie)
379 {
380 	struct arm_smmu_domain *smmu_domain = cookie;
381 	struct arm_smmu_device *smmu = smmu_domain->smmu;
382 
383 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
384 		wmb();
385 
386 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
387 }
388 
389 static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
390 	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
391 	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
392 	.tlb_add_page	= arm_smmu_tlb_add_page_s1,
393 };
394 
395 static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
396 	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
397 	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
398 	.tlb_add_page	= arm_smmu_tlb_add_page_s2,
399 };
400 
401 static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
402 	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
403 	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2_v1,
404 	.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
405 };
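/*
 * arm_smmu_init_domain_context() selects one of the op sets above: the s1
 * ops for stage-1 domains, s2_v2 for stage-2 domains on SMMUv2, and s2_v1
 * otherwise, where per-page invalidation is only possible by VMID.
 */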
406 
407 
408 void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx,
409 				      struct arm_smmu_context_fault_info *cfi)
410 {
411 	cfi->iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
412 	cfi->fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
413 	cfi->fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
414 	cfi->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
415 }
416 
417 void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx,
418 				       const struct arm_smmu_context_fault_info *cfi)
419 {
420 	dev_err(smmu->dev,
421 		"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
422 		cfi->fsr, cfi->iova, cfi->fsynr, cfi->cbfrsynra, idx);
423 
424 	dev_err(smmu->dev, "FSR    = %08x [%s%sFormat=%u%s%s%s%s%s%s%s%s], SID=0x%x\n",
425 		cfi->fsr,
426 		(cfi->fsr & ARM_SMMU_CB_FSR_MULTI)  ? "MULTI " : "",
427 		(cfi->fsr & ARM_SMMU_CB_FSR_SS)     ? "SS " : "",
428 		(u32)FIELD_GET(ARM_SMMU_CB_FSR_FORMAT, cfi->fsr),
429 		(cfi->fsr & ARM_SMMU_CB_FSR_UUT)    ? " UUT" : "",
430 		(cfi->fsr & ARM_SMMU_CB_FSR_ASF)    ? " ASF" : "",
431 		(cfi->fsr & ARM_SMMU_CB_FSR_TLBLKF) ? " TLBLKF" : "",
432 		(cfi->fsr & ARM_SMMU_CB_FSR_TLBMCF) ? " TLBMCF" : "",
433 		(cfi->fsr & ARM_SMMU_CB_FSR_EF)     ? " EF" : "",
434 		(cfi->fsr & ARM_SMMU_CB_FSR_PF)     ? " PF" : "",
435 		(cfi->fsr & ARM_SMMU_CB_FSR_AFF)    ? " AFF" : "",
436 		(cfi->fsr & ARM_SMMU_CB_FSR_TF)     ? " TF" : "",
437 		cfi->cbfrsynra);
438 
439 	dev_err(smmu->dev, "FSYNR0 = %08x [S1CBNDX=%u%s%s%s%s%s%s PLVL=%u]\n",
440 		cfi->fsynr,
441 		(u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_S1CBNDX, cfi->fsynr),
442 		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_AFR) ? " AFR" : "",
443 		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_PTWF) ? " PTWF" : "",
444 		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_NSATTR) ? " NSATTR" : "",
445 		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_IND) ? " IND" : "",
446 		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_PNU) ? " PNU" : "",
447 		(cfi->fsynr & ARM_SMMU_CB_FSYNR0_WNR) ? " WNR" : "",
448 		(u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_PLVL, cfi->fsynr));
449 }
450 
451 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
452 {
453 	struct arm_smmu_context_fault_info cfi;
454 	struct arm_smmu_domain *smmu_domain = dev;
455 	struct arm_smmu_device *smmu = smmu_domain->smmu;
456 	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
457 				      DEFAULT_RATELIMIT_BURST);
458 	int idx = smmu_domain->cfg.cbndx;
459 	int ret;
460 
461 	arm_smmu_read_context_fault_info(smmu, idx, &cfi);
462 
463 	if (!(cfi.fsr & ARM_SMMU_CB_FSR_FAULT))
464 		return IRQ_NONE;
465 
466 	ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova,
467 		cfi.fsynr & ARM_SMMU_CB_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
468 
469 	if (ret == -ENOSYS && __ratelimit(&rs))
470 		arm_smmu_print_context_fault_info(smmu, idx, &cfi);
471 
472 	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
473 	return IRQ_HANDLED;
474 }
475 
476 static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
477 {
478 	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
479 	struct arm_smmu_device *smmu = dev;
480 	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
481 				      DEFAULT_RATELIMIT_BURST);
482 
483 	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
484 	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
485 	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
486 	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
487 
488 	if (!gfsr)
489 		return IRQ_NONE;
490 
491 	if (__ratelimit(&rs)) {
492 		if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
493 		    (gfsr & ARM_SMMU_sGFSR_USF))
494 			dev_err(smmu->dev,
495 				"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
496 				(u16)gfsynr1);
497 		else
498 			dev_err(smmu->dev,
499 				"Unexpected global fault, this could be serious\n");
500 		dev_err(smmu->dev,
501 			"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
502 			gfsr, gfsynr0, gfsynr1, gfsynr2);
503 	}
504 
505 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
506 	return IRQ_HANDLED;
507 }
508 
509 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
510 				       struct io_pgtable_cfg *pgtbl_cfg)
511 {
512 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
513 	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
514 	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
515 
516 	cb->cfg = cfg;
517 
518 	/* TCR */
519 	if (stage1) {
520 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
521 			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
522 		} else {
523 			cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
524 			cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
525 			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
526 				cb->tcr[1] |= ARM_SMMU_TCR2_AS;
527 			else
528 				cb->tcr[0] |= ARM_SMMU_TCR_EAE;
529 		}
530 	} else {
531 		cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
532 	}
533 
534 	/* TTBRs */
535 	if (stage1) {
536 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
537 			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
538 			cb->ttbr[1] = 0;
539 		} else {
540 			cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
541 						 cfg->asid);
542 			cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
543 						 cfg->asid);
544 
545 			if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
546 				cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
547 			else
548 				cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
549 		}
550 	} else {
551 		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
552 	}
553 
554 	/* MAIRs (stage-1 only) */
555 	if (stage1) {
556 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
557 			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
558 			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
559 		} else {
560 			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
561 			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
562 		}
563 	}
564 }
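/*
 * Note that this only fills in the software shadow of the context bank;
 * nothing touches hardware until arm_smmu_write_context_bank(), which also
 * allows the whole bank state to be replayed after reset or resume.
 */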
565 
566 void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
567 {
568 	u32 reg;
569 	bool stage1;
570 	struct arm_smmu_cb *cb = &smmu->cbs[idx];
571 	struct arm_smmu_cfg *cfg = cb->cfg;
572 
573 	/* Unassigned context banks only need disabling */
574 	if (!cfg) {
575 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
576 		return;
577 	}
578 
579 	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
580 
581 	/* CBA2R */
582 	if (smmu->version > ARM_SMMU_V1) {
583 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
584 			reg = ARM_SMMU_CBA2R_VA64;
585 		else
586 			reg = 0;
587 		/* 16-bit VMIDs live in CBA2R */
588 		if (smmu->features & ARM_SMMU_FEAT_VMID16)
589 			reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
590 
591 		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
592 	}
593 
594 	/* CBAR */
595 	reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
596 	if (smmu->version < ARM_SMMU_V2)
597 		reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
598 
599 	/*
600 	 * Use the weakest shareability/memory types, so they are
601 	 * overridden by the ttbcr/pte.
602 	 */
603 	if (stage1) {
604 		reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
605 				  ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
606 		       FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
607 				  ARM_SMMU_CBAR_S1_MEMATTR_WB);
608 	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
609 		/* 8-bit VMIDs live in CBAR */
610 		reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
611 	}
612 	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
613 
614 	/*
615 	 * TCR
616 	 * We must write this before the TTBRs, since it determines the
617 	 * access behaviour of some fields (in particular, ASID[15:8]).
618 	 */
619 	if (stage1 && smmu->version > ARM_SMMU_V1)
620 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
621 	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
622 
623 	/* TTBRs */
624 	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
625 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
626 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
627 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
628 	} else {
629 		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
630 		if (stage1)
631 			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
632 					   cb->ttbr[1]);
633 	}
634 
635 	/* MAIRs (stage-1 only) */
636 	if (stage1) {
637 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
638 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
639 	}
640 
641 	/* SCTLR */
642 	reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
643 	      ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
644 	if (stage1)
645 		reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
646 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
647 		reg |= ARM_SMMU_SCTLR_E;
648 
649 	if (smmu->impl && smmu->impl->write_sctlr)
650 		smmu->impl->write_sctlr(smmu, idx, reg);
651 	else
652 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
653 }
654 
655 static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
656 				       struct arm_smmu_device *smmu,
657 				       struct device *dev, unsigned int start)
658 {
659 	if (smmu->impl && smmu->impl->alloc_context_bank)
660 		return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
661 
662 	return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
663 }
664 
665 static int arm_smmu_init_domain_context(struct arm_smmu_domain *smmu_domain,
666 					struct arm_smmu_device *smmu,
667 					struct device *dev)
668 {
669 	int irq, start, ret = 0;
670 	unsigned long ias, oas;
671 	struct io_pgtable_ops *pgtbl_ops;
672 	struct io_pgtable_cfg pgtbl_cfg;
673 	enum io_pgtable_fmt fmt;
674 	struct iommu_domain *domain = &smmu_domain->domain;
675 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
676 	irqreturn_t (*context_fault)(int irq, void *dev);
677 
678 	mutex_lock(&smmu_domain->init_mutex);
679 	if (smmu_domain->smmu)
680 		goto out_unlock;
681 
682 	/*
683 	 * Mapping the requested stage onto what we support is surprisingly
684 	 * complicated, mainly because the spec allows S1+S2 SMMUs without
685 	 * support for nested translation. That means we end up with the
686 	 * following table:
687 	 *
688 	 * Requested        Supported        Actual
689 	 *     S1               N              S1
690 	 *     S1             S1+S2            S1
691 	 *     S1               S2             S2
692 	 *     S1               S1             S1
693 	 *     N                N              N
694 	 *     N              S1+S2            S2
695 	 *     N                S2             S2
696 	 *     N                S1             S1
697 	 *
698 	 * Note that you can't actually request stage-2 mappings.
699 	 */
700 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
701 		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
702 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
703 		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
704 
705 	/*
706 	 * Choosing a suitable context format is even more fiddly. Until we
707 	 * grow some way for the caller to express a preference, and/or move
708 	 * the decision into the io-pgtable code where it arguably belongs,
709 	 * just aim for the closest thing to the rest of the system, and hope
710 	 * that the hardware isn't esoteric enough that we can't assume AArch64
711 	 * support to be a superset of AArch32 support...
712 	 */
713 	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
714 		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
715 	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
716 	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
717 	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
718 	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
719 		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
720 	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
721 	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
722 			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
723 			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
724 		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
725 
726 	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
727 		ret = -EINVAL;
728 		goto out_unlock;
729 	}
730 
731 	switch (smmu_domain->stage) {
732 	case ARM_SMMU_DOMAIN_S1:
733 		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
734 		start = smmu->num_s2_context_banks;
735 		ias = smmu->va_size;
736 		oas = smmu->ipa_size;
737 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
738 			fmt = ARM_64_LPAE_S1;
739 		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
740 			fmt = ARM_32_LPAE_S1;
741 			ias = min(ias, 32UL);
742 			oas = min(oas, 40UL);
743 		} else {
744 			fmt = ARM_V7S;
745 			ias = min(ias, 32UL);
746 			oas = min(oas, 32UL);
747 		}
748 		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
749 		break;
750 	case ARM_SMMU_DOMAIN_NESTED:
751 		/*
752 		 * We will likely want to change this if/when KVM gets
753 		 * involved.
754 		 */
755 	case ARM_SMMU_DOMAIN_S2:
756 		cfg->cbar = CBAR_TYPE_S2_TRANS;
757 		start = 0;
758 		ias = smmu->ipa_size;
759 		oas = smmu->pa_size;
760 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
761 			fmt = ARM_64_LPAE_S2;
762 		} else {
763 			fmt = ARM_32_LPAE_S2;
764 			ias = min(ias, 40UL);
765 			oas = min(oas, 40UL);
766 		}
767 		if (smmu->version == ARM_SMMU_V2)
768 			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
769 		else
770 			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
771 		break;
772 	default:
773 		ret = -EINVAL;
774 		goto out_unlock;
775 	}
776 
777 	ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
778 	if (ret < 0) {
779 		goto out_unlock;
780 	}
781 
782 	smmu_domain->smmu = smmu;
783 
784 	cfg->cbndx = ret;
785 	if (smmu->version < ARM_SMMU_V2) {
786 		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
787 		cfg->irptndx %= smmu->num_context_irqs;
788 	} else {
789 		cfg->irptndx = cfg->cbndx;
790 	}
791 
792 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
793 		cfg->vmid = cfg->cbndx + 1;
794 	else
795 		cfg->asid = cfg->cbndx;
796 
797 	pgtbl_cfg = (struct io_pgtable_cfg) {
798 		.pgsize_bitmap	= smmu->pgsize_bitmap,
799 		.ias		= ias,
800 		.oas		= oas,
801 		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
802 		.tlb		= smmu_domain->flush_ops,
803 		.iommu_dev	= smmu->dev,
804 	};
805 
806 	if (smmu->impl && smmu->impl->init_context) {
807 		ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
808 		if (ret)
809 			goto out_clear_smmu;
810 	}
811 
812 	if (smmu_domain->pgtbl_quirks)
813 		pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;
814 
815 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
816 	if (!pgtbl_ops) {
817 		ret = -ENOMEM;
818 		goto out_clear_smmu;
819 	}
820 
821 	/* Update the domain's page sizes to reflect the page table format */
822 	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
823 
824 	if (pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
825 		domain->geometry.aperture_start = ~0UL << ias;
826 		domain->geometry.aperture_end = ~0UL;
827 	} else {
828 		domain->geometry.aperture_end = (1UL << ias) - 1;
829 	}
830 
831 	domain->geometry.force_aperture = true;
832 
833 	/* Initialise the context bank with our page table cfg */
834 	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
835 	arm_smmu_write_context_bank(smmu, cfg->cbndx);
836 
837 	/*
838 	 * Request context fault interrupt. Do this last to avoid the
839 	 * handler seeing a half-initialised domain state.
840 	 */
841 	irq = smmu->irqs[cfg->irptndx];
842 
843 	if (smmu->impl && smmu->impl->context_fault)
844 		context_fault = smmu->impl->context_fault;
845 	else
846 		context_fault = arm_smmu_context_fault;
847 
848 	if (smmu->impl && smmu->impl->context_fault_needs_threaded_irq)
849 		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
850 						context_fault,
851 						IRQF_ONESHOT | IRQF_SHARED,
852 						"arm-smmu-context-fault",
853 						smmu_domain);
854 	else
855 		ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
856 				       "arm-smmu-context-fault", smmu_domain);
857 
858 	if (ret < 0) {
859 		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
860 			cfg->irptndx, irq);
861 		cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
862 	}
863 
864 	mutex_unlock(&smmu_domain->init_mutex);
865 
866 	/* Publish page table ops for map/unmap */
867 	smmu_domain->pgtbl_ops = pgtbl_ops;
868 	return 0;
869 
870 out_clear_smmu:
871 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
872 	smmu_domain->smmu = NULL;
873 out_unlock:
874 	mutex_unlock(&smmu_domain->init_mutex);
875 	return ret;
876 }
877 
878 static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
879 {
880 	struct arm_smmu_device *smmu = smmu_domain->smmu;
881 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
882 	int ret, irq;
883 
884 	if (!smmu)
885 		return;
886 
887 	ret = arm_smmu_rpm_get(smmu);
888 	if (ret < 0)
889 		return;
890 
891 	/*
892 	 * Disable the context bank and free the page tables before freeing
893 	 * it.
894 	 */
895 	smmu->cbs[cfg->cbndx].cfg = NULL;
896 	arm_smmu_write_context_bank(smmu, cfg->cbndx);
897 
898 	if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
899 		irq = smmu->irqs[cfg->irptndx];
900 		devm_free_irq(smmu->dev, irq, smmu_domain);
901 	}
902 
903 	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
904 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
905 
906 	arm_smmu_rpm_put(smmu);
907 }
908 
909 static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
910 {
911 	struct arm_smmu_domain *smmu_domain;
912 
913 	/*
914 	 * Allocate the domain and initialise some of its data structures.
915 	 * We can't really do anything meaningful until we've added a
916 	 * master.
917 	 */
918 	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
919 	if (!smmu_domain)
920 		return NULL;
921 
922 	mutex_init(&smmu_domain->init_mutex);
923 	spin_lock_init(&smmu_domain->cb_lock);
924 
925 	return &smmu_domain->domain;
926 }
927 
928 static void arm_smmu_domain_free(struct iommu_domain *domain)
929 {
930 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
931 
932 	/*
933 	 * Free the domain resources. We assume that all devices have
934 	 * already been detached.
935 	 */
936 	arm_smmu_destroy_domain_context(smmu_domain);
937 	kfree(smmu_domain);
938 }
939 
940 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
941 {
942 	struct arm_smmu_smr *smr = smmu->smrs + idx;
943 	u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
944 		  FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
945 
946 	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
947 		reg |= ARM_SMMU_SMR_VALID;
948 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
949 }
950 
951 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
952 {
953 	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
954 	u32 reg;
955 
956 	if (smmu->impl && smmu->impl->write_s2cr) {
957 		smmu->impl->write_s2cr(smmu, idx);
958 		return;
959 	}
960 
961 	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
962 	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
963 	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
964 
965 	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
966 	    smmu->smrs[idx].valid)
967 		reg |= ARM_SMMU_S2CR_EXIDVALID;
968 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
969 }
970 
971 static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
972 {
973 	arm_smmu_write_s2cr(smmu, idx);
974 	if (smmu->smrs)
975 		arm_smmu_write_smr(smmu, idx);
976 }
977 
978 /*
979  * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
980  * should be called after sCR0 is written.
981  */
982 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
983 {
984 	u32 smr;
985 	int i;
986 
987 	if (!smmu->smrs)
988 		return;
989 	/*
990 	 * If we've had to accommodate firmware memory regions, we may
991 	 * have live SMRs by now; tread carefully...
992 	 *
993 	 * Somewhat perversely, not having a free SMR for this test implies we
994 	 * can get away without it anyway, as we'll only be able to 'allocate'
995 	 * these SMRs for the ID/mask values we're already trusting to be OK.
996 	 */
997 	for (i = 0; i < smmu->num_mapping_groups; i++)
998 		if (!smmu->smrs[i].valid)
999 			goto smr_ok;
1000 	return;
1001 smr_ok:
1002 	/*
1003 	 * SMR.ID bits may not be preserved if the corresponding MASK
1004 	 * bits are set, so check each one separately. We can reject
1005 	 * masters later if they try to claim IDs outside these masks.
1006 	 */
1007 	smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
1008 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
1009 	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
1010 	smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
1011 
1012 	smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
1013 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
1014 	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
1015 	smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
1016 }
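/*
 * For example, an implementation with 15 stream ID bits and 8 mask bits
 * would read back 0x7fff and 0xff here; arm_smmu_probe_device() then
 * rejects any master whose IDs or masks fall outside those values.
 */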
1017 
1018 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
1019 {
1020 	struct arm_smmu_smr *smrs = smmu->smrs;
1021 	int i, free_idx = -ENOSPC;
1022 
1023 	/* Stream indexing is blissfully easy */
1024 	if (!smrs)
1025 		return id;
1026 
1027 	/* Validating SMRs is... less so */
1028 	for (i = 0; i < smmu->num_mapping_groups; ++i) {
1029 		if (!smrs[i].valid) {
1030 			/*
1031 			 * Note the first free entry we come across, which
1032 			 * we'll claim in the end if nothing else matches.
1033 			 */
1034 			if (free_idx < 0)
1035 				free_idx = i;
1036 			continue;
1037 		}
1038 		/*
1039 		 * If the new entry is _entirely_ matched by an existing entry,
1040 		 * then reuse that, with the guarantee that there also cannot
1041 		 * be any subsequent conflicting entries. In normal use we'd
1042 		 * expect simply identical entries for this case, but there's
1043 		 * no harm in accommodating the generalisation.
1044 		 */
1045 		if ((mask & smrs[i].mask) == mask &&
1046 		    !((id ^ smrs[i].id) & ~smrs[i].mask))
1047 			return i;
1048 		/*
1049 		 * If the new entry has any other overlap with an existing one,
1050 		 * though, then there always exists at least one stream ID
1051 		 * which would cause a conflict, and we can't allow that risk.
1052 		 */
1053 		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1054 			return -EINVAL;
1055 	}
1056 
1057 	return free_idx;
1058 }
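/*
 * Worked example: against an existing entry { id = 0x400, mask = 0x7f }
 * (matching IDs 0x400-0x47f), a new { id = 0x410, mask = 0x0f } is entirely
 * contained and reuses that index, whereas { id = 0x408, mask = 0x30 }
 * against an existing { id = 0x400, mask = 0x0f } overlaps only partially
 * and is rejected with -EINVAL.
 */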
1059 
1060 static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1061 {
1062 	if (--smmu->s2crs[idx].count)
1063 		return false;
1064 
1065 	smmu->s2crs[idx] = s2cr_init_val;
1066 	if (smmu->smrs)
1067 		smmu->smrs[idx].valid = false;
1068 
1069 	return true;
1070 }
1071 
1072 static int arm_smmu_master_alloc_smes(struct device *dev)
1073 {
1074 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1075 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1076 	struct arm_smmu_device *smmu = cfg->smmu;
1077 	struct arm_smmu_smr *smrs = smmu->smrs;
1078 	int i, idx, ret;
1079 
1080 	mutex_lock(&smmu->stream_map_mutex);
1081 	/* Figure out a viable stream map entry allocation */
1082 	for_each_cfg_sme(cfg, fwspec, i, idx) {
1083 		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1084 		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1085 
1086 		if (idx != INVALID_SMENDX) {
1087 			ret = -EEXIST;
1088 			goto out_err;
1089 		}
1090 
1091 		ret = arm_smmu_find_sme(smmu, sid, mask);
1092 		if (ret < 0)
1093 			goto out_err;
1094 
1095 		idx = ret;
1096 		if (smrs && smmu->s2crs[idx].count == 0) {
1097 			smrs[idx].id = sid;
1098 			smrs[idx].mask = mask;
1099 			smrs[idx].valid = true;
1100 		}
1101 		smmu->s2crs[idx].count++;
1102 		cfg->smendx[i] = (s16)idx;
1103 	}
1104 
1105 	/* It worked! Now, poke the actual hardware */
1106 	for_each_cfg_sme(cfg, fwspec, i, idx)
1107 		arm_smmu_write_sme(smmu, idx);
1108 
1109 	mutex_unlock(&smmu->stream_map_mutex);
1110 	return 0;
1111 
1112 out_err:
1113 	while (i--) {
1114 		arm_smmu_free_sme(smmu, cfg->smendx[i]);
1115 		cfg->smendx[i] = INVALID_SMENDX;
1116 	}
1117 	mutex_unlock(&smmu->stream_map_mutex);
1118 	return ret;
1119 }
1120 
1121 static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
1122 				      struct iommu_fwspec *fwspec)
1123 {
1124 	struct arm_smmu_device *smmu = cfg->smmu;
1125 	int i, idx;
1126 
1127 	mutex_lock(&smmu->stream_map_mutex);
1128 	for_each_cfg_sme(cfg, fwspec, i, idx) {
1129 		if (arm_smmu_free_sme(smmu, idx))
1130 			arm_smmu_write_sme(smmu, idx);
1131 		cfg->smendx[i] = INVALID_SMENDX;
1132 	}
1133 	mutex_unlock(&smmu->stream_map_mutex);
1134 }
1135 
1136 static void arm_smmu_master_install_s2crs(struct arm_smmu_master_cfg *cfg,
1137 					  enum arm_smmu_s2cr_type type,
1138 					  u8 cbndx, struct iommu_fwspec *fwspec)
1139 {
1140 	struct arm_smmu_device *smmu = cfg->smmu;
1141 	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1142 	int i, idx;
1143 
1144 	for_each_cfg_sme(cfg, fwspec, i, idx) {
1145 		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
1146 			continue;
1147 
1148 		s2cr[idx].type = type;
1149 		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1150 		s2cr[idx].cbndx = cbndx;
1151 		arm_smmu_write_s2cr(smmu, idx);
1152 	}
1153 }
1154 
1155 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1156 {
1157 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1158 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1159 	struct arm_smmu_master_cfg *cfg;
1160 	struct arm_smmu_device *smmu;
1161 	int ret;
1162 
1163 	/*
1164 	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1165 	 * domains between of_xlate() and probe_device() - we have no way to cope
1166 	 * with that, so until ARM gets converted to rely on groups and default
1167 	 * domains, just say no (but more politely than by dereferencing NULL).
1168 	 * This should be at least a WARN_ON once that's sorted.
1169 	 */
1170 	cfg = dev_iommu_priv_get(dev);
1171 	if (!cfg)
1172 		return -ENODEV;
1173 
1174 	smmu = cfg->smmu;
1175 
1176 	ret = arm_smmu_rpm_get(smmu);
1177 	if (ret < 0)
1178 		return ret;
1179 
1180 	/* Ensure that the domain is finalised */
1181 	ret = arm_smmu_init_domain_context(smmu_domain, smmu, dev);
1182 	if (ret < 0)
1183 		goto rpm_put;
1184 
1185 	/*
1186 	 * Sanity check the domain. We don't support domains across
1187 	 * different SMMUs.
1188 	 */
1189 	if (smmu_domain->smmu != smmu) {
1190 		ret = -EINVAL;
1191 		goto rpm_put;
1192 	}
1193 
1194 	/* Looks ok, so add the device to the domain */
1195 	arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_TRANS,
1196 				      smmu_domain->cfg.cbndx, fwspec);
1197 	arm_smmu_rpm_use_autosuspend(smmu);
1198 rpm_put:
1199 	arm_smmu_rpm_put(smmu);
1200 	return ret;
1201 }
1202 
1203 static int arm_smmu_attach_dev_type(struct device *dev,
1204 				    enum arm_smmu_s2cr_type type)
1205 {
1206 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1207 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1208 	struct arm_smmu_device *smmu;
1209 	int ret;
1210 
1211 	if (!cfg)
1212 		return -ENODEV;
1213 	smmu = cfg->smmu;
1214 
1215 	ret = arm_smmu_rpm_get(smmu);
1216 	if (ret < 0)
1217 		return ret;
1218 
1219 	arm_smmu_master_install_s2crs(cfg, type, 0, fwspec);
1220 	arm_smmu_rpm_use_autosuspend(smmu);
1221 	arm_smmu_rpm_put(smmu);
1222 	return 0;
1223 }
1224 
1225 static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
1226 					struct device *dev)
1227 {
1228 	return arm_smmu_attach_dev_type(dev, S2CR_TYPE_BYPASS);
1229 }
1230 
1231 static const struct iommu_domain_ops arm_smmu_identity_ops = {
1232 	.attach_dev = arm_smmu_attach_dev_identity,
1233 };
1234 
1235 static struct iommu_domain arm_smmu_identity_domain = {
1236 	.type = IOMMU_DOMAIN_IDENTITY,
1237 	.ops = &arm_smmu_identity_ops,
1238 };
1239 
1240 static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
1241 				       struct device *dev)
1242 {
1243 	return arm_smmu_attach_dev_type(dev, S2CR_TYPE_FAULT);
1244 }
1245 
1246 static const struct iommu_domain_ops arm_smmu_blocked_ops = {
1247 	.attach_dev = arm_smmu_attach_dev_blocked,
1248 };
1249 
1250 static struct iommu_domain arm_smmu_blocked_domain = {
1251 	.type = IOMMU_DOMAIN_BLOCKED,
1252 	.ops = &arm_smmu_blocked_ops,
1253 };
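/*
 * The identity and blocked domains above are static singletons shared by
 * every SMMU instance: attaching a device just rewrites its S2CRs to
 * BYPASS or FAULT respectively, without touching any context bank.
 */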
1254 
1255 static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
1256 			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
1257 			      int prot, gfp_t gfp, size_t *mapped)
1258 {
1259 	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1260 	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1261 	int ret;
1262 
1263 	if (!ops)
1264 		return -ENODEV;
1265 
1266 	arm_smmu_rpm_get(smmu);
1267 	ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
1268 	arm_smmu_rpm_put(smmu);
1269 
1270 	return ret;
1271 }
1272 
1273 static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
1274 				   size_t pgsize, size_t pgcount,
1275 				   struct iommu_iotlb_gather *iotlb_gather)
1276 {
1277 	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1278 	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1279 	size_t ret;
1280 
1281 	if (!ops)
1282 		return 0;
1283 
1284 	arm_smmu_rpm_get(smmu);
1285 	ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);
1286 	arm_smmu_rpm_put(smmu);
1287 
1288 	return ret;
1289 }
1290 
1291 static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
1292 {
1293 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1294 	struct arm_smmu_device *smmu = smmu_domain->smmu;
1295 
1296 	if (smmu_domain->flush_ops) {
1297 		arm_smmu_rpm_get(smmu);
1298 		smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
1299 		arm_smmu_rpm_put(smmu);
1300 	}
1301 }
1302 
1303 static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
1304 				struct iommu_iotlb_gather *gather)
1305 {
1306 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1307 	struct arm_smmu_device *smmu = smmu_domain->smmu;
1308 
1309 	if (!smmu)
1310 		return;
1311 
1312 	arm_smmu_rpm_get(smmu);
1313 	if (smmu->version == ARM_SMMU_V2 ||
1314 	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1315 		arm_smmu_tlb_sync_context(smmu_domain);
1316 	else
1317 		arm_smmu_tlb_sync_global(smmu);
1318 	arm_smmu_rpm_put(smmu);
1319 }
1320 
1321 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1322 					      dma_addr_t iova)
1323 {
1324 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1325 	struct arm_smmu_device *smmu = smmu_domain->smmu;
1326 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1327 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1328 	struct device *dev = smmu->dev;
1329 	void __iomem *reg;
1330 	u32 tmp;
1331 	u64 phys;
1332 	unsigned long va, flags;
1333 	int ret, idx = cfg->cbndx;
1334 	phys_addr_t addr = 0;
1335 
1336 	ret = arm_smmu_rpm_get(smmu);
1337 	if (ret < 0)
1338 		return 0;
1339 
1340 	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
1341 	va = iova & ~0xfffUL;
1342 	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1343 		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1344 	else
1345 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1346 
1347 	reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1348 	if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_CB_ATSR_ACTIVE),
1349 				      5, 50)) {
1350 		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1351 		dev_err(dev,
1352 			"iova to phys timed out on %pad. Falling back to software table walk.\n",
1353 			&iova);
1354 		arm_smmu_rpm_put(smmu);
1355 		return ops->iova_to_phys(ops, iova);
1356 	}
1357 
1358 	phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
1359 	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1360 	if (phys & ARM_SMMU_CB_PAR_F) {
1361 		dev_err(dev, "translation fault!\n");
1362 		dev_err(dev, "PAR = 0x%llx\n", phys);
1363 		goto out;
1364 	}
1365 
1366 	addr = (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1367 out:
1368 	arm_smmu_rpm_put(smmu);
1369 
1370 	return addr;
1371 }
1372 
1373 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1374 					dma_addr_t iova)
1375 {
1376 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1377 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1378 
1379 	if (!ops)
1380 		return 0;
1381 
1382 	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1383 			smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1384 		return arm_smmu_iova_to_phys_hard(domain, iova);
1385 
1386 	return ops->iova_to_phys(ops, iova);
1387 }
1388 
1389 static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
1390 {
1391 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1392 
1393 	switch (cap) {
1394 	case IOMMU_CAP_CACHE_COHERENCY:
1395 		/*
1396 		 * It's overwhelmingly the case in practice that when the pagetable
1397 		 * walk interface is connected to a coherent interconnect, all the
1398 		 * translation interfaces are too. Furthermore if the device is
1399 		 * natively coherent, then its translation interface must also be.
1400 		 */
1401 		return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
1402 			device_get_dma_attr(dev) == DEV_DMA_COHERENT;
1403 	case IOMMU_CAP_NOEXEC:
1404 	case IOMMU_CAP_DEFERRED_FLUSH:
1405 		return true;
1406 	default:
1407 		return false;
1408 	}
1409 }
1410 
1411 static
1412 struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
1413 {
1414 	struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
1415 							  fwnode);
1416 	put_device(dev);
1417 	return dev ? dev_get_drvdata(dev) : NULL;
1418 }
1419 
1420 static struct iommu_device *arm_smmu_probe_device(struct device *dev)
1421 {
1422 	struct arm_smmu_device *smmu = NULL;
1423 	struct arm_smmu_master_cfg *cfg;
1424 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1425 	int i, ret;
1426 
1427 	if (using_legacy_binding) {
1428 		ret = arm_smmu_register_legacy_master(dev, &smmu);
1429 
1430 		/*
1431 		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1432 		 * will allocate/initialise a new one. Thus we need to update fwspec for
1433 		 * later use.
1434 		 */
1435 		fwspec = dev_iommu_fwspec_get(dev);
1436 		if (ret)
1437 			goto out_free;
1438 	} else {
1439 		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
1440 	}
1441 
1442 	ret = -EINVAL;
1443 	for (i = 0; i < fwspec->num_ids; i++) {
1444 		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1445 		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1446 
1447 		if (sid & ~smmu->streamid_mask) {
1448 			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
1449 				sid, smmu->streamid_mask);
1450 			goto out_free;
1451 		}
1452 		if (mask & ~smmu->smr_mask_mask) {
1453 			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
1454 				mask, smmu->smr_mask_mask);
1455 			goto out_free;
1456 		}
1457 	}
1458 
1459 	ret = -ENOMEM;
1460 	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1461 		      GFP_KERNEL);
1462 	if (!cfg)
1463 		goto out_free;
1464 
1465 	cfg->smmu = smmu;
1466 	dev_iommu_priv_set(dev, cfg);
1467 	while (i--)
1468 		cfg->smendx[i] = INVALID_SMENDX;
1469 
1470 	ret = arm_smmu_rpm_get(smmu);
1471 	if (ret < 0)
1472 		goto out_cfg_free;
1473 
1474 	ret = arm_smmu_master_alloc_smes(dev);
1475 	arm_smmu_rpm_put(smmu);
1476 
1477 	if (ret)
1478 		goto out_cfg_free;
1479 
1480 	device_link_add(dev, smmu->dev,
1481 			DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
1482 
1483 	return &smmu->iommu;
1484 
1485 out_cfg_free:
1486 	kfree(cfg);
1487 out_free:
1488 	iommu_fwspec_free(dev);
1489 	return ERR_PTR(ret);
1490 }
1491 
1492 static void arm_smmu_release_device(struct device *dev)
1493 {
1494 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1495 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1496 	int ret;
1497 
1498 	ret = arm_smmu_rpm_get(cfg->smmu);
1499 	if (ret < 0)
1500 		return;
1501 
1502 	arm_smmu_master_free_smes(cfg, fwspec);
1503 
1504 	arm_smmu_rpm_put(cfg->smmu);
1505 
1506 	kfree(cfg);
1507 }
1508 
1509 static void arm_smmu_probe_finalize(struct device *dev)
1510 {
1511 	struct arm_smmu_master_cfg *cfg;
1512 	struct arm_smmu_device *smmu;
1513 
1514 	cfg = dev_iommu_priv_get(dev);
1515 	smmu = cfg->smmu;
1516 
1517 	if (smmu->impl && smmu->impl->probe_finalize)
1518 		smmu->impl->probe_finalize(smmu, dev);
1519 }
1520 
1521 static struct iommu_group *arm_smmu_device_group(struct device *dev)
1522 {
1523 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1524 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1525 	struct arm_smmu_device *smmu = cfg->smmu;
1526 	struct iommu_group *group = NULL;
1527 	int i, idx;
1528 
1529 	mutex_lock(&smmu->stream_map_mutex);
1530 	for_each_cfg_sme(cfg, fwspec, i, idx) {
1531 		if (group && smmu->s2crs[idx].group &&
1532 		    group != smmu->s2crs[idx].group) {
1533 			mutex_unlock(&smmu->stream_map_mutex);
1534 			return ERR_PTR(-EINVAL);
1535 		}
1536 
1537 		group = smmu->s2crs[idx].group;
1538 	}
1539 
1540 	if (group) {
1541 		mutex_unlock(&smmu->stream_map_mutex);
1542 		return iommu_group_ref_get(group);
1543 	}
1544 
1545 	if (dev_is_pci(dev))
1546 		group = pci_device_group(dev);
1547 	else if (dev_is_fsl_mc(dev))
1548 		group = fsl_mc_device_group(dev);
1549 	else
1550 		group = generic_device_group(dev);
1551 
1552 	/* Remember group for faster lookups */
1553 	if (!IS_ERR(group))
1554 		for_each_cfg_sme(cfg, fwspec, i, idx)
1555 			smmu->s2crs[idx].group = group;
1556 
1557 	mutex_unlock(&smmu->stream_map_mutex);
1558 	return group;
1559 }
1560 
1561 static int arm_smmu_enable_nesting(struct iommu_domain *domain)
1562 {
1563 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1564 	int ret = 0;
1565 
1566 	mutex_lock(&smmu_domain->init_mutex);
1567 	if (smmu_domain->smmu)
1568 		ret = -EPERM;
1569 	else
1570 		smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1571 	mutex_unlock(&smmu_domain->init_mutex);
1572 
1573 	return ret;
1574 }
1575 
1576 static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
1577 		unsigned long quirks)
1578 {
1579 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1580 	int ret = 0;
1581 
1582 	mutex_lock(&smmu_domain->init_mutex);
1583 	if (smmu_domain->smmu)
1584 		ret = -EPERM;
1585 	else
1586 		smmu_domain->pgtbl_quirks = quirks;
1587 	mutex_unlock(&smmu_domain->init_mutex);
1588 
1589 	return ret;
1590 }
1591 
arm_smmu_of_xlate(struct device * dev,const struct of_phandle_args * args)1592 static int arm_smmu_of_xlate(struct device *dev,
1593 			     const struct of_phandle_args *args)
1594 {
1595 	u32 mask, fwid = 0;
1596 
1597 	if (args->args_count > 0)
1598 		fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
1599 
1600 	if (args->args_count > 1)
1601 		fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
1602 	else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1603 		fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
1604 
1605 	return iommu_fwspec_add_ids(dev, &fwid, 1);
1606 }
1607 
1608 static void arm_smmu_get_resv_regions(struct device *dev,
1609 				      struct list_head *head)
1610 {
1611 	struct iommu_resv_region *region;
1612 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1613 
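	/* Reserve a software-mapped MSI IOVA window for the device */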
1614 	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1615 					 prot, IOMMU_RESV_SW_MSI, GFP_KERNEL);
1616 	if (!region)
1617 		return;
1618 
1619 	list_add_tail(&region->list, head);
1620 
1621 	iommu_dma_get_resv_regions(dev, head);
1622 }
1623 
1624 static int arm_smmu_def_domain_type(struct device *dev)
1625 {
1626 	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
1627 	const struct arm_smmu_impl *impl = cfg->smmu->impl;
1628 
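	/* The legacy DT binding cannot support DMA domains, so keep those masters in bypass */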
1629 	if (using_legacy_binding)
1630 		return IOMMU_DOMAIN_IDENTITY;
1631 
1632 	if (impl && impl->def_domain_type)
1633 		return impl->def_domain_type(dev);
1634 
1635 	return 0;
1636 }
1637 
1638 static struct iommu_ops arm_smmu_ops = {
1639 	.identity_domain	= &arm_smmu_identity_domain,
1640 	.blocked_domain		= &arm_smmu_blocked_domain,
1641 	.capable		= arm_smmu_capable,
1642 	.domain_alloc_paging	= arm_smmu_domain_alloc_paging,
1643 	.probe_device		= arm_smmu_probe_device,
1644 	.release_device		= arm_smmu_release_device,
1645 	.probe_finalize		= arm_smmu_probe_finalize,
1646 	.device_group		= arm_smmu_device_group,
1647 	.of_xlate		= arm_smmu_of_xlate,
1648 	.get_resv_regions	= arm_smmu_get_resv_regions,
1649 	.def_domain_type	= arm_smmu_def_domain_type,
1650 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
1651 	.owner			= THIS_MODULE,
1652 	.default_domain_ops = &(const struct iommu_domain_ops) {
1653 		.attach_dev		= arm_smmu_attach_dev,
1654 		.map_pages		= arm_smmu_map_pages,
1655 		.unmap_pages		= arm_smmu_unmap_pages,
1656 		.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
1657 		.iotlb_sync		= arm_smmu_iotlb_sync,
1658 		.iova_to_phys		= arm_smmu_iova_to_phys,
1659 		.enable_nesting		= arm_smmu_enable_nesting,
1660 		.set_pgtable_quirks	= arm_smmu_set_pgtable_quirks,
1661 		.free			= arm_smmu_domain_free,
1662 	}
1663 };
1664 
1665 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1666 {
1667 	int i;
1668 	u32 reg;
1669 
1670 	/* clear global FSR */
1671 	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1672 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
1673 
1674 	/*
1675 	 * Reset stream mapping groups: Initial values mark all SMRn as
1676 	 * invalid and all S2CRn as bypass unless overridden.
1677 	 */
1678 	for (i = 0; i < smmu->num_mapping_groups; ++i)
1679 		arm_smmu_write_sme(smmu, i);
1680 
1681 	/* Make sure all context banks are disabled and clear CB_FSR */
1682 	for (i = 0; i < smmu->num_context_banks; ++i) {
1683 		arm_smmu_write_context_bank(smmu, i);
1684 		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_CB_FSR_FAULT);
1685 	}
1686 
1687 	/* Invalidate the TLB, just in case */
1688 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1689 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
1690 
1691 	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
1692 
1693 	/* Enable fault reporting */
1694 	reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
1695 		ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);
1696 
1697 	/* Disable TLB broadcasting. */
1698 	reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);
1699 
1700 	/* Enable client access, handling unmatched streams as appropriate */
1701 	reg &= ~ARM_SMMU_sCR0_CLIENTPD;
1702 	if (disable_bypass)
1703 		reg |= ARM_SMMU_sCR0_USFCFG;
1704 	else
1705 		reg &= ~ARM_SMMU_sCR0_USFCFG;
1706 
1707 	/* Disable forced broadcasting */
1708 	reg &= ~ARM_SMMU_sCR0_FB;
1709 
1710 	/* Don't upgrade barriers */
1711 	reg &= ~(ARM_SMMU_sCR0_BSU);
1712 
1713 	if (smmu->features & ARM_SMMU_FEAT_VMID16)
1714 		reg |= ARM_SMMU_sCR0_VMID16EN;
1715 
1716 	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1717 		reg |= ARM_SMMU_sCR0_EXIDENABLE;
1718 
1719 	if (smmu->impl && smmu->impl->reset)
1720 		smmu->impl->reset(smmu);
1721 
1722 	/* Push the button */
1723 	arm_smmu_tlb_sync_global(smmu);
1724 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
1725 }
1726 
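/* Translate the ID register address-size encodings into widths in bits */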
1727 static int arm_smmu_id_size_to_bits(int size)
1728 {
1729 	switch (size) {
1730 	case 0:
1731 		return 32;
1732 	case 1:
1733 		return 36;
1734 	case 2:
1735 		return 40;
1736 	case 3:
1737 		return 42;
1738 	case 4:
1739 		return 44;
1740 	case 5:
1741 	default:
1742 		return 48;
1743 	}
1744 }
1745 
1746 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1747 {
1748 	unsigned int size;
1749 	u32 id;
1750 	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
1751 	int i, ret;
1752 
1753 	dev_notice(smmu->dev, "probing hardware configuration...\n");
1754 	dev_notice(smmu->dev, "SMMUv%d with:\n",
1755 			smmu->version == ARM_SMMU_V2 ? 2 : 1);
1756 
1757 	/* ID0 */
1758 	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
1759 
1760 	/* Restrict available stages based on module parameter */
1761 	if (force_stage == 1)
1762 		id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
1763 	else if (force_stage == 2)
1764 		id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
1765 
1766 	if (id & ARM_SMMU_ID0_S1TS) {
1767 		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1768 		dev_notice(smmu->dev, "\tstage 1 translation\n");
1769 	}
1770 
1771 	if (id & ARM_SMMU_ID0_S2TS) {
1772 		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1773 		dev_notice(smmu->dev, "\tstage 2 translation\n");
1774 	}
1775 
1776 	if (id & ARM_SMMU_ID0_NTS) {
1777 		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1778 		dev_notice(smmu->dev, "\tnested translation\n");
1779 	}
1780 
1781 	if (!(smmu->features &
1782 		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
1783 		dev_err(smmu->dev, "\tno translation support!\n");
1784 		return -ENODEV;
1785 	}
1786 
1787 	if ((id & ARM_SMMU_ID0_S1TS) &&
1788 	    ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
1789 		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1790 		dev_notice(smmu->dev, "\taddress translation ops\n");
1791 	}
1792 
1793 	/*
1794 	 * In order for DMA API calls to work properly, we must defer to what
1795 	 * the FW says about coherency, regardless of what the hardware claims.
1796 	 * Fortunately, this also opens up a workaround for systems where the
1797 	 * ID register value has ended up configured incorrectly.
1798 	 */
1799 	cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
1800 	if (cttw_fw || cttw_reg)
1801 		dev_notice(smmu->dev, "\t%scoherent table walk\n",
1802 			   cttw_fw ? "" : "non-");
1803 	if (cttw_fw != cttw_reg)
1804 		dev_notice(smmu->dev,
1805 			   "\t(IDR0.CTTW overridden by FW configuration)\n");
1806 
1807 	/* Max. number of entries we have for stream matching/indexing */
1808 	if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
1809 		smmu->features |= ARM_SMMU_FEAT_EXIDS;
1810 		size = 1 << 16;
1811 	} else {
1812 		size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
1813 	}
1814 	smmu->streamid_mask = size - 1;
1815 	if (id & ARM_SMMU_ID0_SMS) {
1816 		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1817 		size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
1818 		if (size == 0) {
1819 			dev_err(smmu->dev,
1820 				"stream-matching supported, but no SMRs present!\n");
1821 			return -ENODEV;
1822 		}
1823 
1824 		/* Zero-initialised to mark as invalid */
1825 		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1826 					  GFP_KERNEL);
1827 		if (!smmu->smrs)
1828 			return -ENOMEM;
1829 
1830 		dev_notice(smmu->dev,
1831 			   "\tstream matching with %u register groups\n", size);
1832 	}
1833 	/* s2cr->type == 0 means translation, so initialise explicitly */
1834 	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1835 					 GFP_KERNEL);
1836 	if (!smmu->s2crs)
1837 		return -ENOMEM;
1838 	for (i = 0; i < size; i++)
1839 		smmu->s2crs[i] = s2cr_init_val;
1840 
1841 	smmu->num_mapping_groups = size;
1842 	mutex_init(&smmu->stream_map_mutex);
1843 	spin_lock_init(&smmu->global_sync_lock);
1844 
1845 	if (smmu->version < ARM_SMMU_V2 ||
1846 	    !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
1847 		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1848 		if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
1849 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1850 	}
1851 
1852 	/* ID1 */
1853 	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
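	/* SMMU_IDR1.PAGESIZE selects 64KB or 4KB pages in the register map */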
1854 	smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
1855 
1856 	/* Check for size mismatch of SMMU address space from mapped region */
1857 	size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
1858 	if (smmu->numpage != 2 * size << smmu->pgshift)
1859 		dev_warn(smmu->dev,
1860 			"SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
1861 			2 * size << smmu->pgshift, smmu->numpage);
1862 	/* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
1863 	smmu->numpage = size;
1864 
1865 	smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
1866 	smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
1867 	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1868 		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1869 		return -ENODEV;
1870 	}
1871 	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1872 		   smmu->num_context_banks, smmu->num_s2_context_banks);
1873 	smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1874 				 sizeof(*smmu->cbs), GFP_KERNEL);
1875 	if (!smmu->cbs)
1876 		return -ENOMEM;
1877 
1878 	/* ID2 */
1879 	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
1880 	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
1881 	smmu->ipa_size = size;
1882 
1883 	/* The output mask is also applied for bypass */
1884 	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
1885 	smmu->pa_size = size;
1886 
1887 	if (id & ARM_SMMU_ID2_VMID16)
1888 		smmu->features |= ARM_SMMU_FEAT_VMID16;
1889 
1890 	/*
1891 	 * What the page table walker can address actually depends on which
1892 	 * descriptor format is in use, but since a) we don't know that yet,
1893 	 * and b) it can vary per context bank, this will have to do...
1894 	 */
1895 	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1896 		dev_warn(smmu->dev,
1897 			 "failed to set DMA mask for table walker\n");
1898 
1899 	if (smmu->version < ARM_SMMU_V2) {
1900 		smmu->va_size = smmu->ipa_size;
1901 		if (smmu->version == ARM_SMMU_V1_64K)
1902 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1903 	} else {
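		/* SMMUv2: the upstream bus size bounds the input address width */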
1904 		size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
1905 		smmu->va_size = arm_smmu_id_size_to_bits(size);
1906 		if (id & ARM_SMMU_ID2_PTFS_4K)
1907 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
1908 		if (id & ARM_SMMU_ID2_PTFS_16K)
1909 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
1910 		if (id & ARM_SMMU_ID2_PTFS_64K)
1911 			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1912 	}
1913 
1914 	if (smmu->impl && smmu->impl->cfg_probe) {
1915 		ret = smmu->impl->cfg_probe(smmu);
1916 		if (ret)
1917 			return ret;
1918 	}
1919 
1920 	/* Now we've corralled the various formats, what'll it do? */
1921 	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1922 		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1923 	if (smmu->features &
1924 	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
1925 		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
1926 	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1927 		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
1928 	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1929 		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
1930 
1931 	if (arm_smmu_ops.pgsize_bitmap == -1UL)
1932 		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1933 	else
1934 		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1935 	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1936 		   smmu->pgsize_bitmap);
1937 
1938 
1939 	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1940 		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1941 			   smmu->va_size, smmu->ipa_size);
1942 
1943 	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1944 		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1945 			   smmu->ipa_size, smmu->pa_size);
1946 
1947 	return 0;
1948 }
1949 
1950 struct arm_smmu_match_data {
1951 	enum arm_smmu_arch_version version;
1952 	enum arm_smmu_implementation model;
1953 };
1954 
1955 #define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
1956 static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
1957 
1958 ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1959 ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
1960 ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
1961 ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
1962 ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
1963 ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
1964 
1965 static const struct of_device_id arm_smmu_of_match[] = {
1966 	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1967 	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1968 	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1969 	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1970 	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1971 	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1972 	{ .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1973 	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1974 	{ },
1975 };
1976 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1977 
1978 #ifdef CONFIG_ACPI
1979 static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1980 {
1981 	int ret = 0;
1982 
1983 	switch (model) {
1984 	case ACPI_IORT_SMMU_V1:
1985 	case ACPI_IORT_SMMU_CORELINK_MMU400:
1986 		smmu->version = ARM_SMMU_V1;
1987 		smmu->model = GENERIC_SMMU;
1988 		break;
1989 	case ACPI_IORT_SMMU_CORELINK_MMU401:
1990 		smmu->version = ARM_SMMU_V1_64K;
1991 		smmu->model = GENERIC_SMMU;
1992 		break;
1993 	case ACPI_IORT_SMMU_V2:
1994 		smmu->version = ARM_SMMU_V2;
1995 		smmu->model = GENERIC_SMMU;
1996 		break;
1997 	case ACPI_IORT_SMMU_CORELINK_MMU500:
1998 		smmu->version = ARM_SMMU_V2;
1999 		smmu->model = ARM_MMU500;
2000 		break;
2001 	case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2002 		smmu->version = ARM_SMMU_V2;
2003 		smmu->model = CAVIUM_SMMUV2;
2004 		break;
2005 	default:
2006 		ret = -ENODEV;
2007 	}
2008 
2009 	return ret;
2010 }
2011 
2012 static int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
2013 				      u32 *global_irqs, u32 *pmu_irqs)
2014 {
2015 	struct device *dev = smmu->dev;
2016 	struct acpi_iort_node *node =
2017 		*(struct acpi_iort_node **)dev_get_platdata(dev);
2018 	struct acpi_iort_smmu *iort_smmu;
2019 	int ret;
2020 
2021 	/* Retrieve SMMU1/2 specific data */
2022 	iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2023 
2024 	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2025 	if (ret < 0)
2026 		return ret;
2027 
2028 	/* Ignore the configuration access interrupt */
2029 	*global_irqs = 1;
2030 	*pmu_irqs = 0;
2031 
2032 	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2033 		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2034 
2035 	return 0;
2036 }
2037 #else
2038 static inline int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
2039 					     u32 *global_irqs, u32 *pmu_irqs)
2040 {
2041 	return -ENODEV;
2042 }
2043 #endif
2044 
2045 static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
2046 				    u32 *global_irqs, u32 *pmu_irqs)
2047 {
2048 	const struct arm_smmu_match_data *data;
2049 	struct device *dev = smmu->dev;
2050 	bool legacy_binding;
2051 
2052 	if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs))
2053 		return dev_err_probe(dev, -ENODEV,
2054 				     "missing #global-interrupts property\n");
2055 	*pmu_irqs = 0;
2056 
2057 	data = of_device_get_match_data(dev);
2058 	smmu->version = data->version;
2059 	smmu->model = data->model;
2060 
2061 	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2062 	if (legacy_binding && !using_generic_binding) {
2063 		if (!using_legacy_binding) {
2064 			pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
2065 				  IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
2066 		}
2067 		using_legacy_binding = true;
2068 	} else if (!legacy_binding && !using_legacy_binding) {
2069 		using_generic_binding = true;
2070 	} else {
2071 		dev_err(dev, "not probing due to mismatched DT properties\n");
2072 		return -ENODEV;
2073 	}
2074 
2075 	if (of_dma_is_coherent(dev->of_node))
2076 		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2077 
2078 	return 0;
2079 }
2080 
2081 static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
2082 {
2083 	struct list_head rmr_list;
2084 	struct iommu_resv_region *e;
2085 	int idx, cnt = 0;
2086 	u32 reg;
2087 
2088 	INIT_LIST_HEAD(&rmr_list);
2089 	iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2090 
2091 	/*
2092 	 * Rather than trying to look at existing mappings that
2093 	 * are set up by the firmware and then invalidate the ones
2094 	 * that do not have matching RMR entries, just disable the
2095 	 * SMMU until it gets enabled again in the reset routine.
2096 	 */
2097 	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
2098 	reg |= ARM_SMMU_sCR0_CLIENTPD;
2099 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
2100 
2101 	list_for_each_entry(e, &rmr_list, list) {
2102 		struct iommu_iort_rmr_data *rmr;
2103 		int i;
2104 
2105 		rmr = container_of(e, struct iommu_iort_rmr_data, rr);
2106 		for (i = 0; i < rmr->num_sids; i++) {
2107 			idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
2108 			if (idx < 0)
2109 				continue;
2110 
2111 			if (smmu->s2crs[idx].count == 0) {
2112 				smmu->smrs[idx].id = rmr->sids[i];
2113 				smmu->smrs[idx].mask = 0;
2114 				smmu->smrs[idx].valid = true;
2115 			}
2116 			smmu->s2crs[idx].count++;
2117 			smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
2118 			smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2119 
2120 			cnt++;
2121 		}
2122 	}
2123 
2124 	dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
2125 		   cnt == 1 ? "" : "s");
2126 	iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2127 }
2128 
2129 static int arm_smmu_device_probe(struct platform_device *pdev)
2130 {
2131 	struct resource *res;
2132 	struct arm_smmu_device *smmu;
2133 	struct device *dev = &pdev->dev;
2134 	int num_irqs, i, err;
2135 	u32 global_irqs, pmu_irqs;
2136 	irqreturn_t (*global_fault)(int irq, void *dev);
2137 
2138 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2139 	if (!smmu) {
2140 		dev_err(dev, "failed to allocate arm_smmu_device\n");
2141 		return -ENOMEM;
2142 	}
2143 	smmu->dev = dev;
2144 
2145 	if (dev->of_node)
2146 		err = arm_smmu_device_dt_probe(smmu, &global_irqs, &pmu_irqs);
2147 	else
2148 		err = arm_smmu_device_acpi_probe(smmu, &global_irqs, &pmu_irqs);
2149 	if (err)
2150 		return err;
2151 
2152 	smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2153 	if (IS_ERR(smmu->base))
2154 		return PTR_ERR(smmu->base);
2155 	smmu->ioaddr = res->start;
2156 
2157 	/*
2158 	 * The resource size should effectively match the value of SMMU_TOP;
2159 	 * stash that temporarily until we know PAGESIZE to validate it with.
2160 	 */
2161 	smmu->numpage = resource_size(res);
2162 
2163 	smmu = arm_smmu_impl_init(smmu);
2164 	if (IS_ERR(smmu))
2165 		return PTR_ERR(smmu);
2166 
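	/* IRQs are ordered as: global fault IRQs, PMU IRQs, then per-context-bank IRQs */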
2167 	num_irqs = platform_irq_count(pdev);
2168 
2169 	smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
2170 	if (smmu->num_context_irqs <= 0)
2171 		return dev_err_probe(dev, -ENODEV,
2172 				"found %d interrupts but expected at least %d\n",
2173 				num_irqs, global_irqs + pmu_irqs + 1);
2174 
2175 	smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
2176 				  sizeof(*smmu->irqs), GFP_KERNEL);
2177 	if (!smmu->irqs)
2178 		return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
2179 				     smmu->num_context_irqs);
2180 
2181 	for (i = 0; i < smmu->num_context_irqs; i++) {
2182 		int irq = platform_get_irq(pdev, global_irqs + pmu_irqs + i);
2183 
2184 		if (irq < 0)
2185 			return irq;
2186 		smmu->irqs[i] = irq;
2187 	}
2188 
2189 	err = devm_clk_bulk_get_all(dev, &smmu->clks);
2190 	if (err < 0) {
2191 		dev_err(dev, "failed to get clocks %d\n", err);
2192 		return err;
2193 	}
2194 	smmu->num_clks = err;
2195 
2196 	err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2197 	if (err)
2198 		return err;
2199 
2200 	err = arm_smmu_device_cfg_probe(smmu);
2201 	if (err)
2202 		return err;
2203 
2204 	if (smmu->version == ARM_SMMU_V2) {
2205 		if (smmu->num_context_banks > smmu->num_context_irqs) {
2206 			dev_err(dev,
2207 			      "found only %d context irq(s) but %d required\n",
2208 			      smmu->num_context_irqs, smmu->num_context_banks);
2209 			return -ENODEV;
2210 		}
2211 
2212 		/* Ignore superfluous interrupts */
2213 		smmu->num_context_irqs = smmu->num_context_banks;
2214 	}
2215 
2216 	if (smmu->impl && smmu->impl->global_fault)
2217 		global_fault = smmu->impl->global_fault;
2218 	else
2219 		global_fault = arm_smmu_global_fault;
2220 
2221 	for (i = 0; i < global_irqs; i++) {
2222 		int irq = platform_get_irq(pdev, i);
2223 
2224 		if (irq < 0)
2225 			return irq;
2226 
2227 		err = devm_request_irq(dev, irq, global_fault, IRQF_SHARED,
2228 				       "arm-smmu global fault", smmu);
2229 		if (err)
2230 			return dev_err_probe(dev, err,
2231 					"failed to request global IRQ %d (%u)\n",
2232 					i, irq);
2233 	}
2234 
2235 	err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2236 				     "smmu.%pa", &smmu->ioaddr);
2237 	if (err) {
2238 		dev_err(dev, "Failed to register iommu in sysfs\n");
2239 		return err;
2240 	}
2241 
2242 	err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
2243 				    using_legacy_binding ? NULL : dev);
2244 	if (err) {
2245 		dev_err(dev, "Failed to register iommu\n");
2246 		iommu_device_sysfs_remove(&smmu->iommu);
2247 		return err;
2248 	}
2249 
2250 	platform_set_drvdata(pdev, smmu);
2251 
2252 	/* Check for RMRs and install bypass SMRs if any */
2253 	arm_smmu_rmr_install_bypass_smr(smmu);
2254 
2255 	arm_smmu_device_reset(smmu);
2256 	arm_smmu_test_smr_masks(smmu);
2257 
2258 	/*
2259 	 * We want to avoid touching dev->power.lock in fastpaths unless
2260 	 * it's really going to do something useful - pm_runtime_enabled()
2261 	 * can serve as an ideal proxy for that decision. So, conditionally
2262 	 * enable pm_runtime.
2263 	 */
2264 	if (dev->pm_domain) {
2265 		pm_runtime_set_active(dev);
2266 		pm_runtime_enable(dev);
2267 	}
2268 
2269 	return 0;
2270 }
2271 
2272 static void arm_smmu_device_shutdown(struct platform_device *pdev)
2273 {
2274 	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
2275 
2276 	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2277 		dev_notice(&pdev->dev, "disabling translation\n");
2278 
2279 	arm_smmu_rpm_get(smmu);
2280 	/* Turn the thing off */
2281 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
2282 	arm_smmu_rpm_put(smmu);
2283 
2284 	if (pm_runtime_enabled(smmu->dev))
2285 		pm_runtime_force_suspend(smmu->dev);
2286 	else
2287 		clk_bulk_disable(smmu->num_clks, smmu->clks);
2288 
2289 	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2290 }
2291 
2292 static void arm_smmu_device_remove(struct platform_device *pdev)
2293 {
2294 	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
2295 
2296 	iommu_device_unregister(&smmu->iommu);
2297 	iommu_device_sysfs_remove(&smmu->iommu);
2298 
2299 	arm_smmu_device_shutdown(pdev);
2300 }
2301 
2302 static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
2303 {
2304 	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2305 	int ret;
2306 
2307 	ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2308 	if (ret)
2309 		return ret;
2310 
2311 	arm_smmu_device_reset(smmu);
2312 
2313 	return 0;
2314 }
2315 
2316 static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
2317 {
2318 	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2319 
2320 	clk_bulk_disable(smmu->num_clks, smmu->clks);
2321 
2322 	return 0;
2323 }
2324 
2325 static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
2326 {
2327 	int ret;
2328 	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2329 
2330 	ret = clk_bulk_prepare(smmu->num_clks, smmu->clks);
2331 	if (ret)
2332 		return ret;
2333 
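	/* If the SMMU was runtime-suspended, leave it that way; runtime PM resumes it on demand */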
2334 	if (pm_runtime_suspended(dev))
2335 		return 0;
2336 
2337 	ret = arm_smmu_runtime_resume(dev);
2338 	if (ret)
2339 		clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2340 
2341 	return ret;
2342 }
2343 
2344 static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
2345 {
2346 	int ret = 0;
2347 	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2348 
2349 	if (pm_runtime_suspended(dev))
2350 		goto clk_unprepare;
2351 
2352 	ret = arm_smmu_runtime_suspend(dev);
2353 	if (ret)
2354 		return ret;
2355 
2356 clk_unprepare:
2357 	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2358 	return ret;
2359 }
2360 
2361 static const struct dev_pm_ops arm_smmu_pm_ops = {
2362 	SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
2363 	SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
2364 			   arm_smmu_runtime_resume, NULL)
2365 };
2366 
2367 static struct platform_driver arm_smmu_driver = {
2368 	.driver	= {
2369 		.name			= "arm-smmu",
2370 		.of_match_table		= arm_smmu_of_match,
2371 		.pm			= &arm_smmu_pm_ops,
2372 		.suppress_bind_attrs    = true,
2373 	},
2374 	.probe	= arm_smmu_device_probe,
2375 	.remove_new = arm_smmu_device_remove,
2376 	.shutdown = arm_smmu_device_shutdown,
2377 };
2378 module_platform_driver(arm_smmu_driver);
2379 
2380 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2381 MODULE_AUTHOR("Will Deacon <will@kernel.org>");
2382 MODULE_ALIAS("platform:arm-smmu");
2383 MODULE_LICENSE("GPL v2");
2384