xref: /linux/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c (revision 64efb3def3a53effe01fa750eec6e7369f65e386)
// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <kunit/visibility.h>

#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"

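/*
 * One arm_smmu_mmu_notifier is shared by all bonds of the same {domain, mm}
 * pair: @refs counts the bonds, @cd is the context descriptor shared with the
 * CPU, and @cleared records whether arm_smmu_mm_release() has already torn
 * the translation down.
 */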
struct arm_smmu_mmu_notifier {
	struct mmu_notifier		mn;
	struct arm_smmu_ctx_desc	*cd;
	bool				cleared;
	refcount_t			refs;
	struct list_head		list;
	struct arm_smmu_domain		*domain;
};

#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)

struct arm_smmu_bond {
	struct mm_struct		*mm;
	struct arm_smmu_mmu_notifier	*smmu_mn;
	struct list_head		list;
};

static DEFINE_MUTEX(sva_lock);

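/*
 * Write an updated CD (currently only needed when the ASID changes) to every
 * master attached to this stage-1 domain.
 */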
static void
arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_master_domain *master_domain;
	struct arm_smmu_cd target_cd;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master_domain, &smmu_domain->devices, devices_elm) {
		struct arm_smmu_master *master = master_domain->master;
		struct arm_smmu_cd *cdptr;

		cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid);
		if (WARN_ON(!cdptr))
			continue;

		arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
		arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr,
					&target_cd);
	}
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
}

/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it.
 */
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	int ret;
	u32 new_asid;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain;

	cd = xa_load(&arm_smmu_asid_xa, asid);
	if (!cd)
		return NULL;

	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		/* All devices bound to this mm use the same cd struct. */
		refcount_inc(&cd->refs);
		return cd;
	}

	smmu_domain = container_of(cd, struct arm_smmu_domain, cd);
	smmu = smmu_domain->smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);
	/*
	 * Race with unmap: TLB invalidations will start targeting the new ASID,
	 * which isn't assigned yet. We'll do an invalidate-all on the old ASID
	 * later, so it doesn't matter.
	 */
	cd->asid = new_asid;
	/*
	 * Update ASID and invalidate CD in all associated masters. There will
	 * be some overlap between use of both ASIDs, until we invalidate the
	 * TLB.
	 */
	arm_smmu_update_s1_domain_cd_entry(smmu_domain);

	/* Invalidate TLB entries previously associated with that context */
	arm_smmu_tlb_inv_asid(smmu, asid);

	xa_erase(&arm_smmu_asid_xa, asid);
	return NULL;
}

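/*
 * Map the kernel's PAGE_SIZE onto the CD's TCR.TG0 granule encoding. The
 * static_assert restricts this to the three translation granules the
 * architecture defines.
 */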
static u64 page_size_to_cd(void)
{
	static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
		      PAGE_SIZE == SZ_64K);
	if (PAGE_SIZE == SZ_64K)
		return ARM_LPAE_TCR_TG0_64K;
	if (PAGE_SIZE == SZ_16K)
		return ARM_LPAE_TCR_TG0_16K;
	return ARM_LPAE_TCR_TG0_4K;
}

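/*
 * Build a CD that shares the CPU's page tables with the SMMU: TTB0 points at
 * the mm's pgd, and the TCR/MAIR fields mirror what the CPU is using, so both
 * walk the same stage-1 tables. With a NULL mm, build the quiescent variant
 * that keeps the entry valid but faults every access.
 */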
VISIBLE_IF_KUNIT
void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
			  struct arm_smmu_master *master, struct mm_struct *mm,
			  u16 asid)
{
	u64 par;

	memset(target, 0, sizeof(*target));

	par = cpuid_feature_extract_unsigned_field(
		read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1),
		ID_AA64MMFR0_EL1_PARANGE_SHIFT);

	target->data[0] = cpu_to_le64(
		CTXDESC_CD_0_TCR_EPD1 |
#ifdef __BIG_ENDIAN
		CTXDESC_CD_0_ENDI |
#endif
		CTXDESC_CD_0_V |
		FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par) |
		CTXDESC_CD_0_AA64 |
		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
		CTXDESC_CD_0_R |
		CTXDESC_CD_0_A |
		CTXDESC_CD_0_ASET |
		FIELD_PREP(CTXDESC_CD_0_ASID, asid));

	/*
	 * If no mm is passed then this creates an SVA entry that faults
	 * everything. arm_smmu_write_cd_entry() can hitlessly go between these
	 * two entry types since TTB0 is ignored by HW when EPD0 is set.
	 */
	if (mm) {
		target->data[0] |= cpu_to_le64(
			FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ,
				   64ULL - vabits_actual) |
			FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) |
			FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0,
				   ARM_LPAE_TCR_RGN_WBWA) |
			FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0,
				   ARM_LPAE_TCR_RGN_WBWA) |
			FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS));

		target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) &
					      CTXDESC_CD_1_TTB0_MASK);
	} else {
		target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0);

		/*
		 * Disable stall and immediately generate an abort if stall
		 * disable is permitted. This speeds up cleanup for an unclean
		 * exit if the device is still doing a lot of DMA.
		 */
		if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
			target->data[0] &=
				cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R));
	}

	/*
	 * The MAIR value is pretty much constant and global, so we can just
	 * get it from the current CPU register.
	 */
	target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);

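/*
 * Allocate a context descriptor that shares @mm's CPU ASID, pinning the ASID
 * via arm64_mm_context_get() so the CPU cannot reallocate it while the SMMU
 * is still using it.
 */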
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	u16 asid;
	int err = 0;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_ctx_desc *ret = NULL;

	/* Don't free the mm until we release the ASID */
	mmgrab(mm);

	asid = arm64_mm_context_get(mm);
	if (!asid) {
		err = -ESRCH;
		goto out_drop_mm;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		err = -ENOMEM;
		goto out_put_context;
	}

	refcount_set(&cd->refs, 1);

	mutex_lock(&arm_smmu_asid_lock);
	ret = arm_smmu_share_asid(mm, asid);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		goto out_free_cd;
	}

	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	mutex_unlock(&arm_smmu_asid_lock);

	if (err)
		goto out_free_asid;

	cd->asid = asid;
	cd->mm = mm;

	return cd;

out_free_asid:
	arm_smmu_free_asid(cd);
out_free_cd:
	kfree(cd);
out_put_context:
	arm64_mm_context_put(mm);
out_drop_mm:
	mmdrop(mm);
	return err < 0 ? ERR_PTR(err) : ret;
}

static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
	if (arm_smmu_free_asid(cd)) {
		/* Unpin ASID */
		arm64_mm_context_put(cd->mm);
		mmdrop(cd->mm);
		kfree(cd);
	}
}

/*
 * Cloned from MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h. On an SMMU
 * without the range invalidation feature, a large enough invalidation turns
 * into this many per-page TLBI commands in the command queue, which risks a
 * soft lockup. Beyond this threshold, issue a single address-space TLBI
 * command instead.
 */
#define CMDQ_MAX_TLBI_OPS		(1 << (PAGE_SHIFT - 3))
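/* For example, with 4K pages this is 1 << (12 - 3) = 512 pages, i.e. 2MiB. */

/*
 * Mirror a CPU TLB invalidation into the SMMU: downgrade to a full ASID
 * invalidation when the range is too large (or unbounded), skip the SMMU TLBI
 * entirely when BTM broadcasts CPU TLB maintenance to the SMMU, and always
 * invalidate the ATCs of attached devices.
 */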
static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long end)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	size_t size;

	/*
	 * mm_types defines vm_end as the first byte after the end address,
	 * whereas the IOMMU subsystem uses the last address of a range. So
	 * translate between the two here by computing the size, where a size
	 * of zero means "invalidate the whole address space".
	 */
	size = end - start;
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
			size = 0;
	} else {
		if (size == ULONG_MAX)
			size = 0;
	}

	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
		if (!size)
			arm_smmu_tlb_inv_asid(smmu_domain->smmu,
					      smmu_mn->cd->asid);
		else
			arm_smmu_tlb_inv_range_asid(start, size,
						    smmu_mn->cd->asid,
						    PAGE_SIZE, false,
						    smmu_domain);
	}

	arm_smmu_atc_inv_domain_sva(smmu_domain, mm_get_enqcmd_pasid(mm), start,
				    size);
}

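/*
 * On mm exit, quiesce rather than clear the CDs: each one is rewritten as a
 * faulting-but-valid SVA entry (avoiding C_BAD_CD events while DMA may still
 * be in flight), and then the TLB and ATC entries are invalidated.
 */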
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	struct arm_smmu_master_domain *master_domain;
	unsigned long flags;

	mutex_lock(&sva_lock);
	if (smmu_mn->cleared) {
		mutex_unlock(&sva_lock);
		return;
	}

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
	 * but disable translation.
	 */
	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master_domain, &smmu_domain->devices,
			    devices_elm) {
		struct arm_smmu_master *master = master_domain->master;
		struct arm_smmu_cd target;
		struct arm_smmu_cd *cdptr;

		cdptr = arm_smmu_get_cd_ptr(master, mm_get_enqcmd_pasid(mm));
		if (WARN_ON(!cdptr))
			continue;
		arm_smmu_make_sva_cd(&target, master, NULL, smmu_mn->cd->asid);
		arm_smmu_write_cd_entry(master, mm_get_enqcmd_pasid(mm), cdptr,
					&target);
	}
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	arm_smmu_atc_inv_domain_sva(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);

	smmu_mn->cleared = true;
	mutex_unlock(&sva_lock);
}

static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
	kfree(mn_to_smmu(mn));
}

static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
	.arch_invalidate_secondary_tlbs	= arm_smmu_mm_arch_invalidate_secondary_tlbs,
	.release			= arm_smmu_mm_release,
	.free_notifier			= arm_smmu_mmu_notifier_free,
};

/* Allocate or get existing MMU notifier for this {domain, mm} pair */
static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
			  struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_mmu_notifier *smmu_mn;

	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
		if (smmu_mn->mn.mm == mm) {
			refcount_inc(&smmu_mn->refs);
			return smmu_mn;
		}
	}

	cd = arm_smmu_alloc_shared_cd(mm);
	if (IS_ERR(cd))
		return ERR_CAST(cd);

	smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
	if (!smmu_mn) {
		ret = -ENOMEM;
		goto err_free_cd;
	}

	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

	ret = mmu_notifier_register(&smmu_mn->mn, mm);
	if (ret) {
		kfree(smmu_mn);
		goto err_free_cd;
	}

	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
	return smmu_mn;

err_free_cd:
	arm_smmu_free_shared_cd(cd);
	return ERR_PTR(ret);
}

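/*
 * Drop a reference on the notifier. The final put invalidates any remaining
 * TLB and ATC entries, unless mm_release() already did, and then releases the
 * shared CD; smmu_mn itself is freed from arm_smmu_mmu_notifier_free() once
 * an RCU grace period has elapsed.
 */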
static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (!refcount_dec_and_test(&smmu_mn->refs))
		return;

	list_del(&smmu_mn->list);

	/*
	 * If we went through clear(), we've already invalidated, and no
	 * new TLB entry can have been formed.
	 */
	if (!smmu_mn->cleared) {
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
		arm_smmu_atc_inv_domain_sva(smmu_domain,
					    mm_get_enqcmd_pasid(mm), 0, 0);
	}

	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
	arm_smmu_free_shared_cd(cd);
}

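/*
 * Create a bond between @dev and @mm. This requires SVA to be enabled on the
 * master and a stage-1 paging domain to already be attached to the device.
 * The bond shares the per-{domain, mm} notifier and lives on the master's
 * bond list until the PASID is removed.
 */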
static struct arm_smmu_bond *__arm_smmu_sva_bind(struct device *dev,
						 struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_bond *bond;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain;

	if (!(domain->type & __IOMMU_DOMAIN_PAGING))
		return ERR_PTR(-ENODEV);
	smmu_domain = to_smmu_domain(domain);
	if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
		return ERR_PTR(-ENODEV);

	if (!master || !master->sva_enabled)
		return ERR_PTR(-ENODEV);

	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
	if (!bond)
		return ERR_PTR(-ENOMEM);

	bond->mm = mm;

	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
	if (IS_ERR(bond->smmu_mn)) {
		ret = PTR_ERR(bond->smmu_mn);
		goto err_free_bond;
	}

	list_add(&bond->list, &master->bonds);
	return bond;

err_free_bond:
	kfree(bond);
	return ERR_PTR(ret);
}

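/*
 * Sharing CPU page tables only works if the SMMU is coherent and can handle
 * the CPU's page size, output address size and ASID width; with 52-bit VAs
 * the SMMU must also implement VAX.
 */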
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52)
		feat_mask |= ARM_SMMU_FEAT_VAX;

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
	 * not even pretending to support AArch32 here. Abort if the MMU outputs
	 * addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;

	return master->stall_enabled;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID support is mandatory for the moment */
	return master->ssid_bits;
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;

	if (!master->iopf_enabled)
		return -EINVAL;

	return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
}

static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;

	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}

int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	int ret;

	mutex_lock(&sva_lock);
	ret = arm_smmu_master_sva_enable_iopf(master);
	if (!ret)
		master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return ret;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");
		mutex_unlock(&sva_lock);
		return -EBUSY;
	}
	arm_smmu_master_sva_disable_iopf(master);
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}

void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * Some MMU notifiers may still be waiting to be freed, using
	 * arm_smmu_mmu_notifier_free(). Wait for them.
	 */
	mmu_notifier_synchronize();
}

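/*
 * Detach the PASID first, so the CD entry is cleared and no new translations
 * can start, then find and release the bond for this mm.
 */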
void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t id)
{
	struct mm_struct *mm = domain->mm;
	struct arm_smmu_bond *bond = NULL, *t;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);

	arm_smmu_remove_pasid(master, to_smmu_domain(domain), id);

	mutex_lock(&sva_lock);
	list_for_each_entry(t, &master->bonds, list) {
		if (t->mm == mm) {
			bond = t;
			break;
		}
	}

	if (!WARN_ON(!bond)) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		kfree(bond);
	}
	mutex_unlock(&sva_lock);
}

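/*
 * Attach the SVA domain at @id: bind the mm, build a shared CD using the
 * bond's ASID, and install it in the PASID's CD entry.
 */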
static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
				      struct device *dev, ioasid_t id)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct mm_struct *mm = domain->mm;
	struct arm_smmu_bond *bond;
	struct arm_smmu_cd target;
	int ret;

	if (mm_get_enqcmd_pasid(mm) != id)
		return -EINVAL;

	mutex_lock(&sva_lock);
	bond = __arm_smmu_sva_bind(dev, mm);
	if (IS_ERR(bond)) {
		mutex_unlock(&sva_lock);
		return PTR_ERR(bond);
	}

	arm_smmu_make_sva_cd(&target, master, mm, bond->smmu_mn->cd->asid);
	ret = arm_smmu_set_pasid(master, NULL, id, &target);
	if (ret) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		kfree(bond);
		mutex_unlock(&sva_lock);
		return ret;
	}
	mutex_unlock(&sva_lock);
	return 0;
}

static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
{
	kfree(domain);
}

static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
	.set_dev_pasid		= arm_smmu_sva_set_dev_pasid,
	.free			= arm_smmu_sva_domain_free
};

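/*
 * A minimal usage sketch, not taken from this file: a driver for a
 * stall-capable device typically reaches the code above through the core
 * IOMMU SVA API, roughly as follows (error handling elided):
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *
 *	iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 *	iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	pasid = iommu_sva_get_pasid(handle);
 *	... program pasid into the device and issue DMA on user VAs ...
 *	iommu_sva_unbind_device(handle);
 *
 * The bind allocates the SVA domain below and attaches it through
 * arm_smmu_sva_set_dev_pasid().
 */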
struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
					       struct mm_struct *mm)
{
	struct iommu_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return ERR_PTR(-ENOMEM);
	domain->type = IOMMU_DOMAIN_SVA;
	domain->ops = &arm_smmu_sva_domain_ops;

	return domain;
}
677