xref: /linux/drivers/iommu/iommu-sva.c (revision 6c7353836a91b1479e6b81791cdc163fb04b4834)
// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-sva.h"

static DEFINE_MUTEX(iommu_sva_lock);

/* Allocate a PASID and iommu_mm_data for the mm, or return the existing one */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	lockdep_assert_held(&iommu_sva_lock);

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered in front of
	 * the initialization of the iommu_mm fields. Otherwise, readers may
	 * see a valid iommu_mm pointer with uninitialized values behind it.
	 */
	smp_store_release(&mm->iommu_mm, iommu_mm);
	return iommu_mm;
}
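
/*
 * Read-side sketch (illustrative; not a helper in this file): a lockless
 * reader of mm->iommu_mm pairs an acquire with the release above, so the
 * pasid and sva_domains fields are guaranteed to be initialized by the
 * time the pointer is observed:
 *
 *	struct iommu_mm_data *iommu_mm = smp_load_acquire(&mm->iommu_mm);
 *
 *	if (iommu_mm)
 *		pasid = iommu_mm->pasid;
 */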

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. Caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	mutex_lock(&iommu_sva_lock);

	/* Allocate the mm's PASID if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing domain. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and attach it to the device PASID. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (!domain) {
		ret = -ENOMEM;
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
	list_add(&domain->next, &mm->iommu_mm->sva_domains);

out:
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	handle->domain = domain;
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
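
/*
 * Usage sketch (illustrative only; the PASID programming step is
 * device-specific and hypothetical): a driver enables the SVA feature,
 * binds the current process, programs the returned PASID into its
 * hardware, and unbinds once the device has quiesced that PASID.
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *	int ret;
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	if (ret)
 *		return ret;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	pasid = iommu_sva_get_pasid(handle);
 *	... program pasid into the device's PASID context ...
 *
 *	... later, after stopping DMA tagged with this PASID ...
 *	iommu_sva_unbind_device(handle);
 */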

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put a reference to a bond between device and address space. The device
 * should not be issuing any more transactions for this PASID. All outstanding
 * page requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);

/*
 * I/O page fault handler for SVA
 */
enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	struct mm_struct *mm = data;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}
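
/*
 * Wiring note (sketch, based on the SVA domain allocation path): the IOPF
 * framework reaches this handler through the SVA domain, which is set up
 * roughly as
 *
 *	domain->iopf_handler = iommu_sva_handle_iopf;
 *	domain->fault_data = mm;
 *
 * so @data above is the mm that was bound to the device.
 */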
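/*
 * Free the mm's PASID and iommu_mm_data when the mm itself goes away.
 * This runs from the mm teardown path (__mmdrop()), after the last
 * reference to the mm has been dropped, so it does not race with a
 * concurrent iommu_sva_bind_device() on the same mm.
 */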
void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}