// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-priv.h"

static DEFINE_MUTEX(iommu_sva_lock);
static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm);

/* Allocate a PASID for the mm, or return the one already allocated to it. */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	lockdep_assert_held(&iommu_sva_lock);

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered in front of
	 * the initialization of the iommu_mm fields. If it were, readers
	 * could see a valid iommu_mm pointer with uninitialized fields.
	 */
	smp_store_release(&mm->iommu_mm, iommu_mm);
	return iommu_mm;
}
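
/*
 * Illustrative only, not part of this file: a minimal sketch of the
 * lock-free read side that the smp_store_release() above pairs with.
 * A reader that loads mm->iommu_mm with acquire semantics and observes
 * a non-NULL pointer is guaranteed to also observe the initialized
 * pasid and sva_domains fields. The helper name is hypothetical.
 */
static ioasid_t example_read_pasid(struct mm_struct *mm)
{
	/* Acquire pairs with the release store in iommu_alloc_mm_data(). */
	struct iommu_mm_data *iommu_mm = smp_load_acquire(&mm->iommu_mm);

	if (!iommu_mm)
		return IOMMU_PASID_INVALID;
	return iommu_mm->pasid;
}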

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. Caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_attach_handle *attach_handle;
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	if (!group)
		return ERR_PTR(-ENODEV);

	mutex_lock(&iommu_sva_lock);

	/* Allocate mm->pasid if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	/* A bond already exists, just take a reference. */
	attach_handle = iommu_attach_handle_get(group, iommu_mm->pasid, IOMMU_DOMAIN_SVA);
	if (!IS_ERR(attach_handle)) {
		handle = container_of(attach_handle, struct iommu_sva, handle);
		if (attach_handle->domain->mm != mm) {
			ret = -EBUSY;
			goto out_unlock;
		}
		refcount_inc(&handle->users);
		mutex_unlock(&iommu_sva_lock);
		return handle;
	}

	if (PTR_ERR(attach_handle) != -ENOENT) {
		ret = PTR_ERR(attach_handle);
		goto out_unlock;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing domain. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
						&handle->handle);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and set it on the device PASID. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (IS_ERR(domain)) {
		ret = PTR_ERR(domain);
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
					&handle->handle);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
	list_add(&domain->next, &mm->iommu_mm->sva_domains);

out:
	refcount_set(&handle->users, 1);
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
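
/*
 * Illustrative only, not part of this file: roughly how a device driver
 * might consume the API above. The function name and the device
 * programming step are hypothetical; error handling is abbreviated.
 */
static int example_driver_enable_sva(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;
	int ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	/* current->mm of a live task holds mm_users, as the API requires. */
	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	/* ... program pasid into the device's PASID/context table ... */

	/* Release with iommu_sva_unbind_device(handle) when done. */
	return 0;
}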

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	if (!refcount_dec_and_test(&handle->users)) {
		mutex_unlock(&iommu_sva_lock);
		return;
	}

	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);

void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}

/*
 * I/O page fault handler for SVA
 */
static enum iommu_page_response_code
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}

static void iommu_sva_handle_iopf(struct work_struct *work)
{
	struct iopf_fault *iopf;
	struct iopf_group *group;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);
	list_for_each_entry(iopf, &group->faults, list) {
		/*
		 * For the moment, errors are sticky: don't handle subsequent
		 * faults in the group if there is an error.
		 */
		if (status != IOMMU_PAGE_RESP_SUCCESS)
			break;

		status = iommu_sva_handle_mm(&iopf->fault,
					     group->attach_handle->domain->mm);
	}

	iopf_group_response(group, status);
	iopf_free_group(group);
}

static int iommu_sva_iopf_handler(struct iopf_group *group)
{
	struct iommu_fault_param *fault_param = group->fault_param;

	INIT_WORK(&group->work, iommu_sva_handle_iopf);
	if (!queue_work(fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}

static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	if (ops->domain_alloc_sva) {
		domain = ops->domain_alloc_sva(dev, mm);
		if (IS_ERR(domain))
			return domain;
	} else {
		domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
		if (!domain)
			return ERR_PTR(-ENOMEM);
	}

	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->owner = ops;
	domain->iopf_handler = iommu_sva_iopf_handler;

	return domain;
}
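
/*
 * Illustrative only, not part of this file: a rough sketch of the driver
 * side of ops->domain_alloc_sva(). All "example_*" names are hypothetical;
 * a real implementation (e.g. arm-smmu-v3 or the Intel driver) would also
 * register an mmu_notifier so that CPU TLB invalidations are mirrored to
 * the IOMMU.
 */
struct example_sva_domain {
	struct iommu_domain domain;
	/* ... driver-private state, e.g. an mmu_notifier ... */
};

static int example_sva_set_dev_pasid(struct iommu_domain *domain,
				     struct device *dev, ioasid_t pasid)
{
	/* Install the mm's page table for @pasid in the device's tables. */
	return 0;
}

static void example_sva_domain_free(struct iommu_domain *domain)
{
	kfree(container_of(domain, struct example_sva_domain, domain));
}

static const struct iommu_domain_ops example_sva_domain_ops = {
	.set_dev_pasid	= example_sva_set_dev_pasid,
	.free		= example_sva_domain_free,
};

static struct iommu_domain *example_domain_alloc_sva(struct device *dev,
						     struct mm_struct *mm)
{
	struct example_sva_domain *sva;

	sva = kzalloc(sizeof(*sva), GFP_KERNEL);
	if (!sva)
		return ERR_PTR(-ENOMEM);

	sva->domain.ops = &example_sva_domain_ops;
	/* type, mm and iopf_handler are filled in by iommu_sva_domain_alloc(). */
	return &sva->domain;
}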