// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

static void __iommufd_hwpt_destroy(struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->domain)
		iommu_domain_free(hwpt->domain);

	if (hwpt->fault)
		refcount_dec(&hwpt->fault->obj.users);
}

void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		mutex_lock(&hwpt_paging->ioas->mutex);
		list_del(&hwpt_paging->hwpt_item);
		mutex_unlock(&hwpt_paging->ioas->mutex);

		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}

	__iommufd_hwpt_destroy(&hwpt_paging->common);
	refcount_dec(&hwpt_paging->ioas->obj.users);
}

void iommufd_hwpt_paging_abort(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	/* The ioas->mutex must be held until finalize is called. */
	lockdep_assert_held(&hwpt_paging->ioas->mutex);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		list_del_init(&hwpt_paging->hwpt_item);
		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}
	iommufd_hwpt_paging_destroy(obj);
}

void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_nested *hwpt_nested =
		container_of(obj, struct iommufd_hwpt_nested, common.obj);

	__iommufd_hwpt_destroy(&hwpt_nested->common);
	refcount_dec(&hwpt_nested->parent->common.obj.users);
}

void iommufd_hwpt_nested_abort(struct iommufd_object *obj)
{
	iommufd_hwpt_nested_destroy(obj);
}

static int
iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
{
	struct iommu_domain *paging_domain = hwpt_paging->common.domain;

	if (hwpt_paging->enforce_cache_coherency)
		return 0;

	if (paging_domain->ops->enforce_cache_coherency)
		hwpt_paging->enforce_cache_coherency =
			paging_domain->ops->enforce_cache_coherency(
				paging_domain);
	if (!hwpt_paging->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}

/**
 * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @immediate_attach: True if idev should be attached to the hwpt
 * @user_data: The user provided driver specific data describing the domain to
 *             create
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 *
 * The caller must hold the ioas->mutex until after
 * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
 * the returned hwpt.
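 *
 * A simplified sketch of the expected calling sequence (the real caller is
 * iommufd_hwpt_alloc() below; error unwinding is elided):
 *
 *	mutex_lock(&ioas->mutex);
 *	hwpt_paging = iommufd_hwpt_paging_alloc(ictx, ioas, idev, flags,
 *						false, user_data);
 *	if (!IS_ERR(hwpt_paging))
 *		iommufd_object_finalize(ictx, &hwpt_paging->common.obj);
 *	mutex_unlock(&ioas->mutex);
 *
 * Return: the new hwpt_paging on success, or an ERR_PTR() on failure.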
 */
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, u32 flags,
			  bool immediate_attach,
			  const struct iommu_user_data *user_data)
{
	const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
				IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	if ((flags || user_data) && !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (flags & ~valid_flags)
		return ERR_PTR(-EOPNOTSUPP);
	if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
	    !device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_paging = __iommufd_object_alloc(
		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
	if (IS_ERR(hwpt_paging))
		return ERR_CAST(hwpt_paging);
	hwpt = &hwpt_paging->common;

	INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
	/* Pairs with iommufd_hwpt_paging_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt_paging->ioas = ioas;
	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;

	if (ops->domain_alloc_user) {
		hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL,
						      user_data);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
		hwpt->domain->owner = ops;
	} else {
		hwpt->domain = iommu_paging_domain_alloc(idev->dev);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
	}

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and need to decide before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency
	 * on a new domain.
	 *
	 * The cache coherency mode must be configured here and cannot be
	 * changed later. Note that a HWPT (non-CC) created for a device
	 * (non-CC) can later be reused by another device (either non-CC or
	 * CC). However, a HWPT (CC) created for a device (CC) can only be
	 * reused by other devices (CC); for a device (non-CC) userspace
	 * instead needs to allocate a separate HWPT (non-CC).
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that
	 * cannot directly allocate a domain. These drivers do not finish
	 * creating the domain until attach is completed. Thus we must have
	 * this call sequence. Once those drivers are fixed this should be
	 * removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list);
	return hwpt_paging;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

/**
 * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device
 * @ictx: iommufd context
 * @parent: Parent PAGING-type hwpt to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @user_data: The user provided driver specific data describing the domain to
 *             create; must be valid
 *
 * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as
 * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of
 * being a parent.
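 *
 * Unlike iommufd_hwpt_paging_alloc(), no ioas->mutex is required here; the
 * nested hwpt only takes a users reference on its parent.
 *
 * Return: the new hwpt_nested on success, or an ERR_PTR() on failure.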
 */
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
			  struct iommufd_hwpt_paging *parent,
			  struct iommufd_device *idev, u32 flags,
			  const struct iommu_user_data *user_data)
{
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) ||
	    !user_data->len || !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->auto_domain || !parent->nest_parent ||
	    parent->common.domain->owner != ops)
		return ERR_PTR(-EINVAL);

	hwpt_nested = __iommufd_object_alloc(
		ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	/* Pairs with iommufd_hwpt_nested_destroy() */
	refcount_inc(&parent->common.obj.users);
	hwpt_nested->parent = parent;

	hwpt->domain = ops->domain_alloc_user(idev->dev,
					      flags & ~IOMMU_HWPT_FAULT_ID_VALID,
					      parent->common.domain, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = ops;

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
			 !hwpt->domain->ops->cache_invalidate_user)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}
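
/*
 * Illustrative userspace sketch (not part of this file): allocating a plain
 * PAGING hwpt on top of an IOAS with the IOMMU_HWPT_ALLOC ioctl. iommufd_fd,
 * dev_id and ioas_id are assumed to come from earlier iommufd setup, and
 * error handling is elided:
 *
 *	struct iommu_hwpt_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.dev_id = dev_id,
 *		.pt_id = ioas_id,
 *		.data_type = IOMMU_HWPT_DATA_NONE,
 *	};
 *
 *	if (!ioctl(iommufd_fd, IOMMU_HWPT_ALLOC, &cmd))
 *		hwpt_id = cmd.out_hwpt_id;
 *
 * Passing a HWPT_PAGING object ID as pt_id together with driver specific
 * data_type/data_uptr/data_len allocates a NESTED hwpt instead, as handled
 * below.
 */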
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_ioas *ioas = NULL;
	struct iommufd_object *pt_obj;
	struct iommufd_device *idev;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;
	if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
	    (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
		return -EINVAL;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	if (pt_obj->type == IOMMUFD_OBJ_IOAS) {
		struct iommufd_hwpt_paging *hwpt_paging;

		ioas = container_of(pt_obj, struct iommufd_ioas, obj);
		/* Must be held until the new hwpt is finalized or aborted */
		mutex_lock(&ioas->mutex);
		hwpt_paging = iommufd_hwpt_paging_alloc(
			ucmd->ictx, ioas, idev, cmd->flags, false,
			user_data.len ? &user_data : NULL);
		if (IS_ERR(hwpt_paging)) {
			rc = PTR_ERR(hwpt_paging);
			goto out_unlock;
		}
		hwpt = &hwpt_paging->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_nested *hwpt_nested;

		hwpt_nested = iommufd_hwpt_nested_alloc(
			ucmd->ictx,
			container_of(pt_obj, struct iommufd_hwpt_paging,
				     common.obj),
			idev, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	if (cmd->flags & IOMMU_HWPT_FAULT_ID_VALID) {
		struct iommufd_fault *fault;

		fault = iommufd_get_fault(ucmd, cmd->fault_id);
		if (IS_ERR(fault)) {
			rc = PTR_ERR(fault);
			goto out_hwpt;
		}
		hwpt->fault = fault;
		hwpt->domain->iopf_handler = iommufd_fault_iopf_handler;
		hwpt->domain->fault_data = hwpt;
		refcount_inc(&fault->obj.users);
		iommufd_put_object(ucmd->ictx, &fault->obj);
	}

	cmd->out_hwpt_id = hwpt->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_hwpt;
	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
	goto out_unlock;

out_hwpt:
	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
out_unlock:
	if (ioas)
		mutex_unlock(&ioas->mutex);
out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;
	bool enable;

	if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE)
		return rc;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE;

	rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
				     enable);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;

	if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	rc = iopt_read_and_clear_dirty_data(
		&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}
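
/*
 * Handler for the IOMMU_HWPT_INVALIDATE ioctl. cmd->entry_num is written back
 * on return with the number of entries the driver actually consumed, so
 * userspace can tell how far a partially failed invalidation array got.
 */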
int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
	struct iommu_user_data_array data_array = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.entry_len = cmd->entry_len,
		.entry_num = cmd->entry_num,
	};
	struct iommufd_hw_pagetable *hwpt;
	u32 done_num = 0;
	int rc;

	if (cmd->__reserved) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (cmd->entry_num && (!cmd->data_uptr || !cmd->entry_len)) {
		rc = -EINVAL;
		goto out;
	}

	hwpt = iommufd_get_hwpt_nested(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt)) {
		rc = PTR_ERR(hwpt);
		goto out;
	}

	rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain,
						      &data_array);
	done_num = data_array.entry_num;

	iommufd_put_object(ucmd->ictx, &hwpt->obj);
out:
	cmd->entry_num = done_num;
	if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
		return -EFAULT;
	return rc;
}