// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

/* Common teardown: free the iommu_domain and drop the fault object reference, if any */
static void __iommufd_hwpt_destroy(struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->domain)
		iommu_domain_free(hwpt->domain);

	if (hwpt->fault)
		refcount_dec(&hwpt->fault->obj.users);
}

void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		mutex_lock(&hwpt_paging->ioas->mutex);
		list_del(&hwpt_paging->hwpt_item);
		mutex_unlock(&hwpt_paging->ioas->mutex);

		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}

	__iommufd_hwpt_destroy(&hwpt_paging->common);
	refcount_dec(&hwpt_paging->ioas->obj.users);
}

void iommufd_hwpt_paging_abort(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	/* The ioas->mutex must be held until finalize is called. */
	lockdep_assert_held(&hwpt_paging->ioas->mutex);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		list_del_init(&hwpt_paging->hwpt_item);
		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}
	iommufd_hwpt_paging_destroy(obj);
}

void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_nested *hwpt_nested =
		container_of(obj, struct iommufd_hwpt_nested, common.obj);

	__iommufd_hwpt_destroy(&hwpt_nested->common);
	if (hwpt_nested->viommu)
		refcount_dec(&hwpt_nested->viommu->obj.users);
	else
		refcount_dec(&hwpt_nested->parent->common.obj.users);
}

void iommufd_hwpt_nested_abort(struct iommufd_object *obj)
{
	iommufd_hwpt_nested_destroy(obj);
}

static int
iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
{
	struct iommu_domain *paging_domain = hwpt_paging->common.domain;

	if (hwpt_paging->enforce_cache_coherency)
		return 0;

	if (paging_domain->ops->enforce_cache_coherency)
		hwpt_paging->enforce_cache_coherency =
			paging_domain->ops->enforce_cache_coherency(
				paging_domain);
	if (!hwpt_paging->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}

/**
 * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @immediate_attach: True if idev should be attached to the hwpt
 * @user_data: The user-provided driver-specific data describing the domain to
 *             create
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 *
 * The caller must hold the ioas->mutex until after
 * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
 * the returned hwpt.
 */
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, u32 flags,
			  bool immediate_attach,
			  const struct iommu_user_data *user_data)
{
	const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
				IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	if ((flags || user_data) && !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (flags & ~valid_flags)
		return ERR_PTR(-EOPNOTSUPP);
	if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
	    !device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_paging = __iommufd_object_alloc(
		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
	if (IS_ERR(hwpt_paging))
		return ERR_CAST(hwpt_paging);
	hwpt = &hwpt_paging->common;

	INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
	/* Pairs with iommufd_hwpt_paging_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt_paging->ioas = ioas;
	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;

	if (ops->domain_alloc_user) {
		hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL,
						      user_data);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
		hwpt->domain->owner = ops;
	} else {
		hwpt->domain = iommu_paging_domain_alloc(idev->dev);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
	}

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and need to decide before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency
	 * on a new domain.
	 *
	 * The cache coherency mode must be configured here and unchanged
	 * later. Note that a HWPT (non-CC) created for a device (non-CC) can
	 * later be reused by another device (either non-CC or CC). However, a
	 * HWPT (CC) created for a device (CC) can only be reused by other CC
	 * devices; for a non-CC device user space needs to allocate a separate
	 * HWPT (non-CC) instead.
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that
	 * cannot directly allocate a domain. These drivers do not finish
	 * creating the domain until attach is completed. Thus we must have
	 * this call sequence. Once those drivers are fixed this should be
	 * removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list);
	return hwpt_paging;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

/**
 * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device
 * @ictx: iommufd context
 * @parent: Parent PAGING-type hwpt to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @user_data: user_data pointer. Must be valid
 *
 * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as
 * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of
 * being a parent.
 */
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
			  struct iommufd_hwpt_paging *parent,
			  struct iommufd_device *idev, u32 flags,
			  const struct iommu_user_data *user_data)
{
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) ||
	    !user_data->len || !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->auto_domain || !parent->nest_parent ||
	    parent->common.domain->owner != ops)
		return ERR_PTR(-EINVAL);

	hwpt_nested = __iommufd_object_alloc(
		ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	refcount_inc(&parent->common.obj.users);
	hwpt_nested->parent = parent;

	hwpt->domain = ops->domain_alloc_user(idev->dev,
					      flags & ~IOMMU_HWPT_FAULT_ID_VALID,
					      parent->common.domain, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = ops;

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

/**
 * iommufd_viommu_alloc_hwpt_nested() - Get a hwpt_nested for a vIOMMU
 * @viommu: vIOMMU object to associate the hwpt_nested/domain with
 * @flags: Flags from userspace
 * @user_data: user_data pointer. Must be valid
 *
 * Allocate a new IOMMU_DOMAIN_NESTED for a vIOMMU and return it as a NESTED
 * hw_pagetable.
 */
static struct iommufd_hwpt_nested *
iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
				 const struct iommu_user_data *user_data)
{
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if (!user_data->len)
		return ERR_PTR(-EOPNOTSUPP);
	if (!viommu->ops || !viommu->ops->alloc_domain_nested)
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_nested = __iommufd_object_alloc(
		viommu->ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	hwpt_nested->viommu = viommu;
	refcount_inc(&viommu->obj.users);
	hwpt_nested->parent = viommu->hwpt;

	hwpt->domain =
		viommu->ops->alloc_domain_nested(viommu, flags, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = viommu->iommu_dev->ops;

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(viommu->ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_ioas *ioas = NULL;
	struct iommufd_object *pt_obj;
	struct iommufd_device *idev;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;
	if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
	    (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
		return -EINVAL;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	if (pt_obj->type == IOMMUFD_OBJ_IOAS) {
		struct iommufd_hwpt_paging *hwpt_paging;

		ioas = container_of(pt_obj, struct iommufd_ioas, obj);
		mutex_lock(&ioas->mutex);
		hwpt_paging = iommufd_hwpt_paging_alloc(
			ucmd->ictx, ioas, idev, cmd->flags, false,
			user_data.len ? &user_data : NULL);
		if (IS_ERR(hwpt_paging)) {
			rc = PTR_ERR(hwpt_paging);
			goto out_unlock;
		}
		hwpt = &hwpt_paging->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_nested *hwpt_nested;

		hwpt_nested = iommufd_hwpt_nested_alloc(
			ucmd->ictx,
			container_of(pt_obj, struct iommufd_hwpt_paging,
				     common.obj),
			idev, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_VIOMMU) {
		struct iommufd_hwpt_nested *hwpt_nested;
		struct iommufd_viommu *viommu;

		viommu = container_of(pt_obj, struct iommufd_viommu, obj);
		if (viommu->iommu_dev != __iommu_get_iommu_dev(idev->dev)) {
			rc = -EINVAL;
			goto out_unlock;
		}
		hwpt_nested = iommufd_viommu_alloc_hwpt_nested(
			viommu, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	if (cmd->flags & IOMMU_HWPT_FAULT_ID_VALID) {
		struct iommufd_fault *fault;

		fault = iommufd_get_fault(ucmd, cmd->fault_id);
		if (IS_ERR(fault)) {
			rc = PTR_ERR(fault);
			goto out_hwpt;
		}
		hwpt->fault = fault;
		hwpt->domain->iopf_handler = iommufd_fault_iopf_handler;
		hwpt->domain->fault_data = hwpt;
		refcount_inc(&fault->obj.users);
		iommufd_put_object(ucmd->ictx, &fault->obj);
	}

	cmd->out_hwpt_id = hwpt->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_hwpt;
	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
	goto out_unlock;

out_hwpt:
	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
out_unlock:
	if (ioas)
		mutex_unlock(&ioas->mutex);
out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;
	bool enable;

	if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE)
		return rc;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE;

	rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
				     enable);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;

	if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	rc = iopt_read_and_clear_dirty_data(
		&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
	struct iommu_user_data_array data_array = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.entry_len = cmd->entry_len,
		.entry_num = cmd->entry_num,
	};
	struct iommufd_object *pt_obj;
	u32 done_num = 0;
	int rc;

	if (cmd->__reserved) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (cmd->entry_num && (!cmd->data_uptr || !cmd->entry_len)) {
		rc = -EINVAL;
		goto out;
	}

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->hwpt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = PTR_ERR(pt_obj);
		goto out;
	}
	if (pt_obj->type == IOMMUFD_OBJ_HWPT_NESTED) {
		struct iommufd_hw_pagetable *hwpt =
			container_of(pt_obj, struct iommufd_hw_pagetable, obj);

		if (!hwpt->domain->ops ||
		    !hwpt->domain->ops->cache_invalidate_user) {
			rc = -EOPNOTSUPP;
			goto out_put_pt;
		}
		rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain,
							      &data_array);
	} else if (pt_obj->type == IOMMUFD_OBJ_VIOMMU) {
		struct iommufd_viommu *viommu =
			container_of(pt_obj, struct iommufd_viommu, obj);

		if (!viommu->ops || !viommu->ops->cache_invalidate) {
			rc = -EOPNOTSUPP;
			goto out_put_pt;
		}
		rc = viommu->ops->cache_invalidate(viommu, &data_array);
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	done_num = data_array.entry_num;

out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out:
	cmd->entry_num = done_num;
	if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
		return -EFAULT;
	return rc;
}
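
/*
 * Illustrative sketch, not part of this driver: one plausible way userspace
 * could exercise the IOMMU_HWPT_ALLOC ioctl handled by iommufd_hwpt_alloc()
 * above. The iommufd_fd, dev_id and ioas_id values are assumed to come from
 * earlier, unshown steps (opening /dev/iommu, binding the device to the
 * iommufd and allocating an IOAS); only the HWPT allocation itself is shown.
 *
 *	struct iommu_hwpt_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.flags = 0,
 *		.dev_id = dev_id,
 *		.pt_id = ioas_id,
 *		.data_type = IOMMU_HWPT_DATA_NONE,
 *	};
 *	__u32 hwpt_id;
 *
 *	if (ioctl(iommufd_fd, IOMMU_HWPT_ALLOC, &cmd))
 *		return -errno;
 *	hwpt_id = cmd.out_hwpt_id;
 *
 * Passing a HWPT_PAGING or vIOMMU id as pt_id instead, together with a
 * driver-specific data_type/data_uptr/data_len, would take the nested
 * allocation paths above.
 */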