// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		mutex_lock(&hwpt_paging->ioas->mutex);
		list_del(&hwpt_paging->hwpt_item);
		mutex_unlock(&hwpt_paging->ioas->mutex);

		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}

	if (hwpt_paging->common.domain)
		iommu_domain_free(hwpt_paging->common.domain);

	refcount_dec(&hwpt_paging->ioas->obj.users);
}

void iommufd_hwpt_paging_abort(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	/* The ioas->mutex must be held until finalize is called. */
	lockdep_assert_held(&hwpt_paging->ioas->mutex);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		list_del_init(&hwpt_paging->hwpt_item);
		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}
	iommufd_hwpt_paging_destroy(obj);
}

void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_nested *hwpt_nested =
		container_of(obj, struct iommufd_hwpt_nested, common.obj);

	if (hwpt_nested->common.domain)
		iommu_domain_free(hwpt_nested->common.domain);

	refcount_dec(&hwpt_nested->parent->common.obj.users);
}

void iommufd_hwpt_nested_abort(struct iommufd_object *obj)
{
	iommufd_hwpt_nested_destroy(obj);
}

static int
iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
{
	struct iommu_domain *paging_domain = hwpt_paging->common.domain;

	if (hwpt_paging->enforce_cache_coherency)
		return 0;

	if (paging_domain->ops->enforce_cache_coherency)
		hwpt_paging->enforce_cache_coherency =
			paging_domain->ops->enforce_cache_coherency(
				paging_domain);
	if (!hwpt_paging->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}

/**
 * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @immediate_attach: True if idev should be attached to the hwpt
 * @user_data: The user provided driver specific data describing the domain to
 *             create
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 *
 * The caller must hold the ioas->mutex until after
 * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
 * the returned hwpt.
 */
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, u32 flags,
			  bool immediate_attach,
			  const struct iommu_user_data *user_data)
{
	const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
				IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	if ((flags || user_data) && !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (flags & ~valid_flags)
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_paging = __iommufd_object_alloc(
		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
	if (IS_ERR(hwpt_paging))
		return ERR_CAST(hwpt_paging);
	hwpt = &hwpt_paging->common;

	INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
	/* Pairs with iommufd_hw_pagetable_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt_paging->ioas = ioas;
	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;

	if (ops->domain_alloc_user) {
		hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL,
						      user_data);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
		hwpt->domain->owner = ops;
	} else {
		hwpt->domain = iommu_domain_alloc(idev->dev->bus);
		if (!hwpt->domain) {
			rc = -ENOMEM;
			goto out_abort;
		}
	}

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and need to decide before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency
	 * on a new domain.
	 *
	 * The cache coherency mode must be configured here and left unchanged
	 * later. Note that a HWPT (non-CC) created for a device (non-CC) can
	 * later be reused by another device (either non-CC or CC). However, a
	 * HWPT (CC) created for a device (CC) can only be reused by other CC
	 * devices; for a non-CC device user space would instead need to
	 * allocate a separate HWPT (non-CC).
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that
	 * cannot directly allocate a domain. These drivers do not finish
	 * creating the domain until attach is completed. Thus we must have
	 * this call sequence. Once those drivers are fixed this should be
	 * removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list);
	return hwpt_paging;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

/**
 * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device
 * @ictx: iommufd context
 * @parent: Parent PAGING-type hwpt to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @user_data: user_data pointer. Must be valid
 *
 * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as
 * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of
 * being a parent.
 */
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
			  struct iommufd_hwpt_paging *parent,
			  struct iommufd_device *idev, u32 flags,
			  const struct iommu_user_data *user_data)
{
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if (flags || !user_data->len || !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->auto_domain || !parent->nest_parent)
		return ERR_PTR(-EINVAL);

	hwpt_nested = __iommufd_object_alloc(
		ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	refcount_inc(&parent->common.obj.users);
	hwpt_nested->parent = parent;

	hwpt->domain = ops->domain_alloc_user(idev->dev, flags,
					      parent->common.domain, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = ops;

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_ioas *ioas = NULL;
	struct iommufd_object *pt_obj;
	struct iommufd_device *idev;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;
	if (cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len)
		return -EINVAL;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	if (pt_obj->type == IOMMUFD_OBJ_IOAS) {
		struct iommufd_hwpt_paging *hwpt_paging;

		ioas = container_of(pt_obj, struct iommufd_ioas, obj);
		mutex_lock(&ioas->mutex);
		hwpt_paging = iommufd_hwpt_paging_alloc(
			ucmd->ictx, ioas, idev, cmd->flags, false,
			user_data.len ? &user_data : NULL);
		if (IS_ERR(hwpt_paging)) {
			rc = PTR_ERR(hwpt_paging);
			goto out_unlock;
		}
		hwpt = &hwpt_paging->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_nested *hwpt_nested;

		hwpt_nested = iommufd_hwpt_nested_alloc(
			ucmd->ictx,
			container_of(pt_obj, struct iommufd_hwpt_paging,
				     common.obj),
			idev, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	cmd->out_hwpt_id = hwpt->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_hwpt;
	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
	goto out_unlock;

out_hwpt:
	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
out_unlock:
	if (ioas)
		mutex_unlock(&ioas->mutex);
out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;
	bool enable;

	if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE)
		return rc;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE;

	rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
				     enable);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;

	if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	rc = iopt_read_and_clear_dirty_data(
		&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
	struct iommu_user_data_array data_array = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.entry_len = cmd->entry_len,
		.entry_num = cmd->entry_num,
	};
	struct iommufd_hw_pagetable *hwpt;
	u32 done_num = 0;
	int rc;

	if (cmd->__reserved) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (cmd->entry_num && (!cmd->data_uptr || !cmd->entry_len)) {
		rc = -EINVAL;
		goto out;
	}

	hwpt = iommufd_get_hwpt_nested(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt)) {
		rc = PTR_ERR(hwpt);
		goto out;
	}

	rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain,
						      &data_array);
	done_num = data_array.entry_num;

	iommufd_put_object(ucmd->ictx, &hwpt->obj);
out:
	cmd->entry_num = done_num;
	if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
		return -EFAULT;
	return rc;
}
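
/*
 * Illustrative userspace sketch (not part of this file or of the kernel
 * build): one plausible way to exercise the IOMMU_HWPT_ALLOC and
 * IOMMU_HWPT_SET_DIRTY_TRACKING ioctls handled above, using only the uAPI
 * from <uapi/linux/iommufd.h>. The iommufd file descriptor is assumed to
 * come from open("/dev/iommu", O_RDWR); dev_id and ioas_id are assumed to
 * come from a prior device bind (e.g. VFIO's VFIO_DEVICE_BIND_IOMMUFD) and
 * IOMMU_IOAS_ALLOC. The helper names below are hypothetical.
 */
#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/iommufd.h>

/*
 * Allocate a plain PAGING HWPT for dev_id on top of ioas_id, optionally
 * requesting dirty tracking, and return the new HWPT id via out_hwpt_id.
 */
static int hwpt_alloc_example(int iommufd, __u32 dev_id, __u32 ioas_id,
			      bool dirty_tracking, __u32 *out_hwpt_id)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = dirty_tracking ? IOMMU_HWPT_ALLOC_DIRTY_TRACKING : 0,
		.dev_id = dev_id,
		.pt_id = ioas_id,
		.data_type = IOMMU_HWPT_DATA_NONE,
	};
	int rc;

	rc = ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd);
	if (rc)
		return rc;
	*out_hwpt_id = cmd.out_hwpt_id;
	return 0;
}

/* Turn dirty tracking on or off for an already allocated PAGING HWPT. */
static int hwpt_set_dirty_example(int iommufd, __u32 hwpt_id, bool enable)
{
	struct iommu_hwpt_set_dirty_tracking cmd = {
		.size = sizeof(cmd),
		.flags = enable ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
		.hwpt_id = hwpt_id,
	};

	return ioctl(iommufd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
}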