// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 */
#include "iommufd_private.h"

/*
 * Destroy callback for an IOMMUFD_OBJ_VIOMMU object.
 *
 * Gives the driver a chance to tear down its private state (ops->destroy is
 * optional), drops the users reference this viommu holds on its parent HWPT
 * (taken in iommufd_viommu_alloc_ioctl()), and releases the vdevs xarray.
 * By the time this runs, all vdevices must already be gone, so the xarray is
 * expected to be empty.
 */
void iommufd_viommu_destroy(struct iommufd_object *obj)
{
	struct iommufd_viommu *viommu =
		container_of(obj, struct iommufd_viommu, obj);

	if (viommu->ops && viommu->ops->destroy)
		viommu->ops->destroy(viommu);
	refcount_dec(&viommu->hwpt->common.obj.users);
	xa_destroy(&viommu->vdevs);
}

/*
 * IOMMU_VIOMMU_ALLOC ioctl handler: allocate a driver-backed virtual IOMMU
 * object on top of a nesting-parent paging HWPT.
 *
 * Rejects any flags and the DEFAULT type (a driver-specific type is
 * required), then asks the device's IOMMU driver to allocate the viommu via
 * ops->viommu_alloc. On success the viommu holds a users reference on the
 * HWPT (released in iommufd_viommu_destroy()) and caches the physical IOMMU
 * instance for later cross-checking in iommufd_vdevice_alloc_ioctl().
 *
 * Returns 0 on success or a negative errno. Cleanup uses the standard
 * goto-ladder: on any failure after viommu allocation, abort_and_destroy
 * unwinds the partially initialized object; the idev/hwpt references taken
 * by the iommufd_get_*() lookups are always dropped before returning.
 */
int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_viommu_alloc *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_viommu *viommu;
	struct iommufd_device *idev;
	const struct iommu_ops *ops;
	int rc;

	if (cmd->flags || cmd->type == IOMMU_VIOMMU_TYPE_DEFAULT)
		return -EOPNOTSUPP;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	ops = dev_iommu_ops(idev->dev);
	if (!ops->viommu_alloc) {
		/* Driver does not support virtual IOMMU objects */
		rc = -EOPNOTSUPP;
		goto out_put_idev;
	}

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging)) {
		rc = PTR_ERR(hwpt_paging);
		goto out_put_idev;
	}

	/* Only a nesting-parent S2 domain can back a viommu */
	if (!hwpt_paging->nest_parent) {
		rc = -EINVAL;
		goto out_put_hwpt;
	}

	viommu = ops->viommu_alloc(idev->dev, hwpt_paging->common.domain,
				   ucmd->ictx, cmd->type);
	if (IS_ERR(viommu)) {
		rc = PTR_ERR(viommu);
		goto out_put_hwpt;
	}

	xa_init(&viommu->vdevs);
	viommu->type = cmd->type;
	viommu->ictx = ucmd->ictx;
	viommu->hwpt = hwpt_paging;
	/* Paired with the refcount_dec in iommufd_viommu_destroy() */
	refcount_inc(&viommu->hwpt->common.obj.users);
	/*
	 * It is the most likely case that a physical IOMMU is unpluggable. A
	 * pluggable IOMMU instance (if exists) is responsible for refcounting
	 * on its own.
	 */
	viommu->iommu_dev = __iommu_get_iommu_dev(idev->dev);

	cmd->out_viommu_id = viommu->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_abort;
	iommufd_object_finalize(ucmd->ictx, &viommu->obj);
	goto out_put_hwpt;

out_abort:
	/* Runs iommufd_viommu_destroy(), which drops the hwpt ref above */
	iommufd_object_abort_and_destroy(ucmd->ictx, &viommu->obj);
out_put_hwpt:
	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

/*
 * Destroy callback for an IOMMUFD_OBJ_VDEVICE object.
 *
 * Removes the vdevice from its viommu's vdevs xarray (conditionally, since
 * an allocation that failed at the xa_cmpxchg stage never inserted it),
 * drops the users reference held on the viommu, and releases the struct
 * device reference taken by get_device() at allocation time.
 */
void iommufd_vdevice_destroy(struct iommufd_object *obj)
{
	struct iommufd_vdevice *vdev =
		container_of(obj, struct iommufd_vdevice, obj);
	struct iommufd_viommu *viommu = vdev->viommu;

	/* xa_cmpxchg is okay to fail if alloc failed xa_cmpxchg previously */
	xa_cmpxchg(&viommu->vdevs, vdev->id, vdev, NULL, GFP_KERNEL);
	refcount_dec(&viommu->obj.users);
	put_device(vdev->dev);
}

/*
 * IOMMU_VDEVICE_ALLOC ioctl handler: bind a physical device to a viommu
 * under a guest-visible virtual device ID.
 *
 * virt_id is the per-viommu virtual identifier chosen by userspace; it keys
 * the viommu->vdevs xarray, hence the ULONG_MAX guard (u64 from userspace
 * may exceed the xarray's unsigned long index on 32-bit kernels). The
 * device must sit behind the same physical IOMMU instance the viommu was
 * created against. Inserting an already-used virt_id fails with -EEXIST.
 *
 * On success the vdevice pins the viommu (refcount_inc) and the device
 * (get_device); both are released in iommufd_vdevice_destroy(). Returns 0
 * or a negative errno, with goto-ladder cleanup mirroring the acquisition
 * order.
 */
int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_vdevice_alloc *cmd = ucmd->cmd;
	struct iommufd_vdevice *vdev, *curr;
	struct iommufd_viommu *viommu;
	struct iommufd_device *idev;
	u64 virt_id = cmd->virt_id;
	int rc = 0;

	/* virt_id indexes an xarray */
	if (virt_id > ULONG_MAX)
		return -EINVAL;

	viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);
	if (IS_ERR(viommu))
		return PTR_ERR(viommu);

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_put_viommu;
	}

	/* The device must belong to the same physical IOMMU as the viommu */
	if (viommu->iommu_dev != __iommu_get_iommu_dev(idev->dev)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	vdev = iommufd_object_alloc(ucmd->ictx, vdev, IOMMUFD_OBJ_VDEVICE);
	if (IS_ERR(vdev)) {
		rc = PTR_ERR(vdev);
		goto out_put_idev;
	}

	vdev->id = virt_id;
	vdev->dev = idev->dev;
	/* Paired with put_device() in iommufd_vdevice_destroy() */
	get_device(idev->dev);
	vdev->viommu = viommu;
	/* Paired with refcount_dec() in iommufd_vdevice_destroy() */
	refcount_inc(&viommu->obj.users);

	/* NULL-compare insert: fails with the existing entry if virt_id is taken */
	curr = xa_cmpxchg(&viommu->vdevs, virt_id, NULL, vdev, GFP_KERNEL);
	if (curr) {
		rc = xa_err(curr) ?: -EEXIST;
		goto out_abort;
	}

	cmd->out_vdevice_id = vdev->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_abort;
	iommufd_object_finalize(ucmd->ictx, &vdev->obj);
	goto out_put_idev;

out_abort:
	/* Runs iommufd_vdevice_destroy(), undoing the xarray/refs above */
	iommufd_object_abort_and_destroy(ucmd->ictx, &vdev->obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
out_put_viommu:
	iommufd_put_object(ucmd->ictx, &viommu->obj);
	return rc;
}