// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 */
#include "iommufd_private.h"

void iommufd_viommu_destroy(struct iommufd_object *obj)
{
	struct iommufd_viommu *viommu =
		container_of(obj, struct iommufd_viommu, obj);

	if (viommu->ops && viommu->ops->destroy)
		viommu->ops->destroy(viommu);
	refcount_dec(&viommu->hwpt->common.obj.users);
	xa_destroy(&viommu->vdevs);
}

int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_viommu_alloc *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_viommu *viommu;
	struct iommufd_device *idev;
	const struct iommu_ops *ops;
	int rc;

	if (cmd->flags || cmd->type == IOMMU_VIOMMU_TYPE_DEFAULT)
		return -EOPNOTSUPP;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	ops = dev_iommu_ops(idev->dev);
	if (!ops->viommu_alloc) {
		rc = -EOPNOTSUPP;
		goto out_put_idev;
	}

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging)) {
		rc = PTR_ERR(hwpt_paging);
		goto out_put_idev;
	}

	/* A vIOMMU can only be built on top of a nesting-parent paging HWPT */
	if (!hwpt_paging->nest_parent) {
		rc = -EINVAL;
		goto out_put_hwpt;
	}

	viommu = ops->viommu_alloc(idev->dev, hwpt_paging->common.domain,
				   ucmd->ictx, cmd->type);
	if (IS_ERR(viommu)) {
		rc = PTR_ERR(viommu);
		goto out_put_hwpt;
	}

	xa_init(&viommu->vdevs);
	viommu->type = cmd->type;
	viommu->ictx = ucmd->ictx;
	viommu->hwpt = hwpt_paging;
	refcount_inc(&viommu->hwpt->common.obj.users);
	/*
	 * A physical IOMMU instance is almost always unpluggable. A pluggable
	 * instance (if one exists) is responsible for its own refcounting.
	 */
	viommu->iommu_dev = __iommu_get_iommu_dev(idev->dev);

	cmd->out_viommu_id = viommu->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_abort;
	iommufd_object_finalize(ucmd->ictx, &viommu->obj);
	goto out_put_hwpt;

out_abort:
	iommufd_object_abort_and_destroy(ucmd->ictx, &viommu->obj);
out_put_hwpt:
	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

void iommufd_vdevice_destroy(struct iommufd_object *obj)
{
	struct iommufd_vdevice *vdev =
		container_of(obj, struct iommufd_vdevice, obj);
	struct iommufd_viommu *viommu = vdev->viommu;

	/*
	 * xa_cmpxchg() may fail here, which is fine: it means the alloc path
	 * already failed its own xa_cmpxchg() and never installed the entry.
	 */
	xa_cmpxchg(&viommu->vdevs, vdev->id, vdev, NULL, GFP_KERNEL);
	refcount_dec(&viommu->obj.users);
	put_device(vdev->dev);
}

int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_vdevice_alloc *cmd = ucmd->cmd;
	struct iommufd_vdevice *vdev, *curr;
	struct iommufd_viommu *viommu;
	struct iommufd_device *idev;
	u64 virt_id = cmd->virt_id;
	int rc = 0;

	/* virt_id indexes an xarray, so it must fit in an unsigned long */
	if (virt_id > ULONG_MAX)
		return -EINVAL;

	viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);
	if (IS_ERR(viommu))
		return PTR_ERR(viommu);

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_put_viommu;
	}

	/* The device must belong to the same physical IOMMU as the vIOMMU */
	if (viommu->iommu_dev != __iommu_get_iommu_dev(idev->dev)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	vdev = iommufd_object_alloc(ucmd->ictx, vdev, IOMMUFD_OBJ_VDEVICE);
	if (IS_ERR(vdev)) {
		rc = PTR_ERR(vdev);
		goto out_put_idev;
	}

	vdev->id = virt_id;
	vdev->dev = idev->dev;
	get_device(idev->dev);
	vdev->viommu = viommu;
	refcount_inc(&viommu->obj.users);

	/* Only one vDEVICE per virt_id; an already occupied slot is -EEXIST */
	curr = xa_cmpxchg(&viommu->vdevs, virt_id, NULL, vdev, GFP_KERNEL);
	if (curr) {
		rc = xa_err(curr) ?: -EEXIST;
		goto out_abort;
	}

	cmd->out_vdevice_id = vdev->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_abort;
	iommufd_object_finalize(ucmd->ictx, &vdev->obj);
	goto out_put_idev;

out_abort:
	iommufd_object_abort_and_destroy(ucmd->ictx, &vdev->obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
out_put_viommu:
	iommufd_put_object(ucmd->ictx, &viommu->obj);
	return rc;
}
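
/*
 * A minimal userspace usage sketch for the two ioctls above, assuming
 * "iommufd", "dev_id" and "nest_parent_hwpt_id" were obtained from earlier
 * iommufd setup steps (device binding and nesting-parent HWPT allocation),
 * and that the vIOMMU follows the ARM SMMUv3 model with "virt_stream_id" as
 * the guest-visible virtual ID. See include/uapi/linux/iommufd.h for the
 * exact uAPI layouts:
 *
 *	struct iommu_viommu_alloc viommu = {
 *		.size = sizeof(viommu),
 *		.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3,
 *		.dev_id = dev_id,
 *		.hwpt_id = nest_parent_hwpt_id,
 *	};
 *	if (ioctl(iommufd, IOMMU_VIOMMU_ALLOC, &viommu))
 *		return -1;
 *
 *	struct iommu_vdevice_alloc vdev = {
 *		.size = sizeof(vdev),
 *		.viommu_id = viommu.out_viommu_id,
 *		.dev_id = dev_id,
 *		.virt_id = virt_stream_id,
 *	};
 *	if (ioctl(iommufd, IOMMU_VDEVICE_ALLOC, &vdev))
 *		return -1;
 */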