xref: /linux/drivers/vfio/iommufd.c (revision e80a48bade619ec5a92230b3d4ae84bfc2746822)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/vfio.h>
#include <linux/iommufd.h>

#include "vfio.h"

MODULE_IMPORT_NS(IOMMUFD);
MODULE_IMPORT_NS(IOMMUFD_VFIO);

int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
{
	u32 ioas_id;
	u32 device_id;
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	/*
	 * If the driver doesn't provide this op then it means the device does
	 * not do DMA at all, so there is nothing to do.
	 */
	if (!vdev->ops->bind_iommufd)
		return 0;

	ret = vdev->ops->bind_iommufd(vdev, ictx, &device_id);
	if (ret)
		return ret;

	ret = iommufd_vfio_compat_ioas_id(ictx, &ioas_id);
	if (ret)
		goto err_unbind;
	ret = vdev->ops->attach_ioas(vdev, &ioas_id);
	if (ret)
		goto err_unbind;

	/*
	 * The legacy path has no way to return the device id or the selected
	 * pt_id to userspace.
	 */
	return 0;

err_unbind:
	if (vdev->ops->unbind_iommufd)
		vdev->ops->unbind_iommufd(vdev);
	return ret;
}

void vfio_iommufd_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->ops->unbind_iommufd)
		vdev->ops->unbind_iommufd(vdev);
}

/*
 * The physical standard ops mean that the iommufd_device is bound to the
 * physical device vdev->dev that was provided to vfio_init_group_dev().
 * Drivers using this ops set should call vfio_register_group_dev().
 */
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_device *idev;

	idev = iommufd_device_bind(ictx, vdev->dev, out_device_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);
	vdev->iommufd_device = idev;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);

void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached) {
		iommufd_device_detach(vdev->iommufd_device);
		vdev->iommufd_attached = false;
	}
	iommufd_device_unbind(vdev->iommufd_device);
	vdev->iommufd_device = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);

int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int rc;

	rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
	if (rc)
		return rc;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
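
/*
 * Illustrative sketch, not taken verbatim from any in-tree driver: a driver
 * for a physical device typically plugs the three helpers above straight into
 * its vfio_device_ops and then registers with vfio_register_group_dev(), e.g.:
 *
 *	static const struct vfio_device_ops my_physical_ops = {
 *		.name		= "my-physical-vfio",
 *		...
 *		.bind_iommufd	= vfio_iommufd_physical_bind,
 *		.unbind_iommufd	= vfio_iommufd_physical_unbind,
 *		.attach_ioas	= vfio_iommufd_physical_attach_ioas,
 *	};
 *
 * my_physical_ops and "my-physical-vfio" are placeholder names; the vfio-pci
 * variant drivers are among the in-tree users of these helpers.
 */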

/*
 * The emulated standard ops mean that vfio_device is going to use the
 * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
 * ops set should call vfio_register_emulated_iommu_dev().
 */

/*
 * Called back by iommufd when an IOVA range that may contain pinned pages is
 * removed from the IOAS. Forward it to the driver's dma_unmap op so the driver
 * can stop DMA to that range and unpin the affected pages.
 */
static void vfio_emulated_unmap(void *data, unsigned long iova,
				unsigned long length)
{
	struct vfio_device *vdev = data;

	vdev->ops->dma_unmap(vdev, iova, length);
}

static const struct iommufd_access_ops vfio_user_ops = {
	.needs_pin_pages = 1,
	.unmap = vfio_emulated_unmap,
};
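
/*
 * Illustrative sketch with placeholder names (my_dma_unmap, struct my_device,
 * pin_lock, pinned_iova and pinned_npages are not real identifiers): because
 * vfio_emulated_unmap() calls ops->dma_unmap unconditionally, an emulated
 * driver using these access ops must provide a dma_unmap op, and that op is
 * expected to drop any pins it holds inside the unmapped range, e.g.:
 *
 *	static void my_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
 *	{
 *		struct my_device *my = container_of(vdev, struct my_device, vdev);
 *
 *		mutex_lock(&my->pin_lock);
 *		if (my->pinned_npages && my->pinned_iova >= iova &&
 *		    my->pinned_iova < iova + length) {
 *			vfio_unpin_pages(vdev, my->pinned_iova, my->pinned_npages);
 *			my->pinned_npages = 0;
 *		}
 *		mutex_unlock(&my->pin_lock);
 *	}
 */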

int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	vdev->iommufd_ictx = ictx;
	iommufd_ctx_get(ictx);
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);

void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_access) {
		iommufd_access_destroy(vdev->iommufd_access);
		vdev->iommufd_access = NULL;
	}
	iommufd_ctx_put(vdev->iommufd_ictx);
	vdev->iommufd_ictx = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);

int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	struct iommufd_access *user;

	lockdep_assert_held(&vdev->dev_set->lock);

	user = iommufd_access_create(vdev->iommufd_ictx, *pt_id, &vfio_user_ops,
				     vdev);
	if (IS_ERR(user))
		return PTR_ERR(user);
	vdev->iommufd_access = user;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
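
/*
 * Illustrative sketch with placeholder names (my_mdev_ops, "my-mdev",
 * my_dma_unmap and my->vdev are not real identifiers): an emulated/mdev driver
 * wires these helpers into its vfio_device_ops alongside its own dma_unmap
 * implementation and registers through vfio_register_emulated_iommu_dev(),
 * e.g.:
 *
 *	static const struct vfio_device_ops my_mdev_ops = {
 *		.name		= "my-mdev",
 *		...
 *		.bind_iommufd	= vfio_iommufd_emulated_bind,
 *		.unbind_iommufd	= vfio_iommufd_emulated_unbind,
 *		.attach_ioas	= vfio_iommufd_emulated_attach_ioas,
 *		.dma_unmap	= my_dma_unmap,
 *	};
 *
 *	ret = vfio_register_emulated_iommu_dev(&my->vdev);
 *
 * In-tree users of these helpers are the mdev-style drivers that pin pages
 * through vfio_pin_pages().
 */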