xref: /linux/drivers/vfio/iommufd.c (revision 5e0266f0e5f57617472d5aac4013f58a3ef264ac)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/vfio.h>
#include <linux/iommufd.h>

#include "vfio.h"

MODULE_IMPORT_NS(IOMMUFD);
MODULE_IMPORT_NS(IOMMUFD_VFIO);

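/*
 * Bind @vdev to the iommufd context @ictx and attach it to the VFIO compat
 * IOAS, with the dev_set lock held. No-iommu devices only get the
 * CAP_SYS_RAWIO and compat-IOAS sanity checks; devices whose drivers do not
 * implement bind_iommufd do no DMA, so there is nothing further to do.
 */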
int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
{
	u32 ioas_id;
	u32 device_id;
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (vfio_device_is_noiommu(vdev)) {
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		/*
		 * Require no compat ioas to be assigned to proceed. The basic
		 * statement is that the user cannot have done something that
		 * implies they expected translation to exist
		 */
		if (!iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id))
			return -EPERM;
		return 0;
	}

	/*
	 * If the driver doesn't provide this op then it means the device does
	 * not do DMA at all. So nothing to do.
	 */
	if (!vdev->ops->bind_iommufd)
		return 0;

	ret = vdev->ops->bind_iommufd(vdev, ictx, &device_id);
	if (ret)
		return ret;

	ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
	if (ret)
		goto err_unbind;
	ret = vdev->ops->attach_ioas(vdev, &ioas_id);
	if (ret)
		goto err_unbind;

	/*
	 * The legacy path has no way to return the device id or the selected
	 * pt_id
	 */
	return 0;

err_unbind:
	if (vdev->ops->unbind_iommufd)
		vdev->ops->unbind_iommufd(vdev);
	return ret;
}

void vfio_iommufd_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vfio_device_is_noiommu(vdev))
		return;

	if (vdev->ops->unbind_iommufd)
		vdev->ops->unbind_iommufd(vdev);
}

/*
 * The physical standard ops mean that the iommufd_device is bound to the
 * physical device vdev->dev that was provided to vfio_init_group_dev(). Drivers
 * using this ops set should call vfio_register_group_dev()
 */
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_device *idev;

	idev = iommufd_device_bind(ictx, vdev->dev, out_device_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);
	vdev->iommufd_device = idev;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);

void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached) {
		iommufd_device_detach(vdev->iommufd_device);
		vdev->iommufd_attached = false;
	}
	iommufd_device_unbind(vdev->iommufd_device);
	vdev->iommufd_device = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);

int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int rc;

	rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
	if (rc)
		return rc;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
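
/*
 * Illustrative sketch (not part of the original file): a physical driver in
 * the vfio-pci mold typically wires the three helpers above directly into
 * its vfio_device_ops and then registers with vfio_register_group_dev().
 * Everything prefixed "sample_" is a hypothetical placeholder; a real driver
 * also fills in its open_device/close_device/ioctl/mmap callbacks.
 */
static const struct vfio_device_ops sample_physical_vfio_ops = {
	.name		= "sample-vfio-physical",
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
};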

/*
 * The emulated standard ops mean that vfio_device is going to use the
 * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
 * ops set should call vfio_register_emulated_iommu_dev().
 */

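/*
 * iommufd invokes this access callback when IOVA ranges are unmapped from
 * the IOAS, so the driver's dma_unmap() can stop using and unpin any pages
 * it pinned there via vfio_pin_pages().
 */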
static void vfio_emulated_unmap(void *data, unsigned long iova,
				unsigned long length)
{
	struct vfio_device *vdev = data;

	vdev->ops->dma_unmap(vdev, iova, length);
}

static const struct iommufd_access_ops vfio_user_ops = {
	.needs_pin_pages = 1,
	.unmap = vfio_emulated_unmap,
};

int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	vdev->iommufd_ictx = ictx;
	iommufd_ctx_get(ictx);
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);

void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_access) {
		iommufd_access_destroy(vdev->iommufd_access);
		vdev->iommufd_access = NULL;
	}
	iommufd_ctx_put(vdev->iommufd_ictx);
	vdev->iommufd_ictx = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);

int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	struct iommufd_access *user;

	lockdep_assert_held(&vdev->dev_set->lock);

	user = iommufd_access_create(vdev->iommufd_ictx, *pt_id, &vfio_user_ops,
				     vdev);
	if (IS_ERR(user))
		return PTR_ERR(user);
	vdev->iommufd_access = user;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
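
/*
 * Illustrative sketch (not part of the original file): an emulated, mdev
 * style driver registered via vfio_register_emulated_iommu_dev() plugs in
 * the emulated helpers above and must also provide dma_unmap(), because
 * vfio_emulated_unmap() calls it unconditionally. Everything prefixed
 * "sample_" is a hypothetical placeholder.
 */
static void sample_emulated_dma_unmap(struct vfio_device *vdev, u64 iova,
				      u64 length)
{
	/* Unpin/invalidate whatever the device pinned in [iova, iova + length) */
}

static const struct vfio_device_ops sample_emulated_vfio_ops = {
	.name		= "sample-vfio-emulated",
	.bind_iommufd	= vfio_iommufd_emulated_bind,
	.unbind_iommufd	= vfio_iommufd_emulated_unbind,
	.attach_ioas	= vfio_iommufd_emulated_attach_ioas,
	.dma_unmap	= sample_emulated_dma_unmap,
};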