// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 */
#include "iommufd_private.h"

/* Drivers should use a per-structure helper in include/linux/iommufd.h */
int _iommufd_object_depend(struct iommufd_object *obj_dependent,
			   struct iommufd_object *obj_depended)
{
	/* Reject a self dependency, which would deadlock */
	if (obj_dependent == obj_depended)
		return -EINVAL;
	/* Only support dependencies between two objects of the same type */
	if (obj_dependent->type != obj_depended->type)
		return -EINVAL;

	refcount_inc(&obj_depended->users);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(_iommufd_object_depend, "IOMMUFD");

/*
 * Drivers should use a per-structure helper in include/linux/iommufd.h.
 * An illustrative sketch follows this function below.
 */
void _iommufd_object_undepend(struct iommufd_object *obj_dependent,
			      struct iommufd_object *obj_depended)
{
	if (WARN_ON_ONCE(obj_dependent == obj_depended ||
			 obj_dependent->type != obj_depended->type))
		return;

	refcount_dec(&obj_depended->users);
}
EXPORT_SYMBOL_NS_GPL(_iommufd_object_undepend, "IOMMUFD");
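
/*
 * Illustrative sketch, not part of the upstream code: one way a per-structure
 * helper in include/linux/iommufd.h could wrap the two functions above. The
 * struct my_hw_queue type and its layout are assumptions for illustration
 * only, so the block is kept out of the build with #if 0.
 */
#if 0
struct my_hw_queue {
	struct iommufd_object obj;	/* hypothetical driver-owned object */
	/* ... driver-private state ... */
};

/* Make @dependent hold a users reference on @depended of the same type */
static inline int my_hw_queue_depend(struct my_hw_queue *dependent,
				     struct my_hw_queue *depended)
{
	return _iommufd_object_depend(&dependent->obj, &depended->obj);
}

/* Drop the reference taken by my_hw_queue_depend() */
static inline void my_hw_queue_undepend(struct my_hw_queue *dependent,
					struct my_hw_queue *depended)
{
	_iommufd_object_undepend(&dependent->obj, &depended->obj);
}
#endif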

/*
 * Allocate an @offset to return to user space for use with an mmap() syscall
 *
 * Drivers should use a per-structure helper in include/linux/iommufd.h
 */
int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object *owner,
			phys_addr_t mmio_addr, size_t length,
			unsigned long *offset)
{
	struct iommufd_mmap *immap;
	unsigned long startp;
	int rc;

	if (!PAGE_ALIGNED(mmio_addr))
		return -EINVAL;
	if (!length || !PAGE_ALIGNED(length))
		return -EINVAL;

	immap = kzalloc(sizeof(*immap), GFP_KERNEL);
	if (!immap)
		return -ENOMEM;
	immap->owner = owner;
	immap->length = length;
	immap->mmio_addr = mmio_addr;

	/* Skip the first page to help callers identify the returned offset */
	rc = mtree_alloc_range(&ictx->mt_mmap, &startp, immap, immap->length,
			       PAGE_SIZE, ULONG_MAX, GFP_KERNEL);
	if (rc < 0) {
		kfree(immap);
		return rc;
	}

	/* The mmap() syscall right-shifts the offset into vma->vm_pgoff too */
	immap->vm_pgoff = startp >> PAGE_SHIFT;
	*offset = startp;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(_iommufd_alloc_mmap, "IOMMUFD");

/*
 * Drivers should use a per-structure helper in include/linux/iommufd.h.
 * An illustrative allocate/destroy sketch follows this function below.
 */
void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
			   struct iommufd_object *owner, unsigned long offset)
{
	struct iommufd_mmap *immap;

	immap = mtree_erase(&ictx->mt_mmap, offset);
	WARN_ON_ONCE(!immap || immap->owner != owner);
	kfree(immap);
}
EXPORT_SYMBOL_NS_GPL(_iommufd_destroy_mmap, "IOMMUFD");
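
/*
 * Illustrative sketch, not part of the upstream code: a driver exposing a
 * single page of MMIO through _iommufd_alloc_mmap() and tearing it down with
 * _iommufd_destroy_mmap(). struct my_vdev and its members are assumptions
 * for illustration only, so the block is kept out of the build with #if 0.
 */
#if 0
static int my_vdev_expose_page(struct iommufd_ctx *ictx,
			       struct my_vdev *my_vdev,
			       phys_addr_t mmio_page, u64 *out_offset)
{
	unsigned long offset;
	int rc;

	/* Both mmio_page and the length must be page aligned */
	rc = _iommufd_alloc_mmap(ictx, &my_vdev->obj, mmio_page, PAGE_SIZE,
				 &offset);
	if (rc)
		return rc;

	/* Return the offset via the driver's uAPI for a later mmap() call */
	my_vdev->mmap_offset = offset;
	*out_offset = offset;
	return 0;
}

static void my_vdev_unexpose_page(struct iommufd_ctx *ictx,
				  struct my_vdev *my_vdev)
{
	_iommufd_destroy_mmap(ictx, &my_vdev->obj, my_vdev->mmap_offset);
}
#endif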

struct device *iommufd_vdevice_to_device(struct iommufd_vdevice *vdev)
{
	return vdev->idev->dev;
}
EXPORT_SYMBOL_NS_GPL(iommufd_vdevice_to_device, "IOMMUFD");

/*
 * The caller must hold xa_lock(&viommu->vdevs) to protect the returned device
 * pointer; an illustrative sketch follows this function below.
 */
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
				       unsigned long vdev_id)
{
	struct iommufd_vdevice *vdev;

	lockdep_assert_held(&viommu->vdevs.xa_lock);

	vdev = xa_load(&viommu->vdevs, vdev_id);
	return vdev ? iommufd_vdevice_to_device(vdev) : NULL;
}
EXPORT_SYMBOL_NS_GPL(iommufd_viommu_find_dev, "IOMMUFD");
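
/*
 * Illustrative sketch, not part of the upstream code: holding the vdevs
 * xa_lock across the lookup and the use of the returned device pointer, as
 * the comment above requires. my_viommu_flush_dev() is a hypothetical driver
 * hook, so the block is kept out of the build with #if 0.
 */
#if 0
static void my_viommu_handle_vdev(struct iommufd_viommu *viommu,
				  unsigned long vdev_id)
{
	struct device *dev;

	/* Hold the lock for as long as the device pointer is dereferenced */
	xa_lock(&viommu->vdevs);
	dev = iommufd_viommu_find_dev(viommu, vdev_id);
	if (dev)
		my_viommu_flush_dev(dev);
	xa_unlock(&viommu->vdevs);
}
#endif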

/* Returns -ENOENT if the device is not associated with the vIOMMU */
int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
			       struct device *dev, unsigned long *vdev_id)
{
	struct iommufd_vdevice *vdev;
	unsigned long index;
	int rc = -ENOENT;

	if (WARN_ON_ONCE(!vdev_id))
		return -EINVAL;

	xa_lock(&viommu->vdevs);
	xa_for_each(&viommu->vdevs, index, vdev) {
		if (iommufd_vdevice_to_device(vdev) == dev) {
			*vdev_id = vdev->virt_id;
			rc = 0;
			break;
		}
	}
	xa_unlock(&viommu->vdevs);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_viommu_get_vdev_id, "IOMMUFD");

/*
 * Typically called from a driver's threaded IRQ handler; an illustrative
 * sketch follows this function below.
 * The @type and @event_data must be defined in include/uapi/linux/iommufd.h
 */
int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
				enum iommu_veventq_type type, void *event_data,
				size_t data_len)
{
	struct iommufd_veventq *veventq;
	struct iommufd_vevent *vevent;
	int rc = 0;

	if (WARN_ON_ONCE(!data_len || !event_data))
		return -EINVAL;

	down_read(&viommu->veventqs_rwsem);

	veventq = iommufd_viommu_find_veventq(viommu, type);
	if (!veventq) {
		rc = -EOPNOTSUPP;
		goto out_unlock_veventqs;
	}

	spin_lock(&veventq->common.lock);
	if (veventq->num_events == veventq->depth) {
		vevent = &veventq->lost_events_header;
		goto out_set_header;
	}

	vevent = kzalloc(struct_size(vevent, event_data, data_len), GFP_ATOMIC);
	if (!vevent) {
		rc = -ENOMEM;
		vevent = &veventq->lost_events_header;
		goto out_set_header;
	}
	memcpy(vevent->event_data, event_data, data_len);
	vevent->data_len = data_len;
	veventq->num_events++;

out_set_header:
	iommufd_vevent_handler(veventq, vevent);
	spin_unlock(&veventq->common.lock);
out_unlock_veventqs:
	up_read(&viommu->veventqs_rwsem);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_viommu_report_event, "IOMMUFD");
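
/*
 * Illustrative sketch, not part of the upstream code: a driver's threaded IRQ
 * handler translating the physical device into the VMM-assigned virtual ID
 * and forwarding the event to user space. struct my_viommu, struct my_event,
 * MY_VEVENTQ_TYPE and my_read_hw_event() are assumptions for illustration
 * only, so the block is kept out of the build with #if 0.
 */
#if 0
static irqreturn_t my_evtq_irq_thread(int irq, void *cookie)
{
	struct my_viommu *my_viommu = cookie;
	struct my_event evt;	/* layout defined in the uAPI header */
	unsigned long vdev_id;
	struct device *dev;

	while (my_read_hw_event(my_viommu, &evt, &dev)) {
		if (iommufd_viommu_get_vdev_id(&my_viommu->core, dev, &vdev_id))
			continue;	/* device not attached to this vIOMMU */
		evt.virt_id = vdev_id;

		/* Allocates with GFP_ATOMIC, so this context is fine */
		iommufd_viommu_report_event(&my_viommu->core, MY_VEVENTQ_TYPE,
					    &evt, sizeof(evt));
	}
	return IRQ_HANDLED;
}
#endif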

#ifdef CONFIG_IRQ_MSI_IOMMU
/*
 * Get an iommufd_sw_msi_map for the MSI physical address requested by the irq
 * layer. The mapping to IOVA is global to the iommufd file descriptor; every
 * domain that is attached to a device using the same MSI parameters will use
 * the same IOVA.
 */
static struct iommufd_sw_msi_map *
iommufd_sw_msi_get_map(struct iommufd_ctx *ictx, phys_addr_t msi_addr,
		       phys_addr_t sw_msi_start)
{
	struct iommufd_sw_msi_map *cur;
	unsigned int max_pgoff = 0;

	lockdep_assert_held(&ictx->sw_msi_lock);

	list_for_each_entry(cur, &ictx->sw_msi_list, sw_msi_item) {
		if (cur->sw_msi_start != sw_msi_start)
			continue;
		max_pgoff = max(max_pgoff, cur->pgoff + 1);
		if (cur->msi_addr == msi_addr)
			return cur;
	}

	if (ictx->sw_msi_id >=
	    BITS_PER_BYTE * sizeof_field(struct iommufd_sw_msi_maps, bitmap))
		return ERR_PTR(-EOVERFLOW);

	cur = kzalloc(sizeof(*cur), GFP_KERNEL);
	if (!cur)
		return ERR_PTR(-ENOMEM);

	cur->sw_msi_start = sw_msi_start;
	cur->msi_addr = msi_addr;
	cur->pgoff = max_pgoff;
	cur->id = ictx->sw_msi_id++;
	list_add_tail(&cur->sw_msi_item, &ictx->sw_msi_list);
	return cur;
}

int iommufd_sw_msi_install(struct iommufd_ctx *ictx,
			   struct iommufd_hwpt_paging *hwpt_paging,
			   struct iommufd_sw_msi_map *msi_map)
{
	unsigned long iova;

	lockdep_assert_held(&ictx->sw_msi_lock);

	iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
	if (!test_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap)) {
		int rc;

		rc = iommu_map(hwpt_paging->common.domain, iova,
			       msi_map->msi_addr, PAGE_SIZE,
			       IOMMU_WRITE | IOMMU_READ | IOMMU_MMIO,
			       GFP_KERNEL_ACCOUNT);
		if (rc)
			return rc;
		__set_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap);
	}
	return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_sw_msi_install, "IOMMUFD_INTERNAL");

/*
 * Called by the irq code if the platform translates the MSI address through
 * the IOMMU. msi_addr is the physical address of the MSI page. iommufd will
 * allocate an fd-global IOVA for the physical page that is the same on all
 * domains and devices.
 */
int iommufd_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
		   phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommu_attach_handle *raw_handle;
	struct iommufd_attach_handle *handle;
	struct iommufd_sw_msi_map *msi_map;
	struct iommufd_ctx *ictx;
	unsigned long iova;
	int rc;

	/*
	 * It is safe to call iommu_attach_handle_get() here because the iommu
	 * core code invokes this under the group mutex, which also prevents
	 * any change of the attach handle for the duration of this function.
	 */
	iommu_group_mutex_assert(dev);

	raw_handle =
		iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0);
	if (IS_ERR(raw_handle))
		return 0;
	hwpt_paging = find_hwpt_paging(domain->iommufd_hwpt);

	handle = to_iommufd_handle(raw_handle);
	/* No IOMMU_RESV_SW_MSI means no change to the msi_msg */
	if (handle->idev->igroup->sw_msi_start == PHYS_ADDR_MAX)
		return 0;

	ictx = handle->idev->ictx;
	guard(mutex)(&ictx->sw_msi_lock);
	/*
	 * The input msi_addr is the exact byte offset of the MSI doorbell; we
	 * assume the caller has checked that it is contained within an MMIO
	 * region that is secure to map at PAGE_SIZE.
	 */
	msi_map = iommufd_sw_msi_get_map(handle->idev->ictx,
					 msi_addr & PAGE_MASK,
					 handle->idev->igroup->sw_msi_start);
	if (IS_ERR(msi_map))
		return PTR_ERR(msi_map);

	rc = iommufd_sw_msi_install(ictx, hwpt_paging, msi_map);
	if (rc)
		return rc;
	__set_bit(msi_map->id, handle->idev->igroup->required_sw_msi.bitmap);

	iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
	msi_desc_set_iommu_msi_iova(desc, iova, PAGE_SHIFT);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_sw_msi, "IOMMUFD");
#endif

MODULE_DESCRIPTION("iommufd code shared with builtin modules");
MODULE_IMPORT_NS("IOMMUFD_INTERNAL");
MODULE_LICENSE("GPL");