// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uacce.h>

static struct class *uacce_class;
static dev_t uacce_devt;
static DEFINE_MUTEX(uacce_mutex);
static DEFINE_XARRAY_ALLOC(uacce_xa);

static int uacce_start_queue(struct uacce_queue *q)
{
	int ret = 0;

	mutex_lock(&uacce_mutex);

	if (q->state != UACCE_Q_INIT) {
		ret = -EINVAL;
		goto out_with_lock;
	}

	if (q->uacce->ops->start_queue) {
		ret = q->uacce->ops->start_queue(q);
		if (ret < 0)
			goto out_with_lock;
	}

	q->state = UACCE_Q_STARTED;

out_with_lock:
	mutex_unlock(&uacce_mutex);

	return ret;
}

static int uacce_put_queue(struct uacce_queue *q)
{
	struct uacce_device *uacce = q->uacce;

	mutex_lock(&uacce_mutex);

	if (q->state == UACCE_Q_ZOMBIE)
		goto out;

	if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
		uacce->ops->stop_queue(q);

	if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
	     uacce->ops->put_queue)
		uacce->ops->put_queue(q);

	q->state = UACCE_Q_ZOMBIE;
out:
	mutex_unlock(&uacce_mutex);

	return 0;
}

static long uacce_fops_unl_ioctl(struct file *filep,
				 unsigned int cmd, unsigned long arg)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;

	switch (cmd) {
	case UACCE_CMD_START_Q:
		return uacce_start_queue(q);

	case UACCE_CMD_PUT_Q:
		return uacce_put_queue(q);

	default:
		if (!uacce->ops->ioctl)
			return -EINVAL;

		return uacce->ops->ioctl(q, cmd, arg);
	}
}
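
/*
 * Example: a minimal user-space sketch of the queue lifecycle driven by the
 * ioctls above. Illustrative only: "/dev/hisi_zip-0" is a hypothetical
 * device node (nodes are named "<interface name>-<dev_id>"), and error
 * handling is elided.
 *
 *	int fd = open("/dev/hisi_zip-0", O_RDWR); // open() allocates a queue
 *
 *	ioctl(fd, UACCE_CMD_START_Q);   // UACCE_Q_INIT -> UACCE_Q_STARTED
 *	// ... submit work through the mmapped queue file regions ...
 *	ioctl(fd, UACCE_CMD_PUT_Q);     // queue goes to UACCE_Q_ZOMBIE
 *	close(fd);                      // release frees the queue
 */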

#ifdef CONFIG_COMPAT
static long uacce_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);

	return uacce_fops_unl_ioctl(filep, cmd, arg);
}
#endif

static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
	int pasid;
	struct iommu_sva *handle;

	if (!(uacce->flags & UACCE_DEV_SVA))
		return 0;

	handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	q->handle = handle;
	q->pasid = pasid;
	return 0;
}

static void uacce_unbind_queue(struct uacce_queue *q)
{
	if (!q->handle)
		return;
	iommu_sva_unbind_device(q->handle);
	q->handle = NULL;
}

static int uacce_fops_open(struct inode *inode, struct file *filep)
{
	struct uacce_device *uacce;
	struct uacce_queue *q;
	int ret = 0;

	uacce = xa_load(&uacce_xa, iminor(inode));
	if (!uacce)
		return -ENODEV;

	q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	ret = uacce_bind_queue(uacce, q);
	if (ret)
		goto out_with_mem;

	q->uacce = uacce;

	if (uacce->ops->get_queue) {
		ret = uacce->ops->get_queue(uacce, q->pasid, q);
		if (ret < 0)
			goto out_with_bind;
	}

	init_waitqueue_head(&q->wait);
	filep->private_data = q;
	uacce->inode = inode;
	q->state = UACCE_Q_INIT;

	mutex_lock(&uacce->queues_lock);
	list_add(&q->list, &uacce->queues);
	mutex_unlock(&uacce->queues_lock);

	return 0;

out_with_bind:
	uacce_unbind_queue(q);
out_with_mem:
	kfree(q);
	return ret;
}

static int uacce_fops_release(struct inode *inode, struct file *filep)
{
	struct uacce_queue *q = filep->private_data;

	mutex_lock(&q->uacce->queues_lock);
	list_del(&q->list);
	mutex_unlock(&q->uacce->queues_lock);
	uacce_put_queue(q);
	uacce_unbind_queue(q);
	kfree(q);

	return 0;
}

static void uacce_vma_close(struct vm_area_struct *vma)
{
	struct uacce_queue *q = vma->vm_private_data;
	struct uacce_qfile_region *qfr = NULL;

	if (vma->vm_pgoff < UACCE_MAX_REGION) {
		qfr = q->qfrs[vma->vm_pgoff];
		/* clear the slot, so it no longer dangles after the kfree */
		q->qfrs[vma->vm_pgoff] = NULL;
	}

	kfree(qfr);
}

static const struct vm_operations_struct uacce_vm_ops = {
	.close = uacce_vma_close,
};

static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	struct uacce_qfile_region *qfr;
	enum uacce_qfrt type = UACCE_MAX_REGION;
	int ret = 0;

	if (vma->vm_pgoff < UACCE_MAX_REGION)
		type = vma->vm_pgoff;
	else
		return -EINVAL;

	qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
	if (!qfr)
		return -ENOMEM;

	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
	vma->vm_ops = &uacce_vm_ops;
	vma->vm_private_data = q;
	qfr->type = type;

	mutex_lock(&uacce_mutex);

	if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
		ret = -EINVAL;
		goto out_with_lock;
	}

	if (q->qfrs[type]) {
		ret = -EEXIST;
		goto out_with_lock;
	}

	switch (type) {
	case UACCE_QFRT_MMIO:
	case UACCE_QFRT_DUS:
		if (!uacce->ops->mmap) {
			ret = -EINVAL;
			goto out_with_lock;
		}

		ret = uacce->ops->mmap(q, vma, qfr);
		if (ret)
			goto out_with_lock;
		break;

	default:
		ret = -EINVAL;
		goto out_with_lock;
	}

	q->qfrs[type] = qfr;
	mutex_unlock(&uacce_mutex);

	return ret;

out_with_lock:
	mutex_unlock(&uacce_mutex);
	kfree(qfr);
	return ret;
}
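
/*
 * Example: how user space selects a region. The mmap offset, expressed in
 * pages, is the region type (vm_pgoff == UACCE_QFRT_MMIO or UACCE_QFRT_DUS),
 * so each region is mapped at offset = type * page_size. A hedged sketch;
 * "mmio_size" and "dus_size" are assumed to come from the region_mmio_size
 * and region_dus_size sysfs attributes below.
 *
 *	long pg = sysconf(_SC_PAGESIZE);
 *	void *mmio = mmap(NULL, mmio_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, UACCE_QFRT_MMIO * pg);
 *	void *dus  = mmap(NULL, dus_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, UACCE_QFRT_DUS * pg);
 */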

static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
	struct uacce_queue *q = file->private_data;
	struct uacce_device *uacce = q->uacce;

	poll_wait(file, &q->wait, wait);
	if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}
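
/*
 * Example: waiting for completions from user space. A queue with updates
 * reports EPOLLIN, so a plain poll() loop suffices; a minimal sketch:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		; // consume completed requests from the shared region
 */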

static const struct file_operations uacce_fops = {
	.owner		= THIS_MODULE,
	.open		= uacce_fops_open,
	.release	= uacce_fops_release,
	.unlocked_ioctl	= uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= uacce_fops_compat_ioctl,
#endif
	.mmap		= uacce_fops_mmap,
	.poll		= uacce_fops_poll,
};

#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)

static ssize_t api_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%s\n", uacce->api_ver);
}

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%u\n", uacce->flags);
}

static ssize_t available_instances_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	if (!uacce->ops->get_available_instances)
		return -ENODEV;

	return sprintf(buf, "%d\n",
		       uacce->ops->get_available_instances(uacce));
}

static ssize_t algorithms_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%s\n", uacce->algs);
}

static ssize_t region_mmio_size_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%lu\n",
		       uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
}

static ssize_t region_dus_size_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%lu\n",
		       uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}

static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);

static struct attribute *uacce_dev_attrs[] = {
	&dev_attr_api.attr,
	&dev_attr_flags.attr,
	&dev_attr_available_instances.attr,
	&dev_attr_algorithms.attr,
	&dev_attr_region_mmio_size.attr,
	&dev_attr_region_dus_size.attr,
	NULL,
};

static umode_t uacce_dev_is_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct uacce_device *uacce = to_uacce_device(dev);

	if (((attr == &dev_attr_region_mmio_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
	    ((attr == &dev_attr_region_dus_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
		return 0;

	return attr->mode;
}

static struct attribute_group uacce_dev_group = {
	.is_visible	= uacce_dev_is_visible,
	.attrs		= uacce_dev_attrs,
};

__ATTRIBUTE_GROUPS(uacce_dev);

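/*
 * With uacce_dev_groups wired up in uacce_alloc() below, each registered
 * device exposes these read-only attributes (region_*_size only when the
 * corresponding region is in use, per uacce_dev_is_visible() above):
 *
 *	/sys/class/uacce/<name>-<dev_id>/api
 *	/sys/class/uacce/<name>-<dev_id>/flags
 *	/sys/class/uacce/<name>-<dev_id>/available_instances
 *	/sys/class/uacce/<name>-<dev_id>/algorithms
 *	/sys/class/uacce/<name>-<dev_id>/region_mmio_size
 *	/sys/class/uacce/<name>-<dev_id>/region_dus_size
 */
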
static void uacce_release(struct device *dev)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	kfree(uacce);
}

/**
 * uacce_alloc() - allocate an accelerator
 * @parent: pointer to the parent device of the uacce device
 * @interface: pointer to the uacce_interface to register with
 *
 * Return: a uacce pointer on success, an ERR_PTR() on failure. The caller
 * must check the negotiated uacce->flags, since requested features (such as
 * UACCE_DEV_SVA) may have been dropped during allocation.
 */
struct uacce_device *uacce_alloc(struct device *parent,
				 struct uacce_interface *interface)
{
	unsigned int flags = interface->flags;
	struct uacce_device *uacce;
	int ret;

	uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
	if (!uacce)
		return ERR_PTR(-ENOMEM);

	if (flags & UACCE_DEV_SVA) {
		ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
		if (ret)
			flags &= ~UACCE_DEV_SVA;
	}

	uacce->parent = parent;
	uacce->flags = flags;
	uacce->ops = interface->ops;

	ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
		       GFP_KERNEL);
	if (ret < 0)
		goto err_with_uacce;

	INIT_LIST_HEAD(&uacce->queues);
	mutex_init(&uacce->queues_lock);
	device_initialize(&uacce->dev);
	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
	uacce->dev.class = uacce_class;
	uacce->dev.groups = uacce_dev_groups;
	uacce->dev.parent = uacce->parent;
	uacce->dev.release = uacce_release;
	dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);

	return uacce;

err_with_uacce:
	if (flags & UACCE_DEV_SVA)
		iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
	kfree(uacce);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(uacce_alloc);
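
/*
 * Example: how a parent driver might allocate its uacce device. A hedged
 * sketch; "my_ops", "my_get_queue" and friends, and "my_pdev" are
 * hypothetical names, not part of this file.
 *
 *	static const struct uacce_ops my_ops = {
 *		.get_queue	= my_get_queue,
 *		.put_queue	= my_put_queue,
 *		.start_queue	= my_start_queue,
 *		.stop_queue	= my_stop_queue,
 *		.mmap		= my_mmap,
 *	};
 *
 *	struct uacce_interface interface = {
 *		.name	= "my_accel",
 *		.flags	= UACCE_DEV_SVA,
 *		.ops	= &my_ops,
 *	};
 *	struct uacce_device *uacce;
 *
 *	uacce = uacce_alloc(&my_pdev->dev, &interface);
 *	if (IS_ERR(uacce))
 *		return PTR_ERR(uacce);
 *
 *	// SVA may have been refused: check the negotiated flags
 *	if (!(uacce->flags & UACCE_DEV_SVA))
 *		dev_info(&my_pdev->dev, "running without SVA\n");
 */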

/**
 * uacce_register() - add the accelerator to cdev and export to user space
 * @uacce: the initialized uacce device
 *
 * Return: 0 if registration succeeded, or an error code.
 */
int uacce_register(struct uacce_device *uacce)
{
	if (!uacce)
		return -ENODEV;

	uacce->cdev = cdev_alloc();
	if (!uacce->cdev)
		return -ENOMEM;

	uacce->cdev->ops = &uacce_fops;
	uacce->cdev->owner = THIS_MODULE;

	return cdev_device_add(uacce->cdev, &uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_register);
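
/*
 * Example: registration typically follows allocation directly in the parent
 * driver's probe path; a minimal sketch (error handling only):
 *
 *	ret = uacce_register(uacce);
 *	if (ret) {
 *		uacce_remove(uacce);	// also drops the device reference
 *		return ret;
 *	}
 */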

/**
 * uacce_remove() - remove the accelerator
 * @uacce: the accelerator to remove
 */
void uacce_remove(struct uacce_device *uacce)
{
	struct uacce_queue *q, *next_q;

	if (!uacce)
		return;
	/*
	 * Unmap any remaining user-space mappings, so that user space cannot
	 * still access the mmapped area after the parent device is removed.
	 */
	if (uacce->inode)
		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);

	/* ensure no open queue remains */
	mutex_lock(&uacce->queues_lock);
	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
		uacce_put_queue(q);
		uacce_unbind_queue(q);
	}
	mutex_unlock(&uacce->queues_lock);

	/* disable sva now that no queue is open */
	if (uacce->flags & UACCE_DEV_SVA)
		iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);

	if (uacce->cdev)
		cdev_device_del(uacce->cdev, &uacce->dev);
	xa_erase(&uacce_xa, uacce->dev_id);
	put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);
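
/*
 * Example: teardown from the parent driver's remove path. A sketch assuming
 * a hypothetical "my_priv" that stores the pointer from uacce_alloc():
 *
 *	static int my_driver_remove(struct platform_device *pdev)
 *	{
 *		struct my_priv *priv = platform_get_drvdata(pdev);
 *
 *		uacce_remove(priv->uacce);
 *		return 0;
 *	}
 */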

static int __init uacce_init(void)
{
	int ret;

	uacce_class = class_create(THIS_MODULE, UACCE_NAME);
	if (IS_ERR(uacce_class))
		return PTR_ERR(uacce_class);

	ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
	if (ret)
		class_destroy(uacce_class);

	return ret;
}

static __exit void uacce_exit(void)
{
	unregister_chrdev_region(uacce_devt, MINORMASK);
	class_destroy(uacce_class);
}

subsys_initcall(uacce_init);
module_exit(uacce_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hisilicon Tech. Co., Ltd.");
MODULE_DESCRIPTION("Accelerator interface for Userland applications");