xref: /linux/drivers/misc/uacce/uacce.c (revision 0a6dce0a5c66ab2cb3e9f01902e5b188ada8a89d)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 #include <linux/compat.h>
3 #include <linux/dma-mapping.h>
4 #include <linux/iommu.h>
5 #include <linux/module.h>
6 #include <linux/poll.h>
7 #include <linux/slab.h>
8 #include <linux/uacce.h>
9 
/* Character device region shared by all uacce devices. */
static dev_t uacce_devt;
/* Maps device IDs (char device minor numbers) to their struct uacce_device. */
static DEFINE_XARRAY_ALLOC(uacce_xa);

static const struct class uacce_class = {
	.name = UACCE_NAME,
};
16 
17 /*
18  * If the parent driver or the device disappears, the queue state is invalid and
19  * ops are not usable anymore.
20  */
uacce_queue_is_valid(struct uacce_queue * q)21 static bool uacce_queue_is_valid(struct uacce_queue *q)
22 {
23 	return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
24 }
25 
uacce_start_queue(struct uacce_queue * q)26 static int uacce_start_queue(struct uacce_queue *q)
27 {
28 	int ret;
29 
30 	if (q->state != UACCE_Q_INIT)
31 		return -EINVAL;
32 
33 	if (q->uacce->ops->start_queue) {
34 		ret = q->uacce->ops->start_queue(q);
35 		if (ret < 0)
36 			return ret;
37 	}
38 
39 	q->state = UACCE_Q_STARTED;
40 	return 0;
41 }
42 
uacce_stop_queue(struct uacce_queue * q)43 static int uacce_stop_queue(struct uacce_queue *q)
44 {
45 	struct uacce_device *uacce = q->uacce;
46 
47 	if (q->state != UACCE_Q_STARTED)
48 		return 0;
49 
50 	if (uacce->ops->stop_queue)
51 		uacce->ops->stop_queue(q);
52 
53 	q->state = UACCE_Q_INIT;
54 
55 	return 0;
56 }
57 
/*
 * Fully tear down a queue: stop it if it is running, then release the
 * driver's per-queue resources and mark it ZOMBIE so no further ops
 * touch it.
 */
static void uacce_put_queue(struct uacce_queue *q)
{
	struct uacce_device *uacce = q->uacce;

	uacce_stop_queue(q);

	/* Only an INIT-state queue still owns resources to put. */
	if (q->state != UACCE_Q_INIT)
		return;

	if (uacce->ops->put_queue)
		uacce->ops->put_queue(q);

	q->state = UACCE_Q_ZOMBIE;
}
72 
uacce_fops_unl_ioctl(struct file * filep,unsigned int cmd,unsigned long arg)73 static long uacce_fops_unl_ioctl(struct file *filep,
74 				 unsigned int cmd, unsigned long arg)
75 {
76 	struct uacce_queue *q = filep->private_data;
77 	struct uacce_device *uacce = q->uacce;
78 	long ret = -ENXIO;
79 
80 	/*
81 	 * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
82 	 * user. Avoid a circular lock dependency with uacce_fops_mmap(), which
83 	 * gets called with mmap_lock held, by taking uacce->mutex instead of
84 	 * q->mutex. Doing this in uacce_fops_mmap() is not possible because
85 	 * uacce_fops_open() calls iommu_sva_bind_device(), which takes
86 	 * mmap_lock, while holding uacce->mutex.
87 	 */
88 	mutex_lock(&uacce->mutex);
89 	if (!uacce_queue_is_valid(q))
90 		goto out_unlock;
91 
92 	switch (cmd) {
93 	case UACCE_CMD_START_Q:
94 		ret = uacce_start_queue(q);
95 		break;
96 	case UACCE_CMD_PUT_Q:
97 		ret = uacce_stop_queue(q);
98 		break;
99 	default:
100 		if (uacce->ops->ioctl)
101 			ret = uacce->ops->ioctl(q, cmd, arg);
102 		else
103 			ret = -EINVAL;
104 	}
105 out_unlock:
106 	mutex_unlock(&uacce->mutex);
107 	return ret;
108 }
109 
#ifdef CONFIG_COMPAT
static long uacce_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	/* Widen the 32-bit user pointer, then reuse the native ioctl path. */
	return uacce_fops_unl_ioctl(filep, cmd,
				    (unsigned long)compat_ptr(arg));
}
#endif
119 
/*
 * Bind the queue to the current process address space when the device
 * supports Shared Virtual Addressing; a no-op otherwise.  On success the
 * SVA handle and the PASID identifying the bond are stored in the queue.
 */
static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
	u32 pasid;
	struct iommu_sva *handle;

	if (!(uacce->flags & UACCE_DEV_SVA))
		return 0;

	handle = iommu_sva_bind_device(uacce->parent, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		/* No usable PASID: undo the bind rather than keep a dead handle. */
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	q->handle = handle;
	q->pasid = pasid;
	return 0;
}
142 
uacce_unbind_queue(struct uacce_queue * q)143 static void uacce_unbind_queue(struct uacce_queue *q)
144 {
145 	if (!q->handle)
146 		return;
147 	iommu_sva_unbind_device(q->handle);
148 	q->handle = NULL;
149 }
150 
static int uacce_fops_open(struct inode *inode, struct file *filep)
{
	struct uacce_device *uacce;
	struct uacce_queue *q;
	int ret;

	/* The minor number is the device ID allocated in uacce_alloc(). */
	uacce = xa_load(&uacce_xa, iminor(inode));
	if (!uacce)
		return -ENODEV;

	q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	/*
	 * uacce->mutex serializes against uacce_remove(): once the parent is
	 * cleared there, no new queue may be opened on this device.
	 */
	mutex_lock(&uacce->mutex);

	if (!uacce->parent) {
		ret = -EINVAL;
		goto out_with_mem;
	}

	/* Bind the current mm (SVA) before asking the driver for a queue. */
	ret = uacce_bind_queue(uacce, q);
	if (ret)
		goto out_with_mem;

	q->uacce = uacce;

	if (uacce->ops->get_queue) {
		ret = uacce->ops->get_queue(uacce, q->pasid, q);
		if (ret < 0)
			goto out_with_bond;
	}

	init_waitqueue_head(&q->wait);
	filep->private_data = q;
	q->state = UACCE_Q_INIT;
	/* Remember the mapping so uacce_remove() can zap user mappings. */
	q->mapping = filep->f_mapping;
	mutex_init(&q->mutex);
	list_add(&q->list, &uacce->queues);
	mutex_unlock(&uacce->mutex);

	return 0;

out_with_bond:
	uacce_unbind_queue(q);
out_with_mem:
	kfree(q);
	mutex_unlock(&uacce->mutex);
	return ret;
}
201 
static int uacce_fops_release(struct inode *inode, struct file *filep)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;

	/*
	 * Hold uacce->mutex so release cannot race with uacce_remove()
	 * walking the queue list: stop/free the queue, drop the SVA bond,
	 * then unlink it before freeing its memory.
	 */
	mutex_lock(&uacce->mutex);
	uacce_put_queue(q);
	uacce_unbind_queue(q);
	list_del(&q->list);
	mutex_unlock(&uacce->mutex);
	kfree(q);

	return 0;
}
216 
uacce_vma_close(struct vm_area_struct * vma)217 static void uacce_vma_close(struct vm_area_struct *vma)
218 {
219 	struct uacce_queue *q = vma->vm_private_data;
220 
221 	if (vma->vm_pgoff < UACCE_MAX_REGION) {
222 		struct uacce_qfile_region *qfr = q->qfrs[vma->vm_pgoff];
223 
224 		mutex_lock(&q->mutex);
225 		q->qfrs[vma->vm_pgoff] = NULL;
226 		mutex_unlock(&q->mutex);
227 		kfree(qfr);
228 	}
229 }
230 
/* Forbid mremap() on queue regions: a mapping is fixed once established. */
static int uacce_vma_mremap(struct vm_area_struct *area)
{
	return -EPERM;
}
235 
static const struct vm_operations_struct uacce_vm_ops = {
	.close = uacce_vma_close,	/* release qfile region bookkeeping */
	.mremap = uacce_vma_mremap,	/* mappings may not be moved/resized */
};
240 
static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	struct uacce_qfile_region *qfr;
	enum uacce_qfrt type = UACCE_MAX_REGION;
	int ret = 0;

	/* The page offset selects which queue file region is being mapped. */
	if (vma->vm_pgoff < UACCE_MAX_REGION)
		type = vma->vm_pgoff;
	else
		return -EINVAL;

	qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
	if (!qfr)
		return -ENOMEM;

	/* Queue mappings must not be inherited, expanded or copied on fork. */
	vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK);
	vma->vm_ops = &uacce_vm_ops;
	vma->vm_private_data = q;
	qfr->type = type;

	/*
	 * q->mutex (not uacce->mutex) is used here because the caller holds
	 * mmap_lock; see the lock-ordering comment in uacce_fops_unl_ioctl().
	 */
	mutex_lock(&q->mutex);
	if (!uacce_queue_is_valid(q)) {
		ret = -ENXIO;
		goto out_with_lock;
	}

	/* Each region may be mapped at most once per queue. */
	if (q->qfrs[type]) {
		ret = -EEXIST;
		goto out_with_lock;
	}

	switch (type) {
	case UACCE_QFRT_MMIO:
	case UACCE_QFRT_DUS:
		/* The parent driver performs the actual mapping. */
		if (!uacce->ops->mmap) {
			ret = -EINVAL;
			goto out_with_lock;
		}

		ret = uacce->ops->mmap(q, vma, qfr);
		if (ret)
			goto out_with_lock;
		break;

	default:
		ret = -EINVAL;
		goto out_with_lock;
	}

	/* Publish the region only after the driver mapped it successfully. */
	q->qfrs[type] = qfr;
	mutex_unlock(&q->mutex);

	return ret;

out_with_lock:
	mutex_unlock(&q->mutex);
	kfree(qfr);
	return ret;
}
302 
uacce_fops_poll(struct file * file,poll_table * wait)303 static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
304 {
305 	struct uacce_queue *q = file->private_data;
306 	struct uacce_device *uacce = q->uacce;
307 	__poll_t ret = 0;
308 
309 	mutex_lock(&q->mutex);
310 	if (!uacce_queue_is_valid(q))
311 		goto out_unlock;
312 
313 	poll_wait(file, &q->wait, wait);
314 
315 	if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
316 		ret = EPOLLIN | EPOLLRDNORM;
317 
318 out_unlock:
319 	mutex_unlock(&q->mutex);
320 	return ret;
321 }
322 
static const struct file_operations uacce_fops = {
	.owner		= THIS_MODULE,
	.open		= uacce_fops_open,
	.release	= uacce_fops_release,
	.unlocked_ioctl	= uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= uacce_fops_compat_ioctl,
#endif
	.mmap		= uacce_fops_mmap,
	.poll		= uacce_fops_poll,
};

/* Convert an embedded struct device back to its containing uacce device. */
#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)
336 
api_show(struct device * dev,struct device_attribute * attr,char * buf)337 static ssize_t api_show(struct device *dev,
338 			struct device_attribute *attr, char *buf)
339 {
340 	struct uacce_device *uacce = to_uacce_device(dev);
341 
342 	return sysfs_emit(buf, "%s\n", uacce->api_ver);
343 }
344 
flags_show(struct device * dev,struct device_attribute * attr,char * buf)345 static ssize_t flags_show(struct device *dev,
346 			  struct device_attribute *attr, char *buf)
347 {
348 	struct uacce_device *uacce = to_uacce_device(dev);
349 
350 	return sysfs_emit(buf, "%u\n", uacce->flags);
351 }
352 
available_instances_show(struct device * dev,struct device_attribute * attr,char * buf)353 static ssize_t available_instances_show(struct device *dev,
354 					struct device_attribute *attr,
355 					char *buf)
356 {
357 	struct uacce_device *uacce = to_uacce_device(dev);
358 
359 	if (!uacce->ops->get_available_instances)
360 		return -ENODEV;
361 
362 	return sysfs_emit(buf, "%d\n",
363 		       uacce->ops->get_available_instances(uacce));
364 }
365 
algorithms_show(struct device * dev,struct device_attribute * attr,char * buf)366 static ssize_t algorithms_show(struct device *dev,
367 			       struct device_attribute *attr, char *buf)
368 {
369 	struct uacce_device *uacce = to_uacce_device(dev);
370 
371 	return sysfs_emit(buf, "%s\n", uacce->algs);
372 }
373 
region_mmio_size_show(struct device * dev,struct device_attribute * attr,char * buf)374 static ssize_t region_mmio_size_show(struct device *dev,
375 				     struct device_attribute *attr, char *buf)
376 {
377 	struct uacce_device *uacce = to_uacce_device(dev);
378 
379 	return sysfs_emit(buf, "%lu\n",
380 		       uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
381 }
382 
region_dus_size_show(struct device * dev,struct device_attribute * attr,char * buf)383 static ssize_t region_dus_size_show(struct device *dev,
384 				    struct device_attribute *attr, char *buf)
385 {
386 	struct uacce_device *uacce = to_uacce_device(dev);
387 
388 	return sysfs_emit(buf, "%lu\n",
389 		       uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
390 }
391 
isolate_show(struct device * dev,struct device_attribute * attr,char * buf)392 static ssize_t isolate_show(struct device *dev,
393 			    struct device_attribute *attr, char *buf)
394 {
395 	struct uacce_device *uacce = to_uacce_device(dev);
396 
397 	return sysfs_emit(buf, "%d\n", uacce->ops->get_isolate_state(uacce));
398 }
399 
isolate_strategy_show(struct device * dev,struct device_attribute * attr,char * buf)400 static ssize_t isolate_strategy_show(struct device *dev, struct device_attribute *attr, char *buf)
401 {
402 	struct uacce_device *uacce = to_uacce_device(dev);
403 	u32 val;
404 
405 	if (!uacce->ops->isolate_err_threshold_read)
406 		return -ENOENT;
407 
408 	val = uacce->ops->isolate_err_threshold_read(uacce);
409 
410 	return sysfs_emit(buf, "%u\n", val);
411 }
412 
isolate_strategy_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)413 static ssize_t isolate_strategy_store(struct device *dev, struct device_attribute *attr,
414 				   const char *buf, size_t count)
415 {
416 	struct uacce_device *uacce = to_uacce_device(dev);
417 	unsigned long val;
418 	int ret;
419 
420 	if (!uacce->ops->isolate_err_threshold_write)
421 		return -ENOENT;
422 
423 	if (kstrtoul(buf, 0, &val) < 0)
424 		return -EINVAL;
425 
426 	if (val > UACCE_MAX_ERR_THRESHOLD)
427 		return -EINVAL;
428 
429 	ret = uacce->ops->isolate_err_threshold_write(uacce, val);
430 	if (ret)
431 		return ret;
432 
433 	return count;
434 }
435 
static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);
static DEVICE_ATTR_RO(isolate);
static DEVICE_ATTR_RW(isolate_strategy);

/* Attributes exported under /sys/class/uacce/<dev>/; visibility is
 * filtered per device by uacce_dev_is_visible(). */
static struct attribute *uacce_dev_attrs[] = {
	&dev_attr_api.attr,
	&dev_attr_flags.attr,
	&dev_attr_available_instances.attr,
	&dev_attr_algorithms.attr,
	&dev_attr_region_mmio_size.attr,
	&dev_attr_region_dus_size.attr,
	&dev_attr_isolate.attr,
	&dev_attr_isolate_strategy.attr,
	NULL,
};
456 
uacce_dev_is_visible(struct kobject * kobj,struct attribute * attr,int n)457 static umode_t uacce_dev_is_visible(struct kobject *kobj,
458 				    struct attribute *attr, int n)
459 {
460 	struct device *dev = kobj_to_dev(kobj);
461 	struct uacce_device *uacce = to_uacce_device(dev);
462 
463 	if (((attr == &dev_attr_region_mmio_size.attr) &&
464 	    (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
465 	    ((attr == &dev_attr_region_dus_size.attr) &&
466 	    (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
467 		return 0;
468 
469 	if (attr == &dev_attr_isolate_strategy.attr &&
470 	    (!uacce->ops->isolate_err_threshold_read &&
471 	     !uacce->ops->isolate_err_threshold_write))
472 		return 0;
473 
474 	if (attr == &dev_attr_isolate.attr && !uacce->ops->get_isolate_state)
475 		return 0;
476 
477 	return attr->mode;
478 }
479 
static struct attribute_group uacce_dev_group = {
	.is_visible	= uacce_dev_is_visible,
	.attrs		= uacce_dev_attrs,
};

__ATTRIBUTE_GROUPS(uacce_dev);

/* Device-model release callback: frees the uacce on the final put_device(). */
static void uacce_release(struct device *dev)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	kfree(uacce);
}
493 
/**
 * uacce_alloc() - alloc an accelerator
 * @parent: pointer of uacce parent device
 * @interface: pointer of uacce_interface for register
 *
 * Returns uacce pointer if success and ERR_PTR if not
 * Need check returned negotiated uacce->flags
 */
struct uacce_device *uacce_alloc(struct device *parent,
				 struct uacce_interface *interface)
{
	unsigned int flags = interface->flags;
	struct uacce_device *uacce;
	int ret;

	uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
	if (!uacce)
		return ERR_PTR(-ENOMEM);

	uacce->parent = parent;
	uacce->flags = flags;
	uacce->ops = interface->ops;

	/* dev_id doubles as the char device minor number (see fops_open). */
	ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
		       GFP_KERNEL);
	if (ret < 0)
		goto err_with_uacce;

	INIT_LIST_HEAD(&uacce->queues);
	mutex_init(&uacce->mutex);
	device_initialize(&uacce->dev);
	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
	uacce->dev.class = &uacce_class;
	uacce->dev.groups = uacce_dev_groups;
	uacce->dev.parent = uacce->parent;
	/* Freed by uacce_release() when the last reference is dropped. */
	uacce->dev.release = uacce_release;
	dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);

	return uacce;

err_with_uacce:
	kfree(uacce);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(uacce_alloc);
539 
540 /**
541  * uacce_register() - add the accelerator to cdev and export to user space
542  * @uacce: The initialized uacce device
543  *
544  * Return 0 if register succeeded, or an error.
545  */
uacce_register(struct uacce_device * uacce)546 int uacce_register(struct uacce_device *uacce)
547 {
548 	int ret;
549 
550 	if (!uacce)
551 		return -ENODEV;
552 
553 	uacce->cdev = cdev_alloc();
554 	if (!uacce->cdev)
555 		return -ENOMEM;
556 
557 	uacce->cdev->ops = &uacce_fops;
558 	uacce->cdev->owner = THIS_MODULE;
559 
560 	ret = cdev_device_add(uacce->cdev, &uacce->dev);
561 	if (ret)
562 		uacce->cdev = NULL;
563 
564 	return ret;
565 }
566 EXPORT_SYMBOL_GPL(uacce_register);
567 
/**
 * uacce_remove() - remove the accelerator
 * @uacce: the accelerator to remove
 */
void uacce_remove(struct uacce_device *uacce)
{
	struct uacce_queue *q, *next_q;

	if (!uacce)
		return;

	/*
	 * uacce_fops_open() may be running concurrently, even after we remove
	 * the cdev. Holding uacce->mutex ensures that open() does not obtain a
	 * removed uacce device.
	 */
	mutex_lock(&uacce->mutex);
	/* ensure no open queue remains */
	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
		/*
		 * Taking q->mutex ensures that fops do not use the defunct
		 * uacce->ops after the queue is disabled.
		 */
		mutex_lock(&q->mutex);
		uacce_put_queue(q);
		mutex_unlock(&q->mutex);
		uacce_unbind_queue(q);

		/*
		 * unmap remaining mapping from user space, preventing user still
		 * access the mmaped area while parent device is already removed
		 */
		unmap_mapping_range(q->mapping, 0, 0, 1);
	}

	if (uacce->cdev)
		cdev_device_del(uacce->cdev, &uacce->dev);
	xa_erase(&uacce_xa, uacce->dev_id);
	/*
	 * uacce exists as long as there are open fds, but ops will be freed
	 * now. Ensure that bugs cause NULL deref rather than use-after-free.
	 */
	uacce->ops = NULL;
	uacce->parent = NULL;
	mutex_unlock(&uacce->mutex);
	/* Drop the alloc-time reference; uacce_release() frees on last put. */
	put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);
616 
uacce_init(void)617 static int __init uacce_init(void)
618 {
619 	int ret;
620 
621 	ret = class_register(&uacce_class);
622 	if (ret)
623 		return ret;
624 
625 	ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
626 	if (ret)
627 		class_unregister(&uacce_class);
628 
629 	return ret;
630 }
631 
/* Tear down in reverse order of uacce_init(). */
static __exit void uacce_exit(void)
{
	unregister_chrdev_region(uacce_devt, MINORMASK);
	class_unregister(&uacce_class);
}
637 
/* Initialize early so accelerator drivers built-in can register at init. */
subsys_initcall(uacce_init);
module_exit(uacce_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("HiSilicon Tech. Co., Ltd.");
MODULE_DESCRIPTION("Accelerator interface for Userland applications");