// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uacce.h>

static struct class *uacce_class;
static dev_t uacce_devt;
static DEFINE_MUTEX(uacce_mutex);
static DEFINE_XARRAY_ALLOC(uacce_xa);

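/*
 * Move a queue from the INIT to the STARTED state under uacce_mutex,
 * invoking the parent driver's start_queue callback when one is provided.
 */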
static int uacce_start_queue(struct uacce_queue *q)
{
	int ret = 0;

	mutex_lock(&uacce_mutex);

	if (q->state != UACCE_Q_INIT) {
		ret = -EINVAL;
		goto out_with_lock;
	}

	if (q->uacce->ops->start_queue) {
		ret = q->uacce->ops->start_queue(q);
		if (ret < 0)
			goto out_with_lock;
	}

	q->state = UACCE_Q_STARTED;

out_with_lock:
	mutex_unlock(&uacce_mutex);

	return ret;
}

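/*
 * Stop and release a queue, moving it to the ZOMBIE state. Safe to call
 * more than once; subsequent calls are no-ops.
 */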
static int uacce_put_queue(struct uacce_queue *q)
{
	struct uacce_device *uacce = q->uacce;

	mutex_lock(&uacce_mutex);

	if (q->state == UACCE_Q_ZOMBIE)
		goto out;

	if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
		uacce->ops->stop_queue(q);

	if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
	     uacce->ops->put_queue)
		uacce->ops->put_queue(q);

	q->state = UACCE_Q_ZOMBIE;
out:
	mutex_unlock(&uacce_mutex);

	return 0;
}

static long uacce_fops_unl_ioctl(struct file *filep,
				 unsigned int cmd, unsigned long arg)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;

	switch (cmd) {
	case UACCE_CMD_START_Q:
		return uacce_start_queue(q);

	case UACCE_CMD_PUT_Q:
		return uacce_put_queue(q);

	default:
		if (!uacce->ops->ioctl)
			return -EINVAL;

		return uacce->ops->ioctl(q, cmd, arg);
	}
}

#ifdef CONFIG_COMPAT
static long uacce_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);

	return uacce_fops_unl_ioctl(filep, cmd, arg);
}
#endif

static int uacce_sva_exit(struct device *dev, struct iommu_sva *handle,
			  void *data)
{
	struct uacce_mm *uacce_mm = data;
	struct uacce_queue *q;

	/*
	 * No new queue can be added concurrently because no caller can have a
	 * reference to this mm. But there may be concurrent calls to
	 * uacce_mm_put(), so we need the lock.
	 */
	mutex_lock(&uacce_mm->lock);
	list_for_each_entry(q, &uacce_mm->queues, list)
		uacce_put_queue(q);
	uacce_mm->mm = NULL;
	mutex_unlock(&uacce_mm->lock);

	return 0;
}

static struct iommu_sva_ops uacce_sva_ops = {
	.mm_exit = uacce_sva_exit,
};

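/*
 * Look up the uacce_mm tracking @mm, or create one, binding the parent
 * device to the mm when the device supports SVA. The new queue is added
 * to the uacce_mm's queue list. Called with uacce->mm_lock held.
 */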
static struct uacce_mm *uacce_mm_get(struct uacce_device *uacce,
				     struct uacce_queue *q,
				     struct mm_struct *mm)
{
	struct uacce_mm *uacce_mm = NULL;
	struct iommu_sva *handle = NULL;
	int ret;

	lockdep_assert_held(&uacce->mm_lock);

	list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
		if (uacce_mm->mm == mm) {
			mutex_lock(&uacce_mm->lock);
			list_add(&q->list, &uacce_mm->queues);
			mutex_unlock(&uacce_mm->lock);
			return uacce_mm;
		}
	}

	uacce_mm = kzalloc(sizeof(*uacce_mm), GFP_KERNEL);
	if (!uacce_mm)
		return NULL;

	if (uacce->flags & UACCE_DEV_SVA) {
		/*
		 * Safe to pass an incomplete uacce_mm, since mm_exit cannot
		 * fire while we hold a reference to the mm.
		 */
		handle = iommu_sva_bind_device(uacce->parent, mm, uacce_mm);
		if (IS_ERR(handle))
			goto err_free;

		ret = iommu_sva_set_ops(handle, &uacce_sva_ops);
		if (ret)
			goto err_unbind;

		uacce_mm->pasid = iommu_sva_get_pasid(handle);
		if (uacce_mm->pasid == IOMMU_PASID_INVALID)
			goto err_unbind;
	}

	uacce_mm->mm = mm;
	uacce_mm->handle = handle;
	INIT_LIST_HEAD(&uacce_mm->queues);
	mutex_init(&uacce_mm->lock);
	list_add(&q->list, &uacce_mm->queues);
	list_add(&uacce_mm->list, &uacce->mm_list);

	return uacce_mm;

err_unbind:
	if (handle)
		iommu_sva_unbind_device(handle);
err_free:
	kfree(uacce_mm);
	return NULL;
}

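/*
 * Remove the queue from its uacce_mm. When the last queue is gone, unbind
 * the SVA handle and free the uacce_mm. Called with uacce->mm_lock held.
 */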
static void uacce_mm_put(struct uacce_queue *q)
{
	struct uacce_mm *uacce_mm = q->uacce_mm;

	lockdep_assert_held(&q->uacce->mm_lock);

	mutex_lock(&uacce_mm->lock);
	list_del(&q->list);
	mutex_unlock(&uacce_mm->lock);

	if (list_empty(&uacce_mm->queues)) {
		if (uacce_mm->handle)
			iommu_sva_unbind_device(uacce_mm->handle);
		list_del(&uacce_mm->list);
		kfree(uacce_mm);
	}
}

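/*
 * Open a queue on the uacce device: allocate the queue, attach it to the
 * caller's mm (binding for SVA when supported) and let the parent driver
 * set up its per-queue resources through the get_queue callback.
 */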
static int uacce_fops_open(struct inode *inode, struct file *filep)
{
	struct uacce_mm *uacce_mm = NULL;
	struct uacce_device *uacce;
	struct uacce_queue *q;
	int ret = 0;

	uacce = xa_load(&uacce_xa, iminor(inode));
	if (!uacce)
		return -ENODEV;

	q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	mutex_lock(&uacce->mm_lock);
	uacce_mm = uacce_mm_get(uacce, q, current->mm);
	mutex_unlock(&uacce->mm_lock);
	if (!uacce_mm) {
		ret = -ENOMEM;
		goto out_with_mem;
	}

	q->uacce = uacce;
	q->uacce_mm = uacce_mm;

	if (uacce->ops->get_queue) {
		ret = uacce->ops->get_queue(uacce, uacce_mm->pasid, q);
		if (ret < 0)
			goto out_with_mm;
	}

	init_waitqueue_head(&q->wait);
	filep->private_data = q;
	uacce->inode = inode;
	q->state = UACCE_Q_INIT;

	return 0;

out_with_mm:
	mutex_lock(&uacce->mm_lock);
	uacce_mm_put(q);
	mutex_unlock(&uacce->mm_lock);
out_with_mem:
	kfree(q);
	return ret;
}

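/* Tear down the queue and drop its mm reference on the final release. */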
static int uacce_fops_release(struct inode *inode, struct file *filep)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;

	uacce_put_queue(q);

	mutex_lock(&uacce->mm_lock);
	uacce_mm_put(q);
	mutex_unlock(&uacce->mm_lock);

	kfree(q);

	return 0;
}

static vm_fault_t uacce_vma_fault(struct vm_fault *vmf)
{
	if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
		return VM_FAULT_SIGBUS;

	return 0;
}

static void uacce_vma_close(struct vm_area_struct *vma)
{
	struct uacce_queue *q = vma->vm_private_data;
	struct uacce_qfile_region *qfr = NULL;

	if (vma->vm_pgoff < UACCE_MAX_REGION)
		qfr = q->qfrs[vma->vm_pgoff];

	kfree(qfr);
}

static const struct vm_operations_struct uacce_vm_ops = {
	.fault = uacce_vma_fault,
	.close = uacce_vma_close,
};

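/*
 * Map a queue file region into the caller's address space. vm_pgoff selects
 * the region type (MMIO or DUS); the parent driver's mmap callback performs
 * the actual mapping.
 */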
static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	struct uacce_qfile_region *qfr;
	enum uacce_qfrt type = UACCE_MAX_REGION;
	int ret = 0;

	if (vma->vm_pgoff < UACCE_MAX_REGION)
		type = vma->vm_pgoff;
	else
		return -EINVAL;

	qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
	if (!qfr)
		return -ENOMEM;

	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
	vma->vm_ops = &uacce_vm_ops;
	vma->vm_private_data = q;
	qfr->type = type;

	mutex_lock(&uacce_mutex);

	if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
		ret = -EINVAL;
		goto out_with_lock;
	}

	if (q->qfrs[type]) {
		ret = -EEXIST;
		goto out_with_lock;
	}

	switch (type) {
	case UACCE_QFRT_MMIO:
		if (!uacce->ops->mmap) {
			ret = -EINVAL;
			goto out_with_lock;
		}

		ret = uacce->ops->mmap(q, vma, qfr);
		if (ret)
			goto out_with_lock;

		break;

	case UACCE_QFRT_DUS:
		if (!uacce->ops->mmap) {
			ret = -EINVAL;
			goto out_with_lock;
		}

		ret = uacce->ops->mmap(q, vma, qfr);
		if (ret)
			goto out_with_lock;
		break;

	default:
		ret = -EINVAL;
		goto out_with_lock;
	}

	q->qfrs[type] = qfr;
	mutex_unlock(&uacce_mutex);

	return ret;

out_with_lock:
	mutex_unlock(&uacce_mutex);
	kfree(qfr);
	return ret;
}

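/* Report the queue as readable when the parent driver says it was updated. */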
static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
	struct uacce_queue *q = file->private_data;
	struct uacce_device *uacce = q->uacce;

	poll_wait(file, &q->wait, wait);
	if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

static const struct file_operations uacce_fops = {
	.owner		= THIS_MODULE,
	.open		= uacce_fops_open,
	.release	= uacce_fops_release,
	.unlocked_ioctl	= uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= uacce_fops_compat_ioctl,
#endif
	.mmap		= uacce_fops_mmap,
	.poll		= uacce_fops_poll,
};

#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)

static ssize_t api_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%s\n", uacce->api_ver);
}

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%u\n", uacce->flags);
}

static ssize_t available_instances_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	if (!uacce->ops->get_available_instances)
		return -ENODEV;

	return sprintf(buf, "%d\n",
		       uacce->ops->get_available_instances(uacce));
}

static ssize_t algorithms_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%s\n", uacce->algs);
}

static ssize_t region_mmio_size_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%lu\n",
		       uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
}

static ssize_t region_dus_size_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sprintf(buf, "%lu\n",
		       uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}

static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);

static struct attribute *uacce_dev_attrs[] = {
	&dev_attr_api.attr,
	&dev_attr_flags.attr,
	&dev_attr_available_instances.attr,
	&dev_attr_algorithms.attr,
	&dev_attr_region_mmio_size.attr,
	&dev_attr_region_dus_size.attr,
	NULL,
};

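/* Hide a region size attribute when that region has no pages configured. */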
static umode_t uacce_dev_is_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct uacce_device *uacce = to_uacce_device(dev);

	if (((attr == &dev_attr_region_mmio_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
	    ((attr == &dev_attr_region_dus_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
		return 0;

	return attr->mode;
}

static struct attribute_group uacce_dev_group = {
	.is_visible	= uacce_dev_is_visible,
	.attrs		= uacce_dev_attrs,
};

__ATTRIBUTE_GROUPS(uacce_dev);

static void uacce_release(struct device *dev)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	kfree(uacce);
}

/**
 * uacce_alloc() - allocate an accelerator
 * @parent: pointer to the uacce parent device
 * @interface: pointer to the uacce_interface to register
 *
 * Returns a uacce pointer on success and an ERR_PTR on failure.
 * The caller must check the returned, negotiated uacce->flags.
 */
struct uacce_device *uacce_alloc(struct device *parent,
				 struct uacce_interface *interface)
{
	unsigned int flags = interface->flags;
	struct uacce_device *uacce;
	int ret;

	uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
	if (!uacce)
		return ERR_PTR(-ENOMEM);

	if (flags & UACCE_DEV_SVA) {
		ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
		if (ret)
			flags &= ~UACCE_DEV_SVA;
	}

	uacce->parent = parent;
	uacce->flags = flags;
	uacce->ops = interface->ops;

	ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
		       GFP_KERNEL);
	if (ret < 0)
		goto err_with_uacce;

	INIT_LIST_HEAD(&uacce->mm_list);
	mutex_init(&uacce->mm_lock);
	device_initialize(&uacce->dev);
	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
	uacce->dev.class = uacce_class;
	uacce->dev.groups = uacce_dev_groups;
	uacce->dev.parent = uacce->parent;
	uacce->dev.release = uacce_release;
	dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);

	return uacce;

err_with_uacce:
	if (flags & UACCE_DEV_SVA)
		iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
	kfree(uacce);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(uacce_alloc);

/**
 * uacce_register() - add the accelerator to cdev and export it to user space
 * @uacce: the initialized uacce device
 *
 * Returns 0 if registration succeeded, or an error.
 */
int uacce_register(struct uacce_device *uacce)
{
	if (!uacce)
		return -ENODEV;

	uacce->cdev = cdev_alloc();
	if (!uacce->cdev)
		return -ENOMEM;

	uacce->cdev->ops = &uacce_fops;
	uacce->cdev->owner = THIS_MODULE;

	return cdev_device_add(uacce->cdev, &uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_register);
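
/*
 * A minimal sketch of how a parent driver might expose its device through
 * this interface; the "my_*" names are illustrative, not part of this API:
 *
 *	struct uacce_interface intf = {
 *		.name	= "my_accel",
 *		.flags	= UACCE_DEV_SVA,
 *		.ops	= &my_uacce_ops,
 *	};
 *
 *	uacce = uacce_alloc(parent_dev, &intf);
 *	if (IS_ERR(uacce))
 *		return PTR_ERR(uacce);
 *
 *	ret = uacce_register(uacce);
 *
 * After uacce_alloc() the caller should re-check uacce->flags, since
 * UACCE_DEV_SVA is cleared when the IOMMU cannot enable SVA.
 * uacce_remove() is the teardown counterpart, called when the parent
 * driver goes away.
 */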

/**
 * uacce_remove() - remove the accelerator
 * @uacce: the accelerator to remove
 */
void uacce_remove(struct uacce_device *uacce)
{
	struct uacce_mm *uacce_mm;
	struct uacce_queue *q;

	if (!uacce)
		return;
	/*
	 * Unmap any remaining user-space mappings so that user space cannot
	 * keep accessing the mmapped area after the parent device is removed.
	 */
	if (uacce->inode)
		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);

	/* ensure no open queue remains */
	mutex_lock(&uacce->mm_lock);
	list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
		/*
		 * We don't take the uacce_mm->lock here. Since we hold the
		 * device's mm_lock, no queue can be added to or removed from
		 * this uacce_mm. We may run concurrently with mm_exit, but
		 * uacce_put_queue() is serialized and iommu_sva_unbind_device()
		 * waits for the lock that mm_exit is holding.
		 */
		list_for_each_entry(q, &uacce_mm->queues, list)
			uacce_put_queue(q);

		if (uacce->flags & UACCE_DEV_SVA) {
			iommu_sva_unbind_device(uacce_mm->handle);
			uacce_mm->handle = NULL;
		}
	}
	mutex_unlock(&uacce->mm_lock);
	/* disable SVA now that no open queues remain */
	if (uacce->flags & UACCE_DEV_SVA)
		iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);

	if (uacce->cdev)
		cdev_device_del(uacce->cdev, &uacce->dev);
	xa_erase(&uacce_xa, uacce->dev_id);
	put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);

static int __init uacce_init(void)
{
	int ret;

	uacce_class = class_create(THIS_MODULE, UACCE_NAME);
	if (IS_ERR(uacce_class))
		return PTR_ERR(uacce_class);

	ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
	if (ret)
		class_destroy(uacce_class);

	return ret;
}

static __exit void uacce_exit(void)
{
	unregister_chrdev_region(uacce_devt, MINORMASK);
	class_destroy(uacce_class);
}

subsys_initcall(uacce_init);
module_exit(uacce_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hisilicon Tech. Co., Ltd.");
MODULE_DESCRIPTION("Accelerator interface for Userland applications");