1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5 */
6
7 #include <linux/cdev.h>
8 #include <linux/fs.h>
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/compat.h>
12 #include <linux/miscdevice.h>
13
14 #include <linux/raspberrypi/vchiq_core.h>
15 #include <linux/raspberrypi/vchiq_arm.h>
16 #include <linux/raspberrypi/vchiq_debugfs.h>
17
18 #include "vchiq_ioctl.h"
19
20 static const char *const ioctl_names[] = {
21 "CONNECT",
22 "SHUTDOWN",
23 "CREATE_SERVICE",
24 "REMOVE_SERVICE",
25 "QUEUE_MESSAGE",
26 "QUEUE_BULK_TRANSMIT",
27 "QUEUE_BULK_RECEIVE",
28 "AWAIT_COMPLETION",
29 "DEQUEUE_MESSAGE",
30 "GET_CLIENT_ID",
31 "GET_CONFIG",
32 "CLOSE_SERVICE",
33 "USE_SERVICE",
34 "RELEASE_SERVICE",
35 "SET_SERVICE_OPTION",
36 "DUMP_PHYS_MEM",
37 "LIB_VERSION",
38 "CLOSE_DELIVERED"
39 };
40
41 static_assert(ARRAY_SIZE(ioctl_names) == (VCHIQ_IOC_MAX + 1));
42
43 static void
44 user_service_free(void *userdata)
45 {
46 kfree(userdata);
47 }
48
49 static void close_delivered(struct user_service *user_service)
50 {
51 dev_dbg(user_service->service->state->dev,
52 "arm: (handle=%x)\n", user_service->service->handle);
53
54 if (user_service->close_pending) {
55 /* Allow the underlying service to be culled */
56 vchiq_service_put(user_service->service);
57
58 /* Wake the user-thread blocked in close_ or remove_service */
59 complete(&user_service->close_event);
60
61 user_service->close_pending = 0;
62 }
63 }
64
65 struct vchiq_io_copy_callback_context {
66 struct vchiq_element *element;
67 size_t element_offset;
68 unsigned long elements_to_go;
69 };
70
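/*
 * Copy callback handed to vchiq_queue_message(): it walks the caller's
 * element array, copying payload straight from user space into the slot
 * buffer, at most @maxsize bytes per invocation.  Returns the number of
 * bytes copied, or -EFAULT if a user pointer is inaccessible.
 */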
71 static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
72 size_t offset, size_t maxsize)
73 {
74 struct vchiq_io_copy_callback_context *cc = context;
75 size_t total_bytes_copied = 0;
76 size_t bytes_this_round;
77
78 while (total_bytes_copied < maxsize) {
79 if (!cc->elements_to_go)
80 return total_bytes_copied;
81
82 if (!cc->element->size) {
83 cc->elements_to_go--;
84 cc->element++;
85 cc->element_offset = 0;
86 continue;
87 }
88
89 bytes_this_round = min(cc->element->size - cc->element_offset,
90 maxsize - total_bytes_copied);
91
92 if (copy_from_user(dest + total_bytes_copied,
93 cc->element->data + cc->element_offset,
94 bytes_this_round))
95 return -EFAULT;
96
97 cc->element_offset += bytes_this_round;
98 total_bytes_copied += bytes_this_round;
99
100 if (cc->element_offset == cc->element->size) {
101 cc->elements_to_go--;
102 cc->element++;
103 cc->element_offset = 0;
104 }
105 }
106
107 return maxsize;
108 }
109
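/*
 * Marshal a QUEUE_MESSAGE request: validate the element list, then let
 * vchiq_queue_message() pull the payload through the copy callback above.
 * Core status codes are folded into this ioctl ABI's errno convention:
 * -EINVAL becomes -EIO and -EAGAIN (interrupted) becomes -EINTR.
 */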
110 static int
111 vchiq_ioc_queue_message(struct vchiq_instance *instance, unsigned int handle,
112 struct vchiq_element *elements, unsigned long count)
113 {
114 struct vchiq_io_copy_callback_context context;
115 int status = 0;
116 unsigned long i;
117 size_t total_size = 0;
118
119 context.element = elements;
120 context.element_offset = 0;
121 context.elements_to_go = count;
122
123 for (i = 0; i < count; i++) {
124 if (!elements[i].data && elements[i].size != 0)
125 return -EFAULT;
126
127 total_size += elements[i].size;
128 }
129
130 status = vchiq_queue_message(instance, handle, vchiq_ioc_copy_element_data,
131 &context, total_size);
132
133 if (status == -EINVAL)
134 return -EIO;
135 else if (status == -EAGAIN)
136 return -EINTR;
137 return 0;
138 }
139
140 static int vchiq_ioc_create_service(struct vchiq_instance *instance,
141 struct vchiq_create_service *args)
142 {
143 struct user_service *user_service = NULL;
144 struct vchiq_service *service;
145 int status = 0;
146 struct vchiq_service_params_kernel params;
147 int srvstate;
148
149 if (args->is_open && !instance->connected)
150 return -ENOTCONN;
151
152 user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
153 if (!user_service)
154 return -ENOMEM;
155
156 if (args->is_open) {
157 srvstate = VCHIQ_SRVSTATE_OPENING;
158 } else {
159 srvstate = instance->connected ?
160 VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
161 }
162
163 params = (struct vchiq_service_params_kernel) {
164 .fourcc = args->params.fourcc,
165 .callback = service_callback,
166 .userdata = user_service,
167 .version = args->params.version,
168 .version_min = args->params.version_min,
169 };
170 service = vchiq_add_service_internal(instance->state, &params,
171 srvstate, instance,
172 user_service_free);
173 if (!service) {
174 kfree(user_service);
175 return -EEXIST;
176 }
177
178 user_service->service = service;
179 user_service->userdata = args->params.userdata;
180 user_service->instance = instance;
181 user_service->is_vchi = (args->is_vchi != 0);
182 user_service->dequeue_pending = 0;
183 user_service->close_pending = 0;
184 user_service->message_available_pos = instance->completion_remove - 1;
185 user_service->msg_insert = 0;
186 user_service->msg_remove = 0;
187 init_completion(&user_service->insert_event);
188 init_completion(&user_service->remove_event);
189 init_completion(&user_service->close_event);
190
191 if (args->is_open) {
192 status = vchiq_open_service_internal(service, instance->pid);
193 if (status) {
194 vchiq_remove_service(instance, service->handle);
195 return (status == -EAGAIN) ?
196 -EINTR : -EIO;
197 }
198 }
199 args->handle = service->handle;
200
201 return 0;
202 }
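/*
 * Illustrative user-space sketch (not part of this driver): opening a
 * service through VCHIQ_IOC_CREATE_SERVICE might look roughly like the
 * following, assuming the structure layout from vchiq_ioctl.h, the
 * VCHIQ_MAKE_FOURCC() helper from the public headers, and a hypothetical
 * four-character service code.
 *
 *	struct vchiq_create_service args = {
 *		.params = {
 *			.fourcc      = VCHIQ_MAKE_FOURCC('d', 'e', 'm', 'o'),
 *			.version     = 1,
 *			.version_min = 1,
 *		},
 *		.is_open = 1,
 *		.is_vchi = 1,
 *	};
 *
 *	if (ioctl(fd, VCHIQ_IOC_CREATE_SERVICE, &args) == 0)
 *		handle = args.handle;
 */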
203
204 static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
205 struct vchiq_dequeue_message *args)
206 {
207 struct user_service *user_service;
208 struct vchiq_service *service;
209 struct vchiq_header *header;
210 int ret;
211
212 DEBUG_INITIALISE(instance->state->local);
213 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
214 service = find_service_for_instance(instance, args->handle);
215 if (!service)
216 return -EINVAL;
217
218 user_service = (struct user_service *)service->base.userdata;
219 if (user_service->is_vchi == 0) {
220 ret = -EINVAL;
221 goto out;
222 }
223
224 spin_lock(&service->state->msg_queue_spinlock);
225 if (user_service->msg_remove == user_service->msg_insert) {
226 if (!args->blocking) {
227 spin_unlock(&service->state->msg_queue_spinlock);
228 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
229 ret = -EWOULDBLOCK;
230 goto out;
231 }
232 user_service->dequeue_pending = 1;
233 ret = 0;
234 do {
235 spin_unlock(&service->state->msg_queue_spinlock);
236 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
237 if (wait_for_completion_interruptible(&user_service->insert_event)) {
238 dev_dbg(service->state->dev, "arm: DEQUEUE_MESSAGE interrupted\n");
239 ret = -EINTR;
240 break;
241 }
242 spin_lock(&service->state->msg_queue_spinlock);
243 } while (user_service->msg_remove == user_service->msg_insert);
244
245 if (ret)
246 goto out;
247 }
248
249 if (WARN_ON_ONCE((int)(user_service->msg_insert -
250 user_service->msg_remove) < 0)) {
251 spin_unlock(&service->state->msg_queue_spinlock);
252 ret = -EINVAL;
253 goto out;
254 }
255
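	/*
	 * MSG_QUEUE_SIZE must be a power of two, so the (MSG_QUEUE_SIZE - 1)
	 * mask wraps the free-running insert/remove counters onto the
	 * per-service message ring.
	 */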
256 header = user_service->msg_queue[user_service->msg_remove &
257 (MSG_QUEUE_SIZE - 1)];
258 user_service->msg_remove++;
259 spin_unlock(&service->state->msg_queue_spinlock);
260
261 complete(&user_service->remove_event);
262 if (!header) {
263 ret = -ENOTCONN;
264 } else if (header->size <= args->bufsize) {
265 /* Copy to user space if args->buf is not NULL */
266 if (!args->buf || (copy_to_user(args->buf, header->data, header->size) == 0)) {
267 ret = header->size;
268 vchiq_release_message(instance, service->handle, header);
269 } else {
270 ret = -EFAULT;
271 }
272 } else {
273 dev_err(service->state->dev,
274 "arm: header %p: bufsize %x < size %x\n",
275 header, args->bufsize, header->size);
276 WARN(1, "invalid size\n");
277 ret = -EMSGSIZE;
278 }
279 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
280 out:
281 vchiq_service_put(service);
282 return ret;
283 }
284
285 static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
286 struct vchiq_queue_bulk_transfer *args,
287 enum vchiq_bulk_dir dir,
288 enum vchiq_bulk_mode __user *mode)
289 {
290 struct vchiq_service *service;
291 struct bulk_waiter_node *waiter = NULL, *iter;
292 struct vchiq_bulk bulk_params = {};
293 int status = 0;
294 int ret;
295
296 service = find_service_for_instance(instance, args->handle);
297 if (!service)
298 return -EINVAL;
299
300 if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
301 waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
302 if (!waiter) {
303 ret = -ENOMEM;
304 goto out;
305 }
306
307 bulk_params.uoffset = args->data;
308 bulk_params.mode = args->mode;
309 bulk_params.size = args->size;
310 bulk_params.dir = dir;
311 bulk_params.waiter = &waiter->bulk_waiter;
312
313 status = vchiq_bulk_xfer_blocking(instance, args->handle,
314 &bulk_params);
315 } else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
316 mutex_lock(&instance->bulk_waiter_list_mutex);
317 list_for_each_entry(iter, &instance->bulk_waiter_list,
318 list) {
319 if (iter->pid == current->pid) {
320 list_del(&iter->list);
321 waiter = iter;
322 break;
323 }
324 }
325 mutex_unlock(&instance->bulk_waiter_list_mutex);
326 if (!waiter) {
327 dev_err(service->state->dev,
328 "arm: no bulk_waiter found for pid %d\n", current->pid);
329 ret = -ESRCH;
330 goto out;
331 }
332 dev_dbg(service->state->dev, "arm: found bulk_waiter %p for pid %d\n",
333 waiter, current->pid);
334
335 status = vchiq_bulk_xfer_waiting(instance, args->handle,
336 &waiter->bulk_waiter);
337 } else {
338 bulk_params.uoffset = args->data;
339 bulk_params.mode = args->mode;
340 bulk_params.size = args->size;
341 bulk_params.dir = dir;
342 bulk_params.cb_userdata = args->userdata;
343
344 status = vchiq_bulk_xfer_callback(instance, args->handle,
345 &bulk_params);
346 }
347
348 if (!waiter) {
349 ret = 0;
350 goto out;
351 }
352
353 if ((status != -EAGAIN) || fatal_signal_pending(current) ||
354 !waiter->bulk_waiter.bulk) {
355 if (waiter->bulk_waiter.bulk) {
356 /* Cancel the signal when the transfer completes. */
357 spin_lock(&service->state->bulk_waiter_spinlock);
358 waiter->bulk_waiter.bulk->waiter = NULL;
359 spin_unlock(&service->state->bulk_waiter_spinlock);
360 }
361 kfree(waiter);
362 ret = 0;
363 } else {
364 const enum vchiq_bulk_mode mode_waiting =
365 VCHIQ_BULK_MODE_WAITING;
366 waiter->pid = current->pid;
367 mutex_lock(&instance->bulk_waiter_list_mutex);
368 list_add(&waiter->list, &instance->bulk_waiter_list);
369 mutex_unlock(&instance->bulk_waiter_list_mutex);
370 dev_dbg(service->state->dev, "arm: saved bulk_waiter %p for pid %d\n",
371 waiter, current->pid);
372
373 ret = put_user(mode_waiting, mode);
374 }
375 out:
376 vchiq_service_put(service);
377 if (ret)
378 return ret;
379 else if (status == -EINVAL)
380 return -EIO;
381 else if (status == -EAGAIN)
382 return -EINTR;
383 return 0;
384 }
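/*
 * Bulk transfers come in three flavours: BLOCKING waits here for the
 * transfer, CALLBACK completes through the completion queue, and WAITING
 * resumes a transfer whose blocking wait was interrupted.  When a blocking
 * transfer is cut short by a signal, the waiter is parked on
 * instance->bulk_waiter_list keyed by pid and the caller's mode is
 * rewritten to VCHIQ_BULK_MODE_WAITING, so the user library can retry the
 * ioctl in that mode to pick the transfer back up.
 */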
385
386 /* read a user pointer value from an array of pointers in user space */
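/* In a compat syscall the array holds 32-bit pointers, so the stride differs. */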
387 static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index)
388 {
389 int ret;
390
391 if (in_compat_syscall()) {
392 compat_uptr_t ptr32;
393 compat_uptr_t __user *uptr = ubuf;
394
395 ret = get_user(ptr32, uptr + index);
396 if (ret)
397 return ret;
398
399 *buf = compat_ptr(ptr32);
400 } else {
401 uintptr_t ptr, __user *uptr = ubuf;
402
403 ret = get_user(ptr, uptr + index);
404
405 if (ret)
406 return ret;
407
408 *buf = (void __user *)ptr;
409 }
410
411 return 0;
412 }
413
414 struct vchiq_completion_data32 {
415 enum vchiq_reason reason;
416 compat_uptr_t header;
417 compat_uptr_t service_userdata;
418 compat_uptr_t cb_data;
419 };
420
421 static int vchiq_put_completion(struct vchiq_completion_data __user *buf,
422 struct vchiq_completion_data *completion,
423 int index)
424 {
425 struct vchiq_completion_data32 __user *buf32 = (void __user *)buf;
426
427 if (in_compat_syscall()) {
428 struct vchiq_completion_data32 tmp = {
429 .reason = completion->reason,
430 .header = ptr_to_compat(completion->header),
431 .service_userdata = ptr_to_compat(completion->service_userdata),
432 .cb_data = ptr_to_compat(completion->cb_userdata),
433 };
434 if (copy_to_user(&buf32[index], &tmp, sizeof(tmp)))
435 return -EFAULT;
436 } else {
437 if (copy_to_user(&buf[index], completion, sizeof(*completion)))
438 return -EFAULT;
439 }
440
441 return 0;
442 }
443
444 static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
445 struct vchiq_await_completion *args,
446 int __user *msgbufcountp)
447 {
448 int msgbufcount;
449 int remove;
450 int ret;
451
452 DEBUG_INITIALISE(instance->state->local);
453
454 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
455 if (!instance->connected)
456 return -ENOTCONN;
457
458 mutex_lock(&instance->completion_mutex);
459
460 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
461 while ((instance->completion_remove == instance->completion_insert) && !instance->closing) {
462 int rc;
463
464 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
465 mutex_unlock(&instance->completion_mutex);
466 rc = wait_for_completion_interruptible(&instance->insert_event);
467 mutex_lock(&instance->completion_mutex);
468 if (rc) {
469 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
470 dev_dbg(instance->state->dev, "arm: AWAIT_COMPLETION interrupted\n");
471 ret = -EINTR;
472 goto out;
473 }
474 }
475 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
476
477 msgbufcount = args->msgbufcount;
478 remove = instance->completion_remove;
479
480 for (ret = 0; ret < args->count; ret++) {
481 struct vchiq_completion_data_kernel *completion;
482 struct vchiq_completion_data user_completion;
483 struct vchiq_service *service;
484 struct user_service *user_service;
485 struct vchiq_header *header;
486
487 if (remove == instance->completion_insert)
488 break;
489
490 completion = &instance->completions[remove & (MAX_COMPLETIONS - 1)];
491
492 /*
493 * A read memory barrier is needed to stop
494 * prefetch of a stale completion record
495 */
496 rmb();
497
498 service = completion->service_userdata;
499 user_service = service->base.userdata;
500
501 memset(&user_completion, 0, sizeof(user_completion));
502 user_completion = (struct vchiq_completion_data) {
503 .reason = completion->reason,
504 .service_userdata = user_service->userdata,
505 };
506
507 header = completion->header;
508 if (header) {
509 void __user *msgbuf;
510 int msglen;
511
512 msglen = header->size + sizeof(struct vchiq_header);
513 /* This must be a VCHIQ-style service */
514 if (args->msgbufsize < msglen) {
515 dev_err(service->state->dev,
516 "arm: header %p: msgbufsize %x < msglen %x\n",
517 header, args->msgbufsize, msglen);
518 WARN(1, "invalid message size\n");
519 if (ret == 0)
520 ret = -EMSGSIZE;
521 break;
522 }
523 if (msgbufcount <= 0)
524 /* Stall here for lack of a buffer for the message. */
525 break;
526 /* Get the pointer from user space */
527 msgbufcount--;
528 if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
529 msgbufcount)) {
530 if (ret == 0)
531 ret = -EFAULT;
532 break;
533 }
534
535 /* Copy the message to user space */
536 if (copy_to_user(msgbuf, header, msglen)) {
537 if (ret == 0)
538 ret = -EFAULT;
539 break;
540 }
541
542 /* Now it has been copied, the message can be released. */
543 vchiq_release_message(instance, service->handle, header);
544
545 /* The completion must point to the msgbuf. */
546 user_completion.header = msgbuf;
547 }
548
549 if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
550 !instance->use_close_delivered)
551 vchiq_service_put(service);
552
553 user_completion.cb_userdata = completion->cb_userdata;
554
555 if (vchiq_put_completion(args->buf, &user_completion, ret)) {
556 if (ret == 0)
557 ret = -EFAULT;
558 break;
559 }
560
561 /*
562 * Ensure that the above copy has completed
563 * before advancing the remove pointer.
564 */
565 mb();
566 remove++;
567 instance->completion_remove = remove;
568 }
569
570 if (msgbufcount != args->msgbufcount) {
571 if (put_user(msgbufcount, msgbufcountp))
572 ret = -EFAULT;
573 }
574 out:
575 if (ret)
576 complete(&instance->remove_event);
577 mutex_unlock(&instance->completion_mutex);
578 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
579
580 return ret;
581 }
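/*
 * Completions are drained from the ring between completion_remove and
 * completion_insert.  For message completions the header is copied into
 * one of the caller-supplied msgbufs and released on the kernel side; the
 * updated msgbufcount is written back so the user library knows how many
 * of its buffers were consumed.
 */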
582
583 static long
584 vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
585 {
586 struct vchiq_instance *instance = file->private_data;
587 int status = 0;
588 struct vchiq_service *service = NULL;
589 long ret = 0;
590 int i, rc;
591
592 dev_dbg(instance->state->dev, "arm: instance %p, cmd %s, arg %lx\n", instance,
593 ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
594 ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
595
596 switch (cmd) {
597 case VCHIQ_IOC_SHUTDOWN:
598 if (!instance->connected)
599 break;
600
601 /* Remove all services */
602 i = 0;
603 while ((service = next_service_by_instance(instance->state,
604 instance, &i))) {
605 status = vchiq_remove_service(instance, service->handle);
606 vchiq_service_put(service);
607 if (status)
608 break;
609 }
610 service = NULL;
611
612 if (!status) {
613 /* Wake the completion thread and ask it to exit */
614 instance->closing = 1;
615 complete(&instance->insert_event);
616 }
617
618 break;
619
620 case VCHIQ_IOC_CONNECT:
621 if (instance->connected) {
622 ret = -EINVAL;
623 break;
624 }
625 rc = mutex_lock_killable(&instance->state->mutex);
626 if (rc) {
627 dev_err(instance->state->dev,
628 "arm: vchiq: connect: could not lock mutex for state %d: %d\n",
629 instance->state->id, rc);
630 ret = -EINTR;
631 break;
632 }
633 status = vchiq_connect_internal(instance->state, instance);
634 mutex_unlock(&instance->state->mutex);
635
636 if (!status)
637 instance->connected = 1;
638 else
639 dev_err(instance->state->dev,
640 "arm: vchiq: could not connect: %d\n", status);
641 break;
642
643 case VCHIQ_IOC_CREATE_SERVICE: {
644 struct vchiq_create_service __user *argp;
645 struct vchiq_create_service args;
646
647 argp = (void __user *)arg;
648 if (copy_from_user(&args, argp, sizeof(args))) {
649 ret = -EFAULT;
650 break;
651 }
652
653 ret = vchiq_ioc_create_service(instance, &args);
654 if (ret < 0)
655 break;
656
657 if (put_user(args.handle, &argp->handle)) {
658 vchiq_remove_service(instance, args.handle);
659 ret = -EFAULT;
660 }
661 } break;
662
663 case VCHIQ_IOC_CLOSE_SERVICE:
664 case VCHIQ_IOC_REMOVE_SERVICE: {
665 unsigned int handle = (unsigned int)arg;
666 struct user_service *user_service;
667
668 service = find_service_for_instance(instance, handle);
669 if (!service) {
670 ret = -EINVAL;
671 break;
672 }
673
674 user_service = service->base.userdata;
675
676 /*
677 * close_pending is false on first entry, and when the
678 * wait in vchiq_close_service has been interrupted.
679 */
680 if (!user_service->close_pending) {
681 status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
682 vchiq_close_service(instance, service->handle) :
683 vchiq_remove_service(instance, service->handle);
684 if (status)
685 break;
686 }
687
688 /*
689 * close_pending is true once the underlying service
690 * has been closed until the client library calls the
691 * CLOSE_DELIVERED ioctl, signalling close_event.
692 */
693 if (user_service->close_pending &&
694 wait_for_completion_interruptible(&user_service->close_event))
695 status = -EAGAIN;
696 break;
697 }
698
699 case VCHIQ_IOC_USE_SERVICE:
700 case VCHIQ_IOC_RELEASE_SERVICE: {
701 unsigned int handle = (unsigned int)arg;
702
703 service = find_service_for_instance(instance, handle);
704 if (service) {
705 ret = (cmd == VCHIQ_IOC_USE_SERVICE) ?
706 vchiq_use_service_internal(service) :
707 vchiq_release_service_internal(service);
708 if (ret) {
709 dev_err(instance->state->dev,
710 "suspend: cmd %s returned error %ld for service %p4cc:%03d\n",
711 (cmd == VCHIQ_IOC_USE_SERVICE) ?
712 "VCHIQ_IOC_USE_SERVICE" :
713 "VCHIQ_IOC_RELEASE_SERVICE",
714 ret, &service->base.fourcc,
715 service->client_id);
716 }
717 } else {
718 ret = -EINVAL;
719 }
720 } break;
721
722 case VCHIQ_IOC_QUEUE_MESSAGE: {
723 struct vchiq_queue_message args;
724
725 if (copy_from_user(&args, (const void __user *)arg,
726 sizeof(args))) {
727 ret = -EFAULT;
728 break;
729 }
730
731 service = find_service_for_instance(instance, args.handle);
732
733 if (service && (args.count <= MAX_ELEMENTS)) {
734 /* Copy elements into kernel space */
735 struct vchiq_element elements[MAX_ELEMENTS];
736
737 if (copy_from_user(elements, args.elements,
738 args.count * sizeof(struct vchiq_element)) == 0)
739 ret = vchiq_ioc_queue_message(instance, args.handle, elements,
740 args.count);
741 else
742 ret = -EFAULT;
743 } else {
744 ret = -EINVAL;
745 }
746 } break;
747
748 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
749 case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
750 struct vchiq_queue_bulk_transfer args;
751 struct vchiq_queue_bulk_transfer __user *argp;
752
753 enum vchiq_bulk_dir dir =
754 (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
755 VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
756
757 argp = (void __user *)arg;
758 if (copy_from_user(&args, argp, sizeof(args))) {
759 ret = -EFAULT;
760 break;
761 }
762
763 ret = vchiq_irq_queue_bulk_tx_rx(instance, &args,
764 dir, &argp->mode);
765 } break;
766
767 case VCHIQ_IOC_AWAIT_COMPLETION: {
768 struct vchiq_await_completion args;
769 struct vchiq_await_completion __user *argp;
770
771 argp = (void __user *)arg;
772 if (copy_from_user(&args, argp, sizeof(args))) {
773 ret = -EFAULT;
774 break;
775 }
776
777 ret = vchiq_ioc_await_completion(instance, &args,
778 &argp->msgbufcount);
779 } break;
780
781 case VCHIQ_IOC_DEQUEUE_MESSAGE: {
782 struct vchiq_dequeue_message args;
783
784 if (copy_from_user(&args, (const void __user *)arg,
785 sizeof(args))) {
786 ret = -EFAULT;
787 break;
788 }
789
790 ret = vchiq_ioc_dequeue_message(instance, &args);
791 } break;
792
793 case VCHIQ_IOC_GET_CLIENT_ID: {
794 unsigned int handle = (unsigned int)arg;
795
796 ret = vchiq_get_client_id(instance, handle);
797 } break;
798
799 case VCHIQ_IOC_GET_CONFIG: {
800 struct vchiq_get_config args;
801 struct vchiq_config config;
802
803 if (copy_from_user(&args, (const void __user *)arg,
804 sizeof(args))) {
805 ret = -EFAULT;
806 break;
807 }
808 if (args.config_size > sizeof(config)) {
809 ret = -EINVAL;
810 break;
811 }
812
813 vchiq_get_config(&config);
814 if (copy_to_user(args.pconfig, &config, args.config_size)) {
815 ret = -EFAULT;
816 break;
817 }
818 } break;
819
820 case VCHIQ_IOC_SET_SERVICE_OPTION: {
821 struct vchiq_set_service_option args;
822
823 if (copy_from_user(&args, (const void __user *)arg,
824 sizeof(args))) {
825 ret = -EFAULT;
826 break;
827 }
828
829 service = find_service_for_instance(instance, args.handle);
830 if (!service) {
831 ret = -EINVAL;
832 break;
833 }
834
835 ret = vchiq_set_service_option(instance, args.handle, args.option,
836 args.value);
837 } break;
838
839 case VCHIQ_IOC_LIB_VERSION: {
840 unsigned int lib_version = (unsigned int)arg;
841
842 if (lib_version < VCHIQ_VERSION_MIN)
843 ret = -EINVAL;
844 else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
845 instance->use_close_delivered = 1;
846 } break;
847
848 case VCHIQ_IOC_CLOSE_DELIVERED: {
849 unsigned int handle = (unsigned int)arg;
850
851 service = find_closed_service_for_instance(instance, handle);
852 if (service) {
853 struct user_service *user_service =
854 (struct user_service *)service->base.userdata;
855 close_delivered(user_service);
856 } else {
857 ret = -EINVAL;
858 }
859 } break;
860
861 default:
862 ret = -ENOTTY;
863 break;
864 }
865
866 if (service)
867 vchiq_service_put(service);
868
869 if (ret == 0) {
870 if (status == -EINVAL)
871 ret = -EIO;
872 else if (status == -EAGAIN)
873 ret = -EINTR;
874 }
875
876 if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK)) {
877 dev_dbg(instance->state->dev,
878 "arm: ioctl instance %p, cmd %s -> status %d, %ld\n",
879 instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
880 ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
881 } else {
882 dev_dbg(instance->state->dev,
883 "arm: ioctl instance %p, cmd %s -> status %d\n, %ld\n",
884 instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
885 ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
886 }
887
888 return ret;
889 }
890
891 #if defined(CONFIG_COMPAT)
892
893 struct vchiq_service_params32 {
894 int fourcc;
895 compat_uptr_t callback;
896 compat_uptr_t userdata;
897 short version; /* Increment for non-trivial changes */
898 short version_min; /* Update for incompatible changes */
899 };
900
901 struct vchiq_create_service32 {
902 struct vchiq_service_params32 params;
903 int is_open;
904 int is_vchi;
905 unsigned int handle; /* OUT */
906 };
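/*
 * 32-bit layouts of the native ioctl structures: pointers travel as
 * compat_uptr_t so their size matches what 32-bit user space passed in.
 * Each compat handler converts to the native form and reuses the
 * handlers above.
 */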
907
908 #define VCHIQ_IOC_CREATE_SERVICE32 \
909 _IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
910
911 static long
912 vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd,
913 struct vchiq_create_service32 __user *ptrargs32)
914 {
915 struct vchiq_create_service args;
916 struct vchiq_create_service32 args32;
917 struct vchiq_instance *instance = file->private_data;
918 long ret;
919
920 if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
921 return -EFAULT;
922
923 args = (struct vchiq_create_service) {
924 .params = {
925 .fourcc = args32.params.fourcc,
926 .callback = compat_ptr(args32.params.callback),
927 .userdata = compat_ptr(args32.params.userdata),
928 .version = args32.params.version,
929 .version_min = args32.params.version_min,
930 },
931 .is_open = args32.is_open,
932 .is_vchi = args32.is_vchi,
933 .handle = args32.handle,
934 };
935
936 ret = vchiq_ioc_create_service(instance, &args);
937 if (ret < 0)
938 return ret;
939
940 if (put_user(args.handle, &ptrargs32->handle)) {
941 vchiq_remove_service(instance, args.handle);
942 return -EFAULT;
943 }
944
945 return 0;
946 }
947
948 struct vchiq_element32 {
949 compat_uptr_t data;
950 unsigned int size;
951 };
952
953 struct vchiq_queue_message32 {
954 unsigned int handle;
955 unsigned int count;
956 compat_uptr_t elements;
957 };
958
959 #define VCHIQ_IOC_QUEUE_MESSAGE32 \
960 _IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32)
961
962 static long
963 vchiq_compat_ioctl_queue_message(struct file *file,
964 unsigned int cmd,
965 struct vchiq_queue_message32 __user *arg)
966 {
967 struct vchiq_queue_message args;
968 struct vchiq_queue_message32 args32;
969 struct vchiq_service *service;
970 struct vchiq_instance *instance = file->private_data;
971 int ret;
972
973 if (copy_from_user(&args32, arg, sizeof(args32)))
974 return -EFAULT;
975
976 args = (struct vchiq_queue_message) {
977 .handle = args32.handle,
978 .count = args32.count,
979 .elements = compat_ptr(args32.elements),
980 };
981
982 if (args32.count > MAX_ELEMENTS)
983 return -EINVAL;
984
985 service = find_service_for_instance(instance, args.handle);
986 if (!service)
987 return -EINVAL;
988
989 if (args32.elements && args32.count) {
990 struct vchiq_element32 element32[MAX_ELEMENTS];
991 struct vchiq_element elements[MAX_ELEMENTS];
992 unsigned int count;
993
994 if (copy_from_user(&element32, args.elements,
995 sizeof(element32))) {
996 vchiq_service_put(service);
997 return -EFAULT;
998 }
999
1000 for (count = 0; count < args32.count; count++) {
1001 elements[count].data =
1002 compat_ptr(element32[count].data);
1003 elements[count].size = element32[count].size;
1004 }
1005 ret = vchiq_ioc_queue_message(instance, args.handle, elements,
1006 args.count);
1007 } else {
1008 ret = -EINVAL;
1009 }
1010 vchiq_service_put(service);
1011
1012 return ret;
1013 }
1014
1015 struct vchiq_queue_bulk_transfer32 {
1016 unsigned int handle;
1017 compat_uptr_t data;
1018 unsigned int size;
1019 compat_uptr_t userdata;
1020 enum vchiq_bulk_mode mode;
1021 };
1022
1023 #define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
1024 _IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
1025 #define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
1026 _IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
1027
1028 static long
1029 vchiq_compat_ioctl_queue_bulk(struct file *file,
1030 unsigned int cmd,
1031 struct vchiq_queue_bulk_transfer32 __user *argp)
1032 {
1033 struct vchiq_queue_bulk_transfer32 args32;
1034 struct vchiq_queue_bulk_transfer args;
1035 enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
1036 VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
1037
1038 if (copy_from_user(&args32, argp, sizeof(args32)))
1039 return -EFAULT;
1040
1041 args = (struct vchiq_queue_bulk_transfer) {
1042 .handle = args32.handle,
1043 .data = compat_ptr(args32.data),
1044 .size = args32.size,
1045 .userdata = compat_ptr(args32.userdata),
1046 .mode = args32.mode,
1047 };
1048
1049 return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args,
1050 dir, &argp->mode);
1051 }
1052
1053 struct vchiq_await_completion32 {
1054 unsigned int count;
1055 compat_uptr_t buf;
1056 unsigned int msgbufsize;
1057 unsigned int msgbufcount; /* IN/OUT */
1058 compat_uptr_t msgbufs;
1059 };
1060
1061 #define VCHIQ_IOC_AWAIT_COMPLETION32 \
1062 _IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
1063
1064 static long
1065 vchiq_compat_ioctl_await_completion(struct file *file,
1066 unsigned int cmd,
1067 struct vchiq_await_completion32 __user *argp)
1068 {
1069 struct vchiq_await_completion args;
1070 struct vchiq_await_completion32 args32;
1071
1072 if (copy_from_user(&args32, argp, sizeof(args32)))
1073 return -EFAULT;
1074
1075 args = (struct vchiq_await_completion) {
1076 .count = args32.count,
1077 .buf = compat_ptr(args32.buf),
1078 .msgbufsize = args32.msgbufsize,
1079 .msgbufcount = args32.msgbufcount,
1080 .msgbufs = compat_ptr(args32.msgbufs),
1081 };
1082
1083 return vchiq_ioc_await_completion(file->private_data, &args,
1084 &argp->msgbufcount);
1085 }
1086
1087 struct vchiq_dequeue_message32 {
1088 unsigned int handle;
1089 int blocking;
1090 unsigned int bufsize;
1091 compat_uptr_t buf;
1092 };
1093
1094 #define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
1095 _IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
1096
1097 static long
1098 vchiq_compat_ioctl_dequeue_message(struct file *file,
1099 unsigned int cmd,
1100 struct vchiq_dequeue_message32 __user *arg)
1101 {
1102 struct vchiq_dequeue_message32 args32;
1103 struct vchiq_dequeue_message args;
1104
1105 if (copy_from_user(&args32, arg, sizeof(args32)))
1106 return -EFAULT;
1107
1108 args = (struct vchiq_dequeue_message) {
1109 .handle = args32.handle,
1110 .blocking = args32.blocking,
1111 .bufsize = args32.bufsize,
1112 .buf = compat_ptr(args32.buf),
1113 };
1114
1115 return vchiq_ioc_dequeue_message(file->private_data, &args);
1116 }
1117
1118 struct vchiq_get_config32 {
1119 unsigned int config_size;
1120 compat_uptr_t pconfig;
1121 };
1122
1123 #define VCHIQ_IOC_GET_CONFIG32 \
1124 _IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
1125
1126 static long
1127 vchiq_compat_ioctl_get_config(struct file *file,
1128 unsigned int cmd,
1129 struct vchiq_get_config32 __user *arg)
1130 {
1131 struct vchiq_get_config32 args32;
1132 struct vchiq_config config;
1133 void __user *ptr;
1134
1135 if (copy_from_user(&args32, arg, sizeof(args32)))
1136 return -EFAULT;
1137 if (args32.config_size > sizeof(config))
1138 return -EINVAL;
1139
1140 vchiq_get_config(&config);
1141 ptr = compat_ptr(args32.pconfig);
1142 if (copy_to_user(ptr, &config, args32.config_size))
1143 return -EFAULT;
1144
1145 return 0;
1146 }
1147
1148 static long
1149 vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1150 {
1151 void __user *argp = compat_ptr(arg);
1152
1153 switch (cmd) {
1154 case VCHIQ_IOC_CREATE_SERVICE32:
1155 return vchiq_compat_ioctl_create_service(file, cmd, argp);
1156 case VCHIQ_IOC_QUEUE_MESSAGE32:
1157 return vchiq_compat_ioctl_queue_message(file, cmd, argp);
1158 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
1159 case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
1160 return vchiq_compat_ioctl_queue_bulk(file, cmd, argp);
1161 case VCHIQ_IOC_AWAIT_COMPLETION32:
1162 return vchiq_compat_ioctl_await_completion(file, cmd, argp);
1163 case VCHIQ_IOC_DEQUEUE_MESSAGE32:
1164 return vchiq_compat_ioctl_dequeue_message(file, cmd, argp);
1165 case VCHIQ_IOC_GET_CONFIG32:
1166 return vchiq_compat_ioctl_get_config(file, cmd, argp);
1167 default:
1168 return vchiq_ioctl(file, cmd, (unsigned long)argp);
1169 }
1170 }
1171
1172 #endif
1173
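/*
 * Each open of the vchiq character device gets its own vchiq_instance,
 * carrying the per-client completion queue, bulk-waiter list and debugfs
 * entry; vchiq_release() tears it down again.
 */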
1174 static int vchiq_open(struct inode *inode, struct file *file)
1175 {
1176 struct miscdevice *vchiq_miscdev = file->private_data;
1177 struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(vchiq_miscdev->parent);
1178 struct vchiq_state *state = &mgmt->state;
1179 struct vchiq_instance *instance;
1180
1181 dev_dbg(state->dev, "arm: vchiq open\n");
1182
1183 if (!vchiq_remote_initialised(state)) {
1184 dev_dbg(state->dev, "arm: vchiq has no connection to VideoCore\n");
1185 return -ENOTCONN;
1186 }
1187
1188 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1189 if (!instance)
1190 return -ENOMEM;
1191
1192 instance->state = state;
1193 instance->pid = current->tgid;
1194
1195 vchiq_debugfs_add_instance(instance);
1196
1197 init_completion(&instance->insert_event);
1198 init_completion(&instance->remove_event);
1199 mutex_init(&instance->completion_mutex);
1200 mutex_init(&instance->bulk_waiter_list_mutex);
1201 INIT_LIST_HEAD(&instance->bulk_waiter_list);
1202
1203 file->private_data = instance;
1204
1205 return 0;
1206 }
1207
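/*
 * Tear an instance down in order: wake the completion thread and ask it
 * to exit, mark every service owned by the instance for termination, wait
 * for each to reach the FREE state while draining its message queue,
 * release services still sitting in the completion ring, then drop the
 * videocore use count and free the instance.
 */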
1208 static int vchiq_release(struct inode *inode, struct file *file)
1209 {
1210 struct vchiq_instance *instance = file->private_data;
1211 struct vchiq_state *state = instance->state;
1212 struct vchiq_service *service;
1213 int ret = 0;
1214 int i;
1215
1216 dev_dbg(state->dev, "arm: instance=%p\n", instance);
1217
1218 if (!vchiq_remote_initialised(state)) {
1219 ret = -EPERM;
1220 goto out;
1221 }
1222
1223 /* Ensure videocore is awake to allow termination. */
1224 vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);
1225
1226 mutex_lock(&instance->completion_mutex);
1227
1228 /* Wake the completion thread and ask it to exit */
1229 instance->closing = 1;
1230 complete(&instance->insert_event);
1231
1232 mutex_unlock(&instance->completion_mutex);
1233
1234 /* Wake the slot handler if the completion queue is full. */
1235 complete(&instance->remove_event);
1236
1237 /* Mark all services for termination... */
1238 i = 0;
1239 while ((service = next_service_by_instance(state, instance, &i))) {
1240 struct user_service *user_service = service->base.userdata;
1241
1242 /* Wake the slot handler if the msg queue is full. */
1243 complete(&user_service->remove_event);
1244
1245 vchiq_terminate_service_internal(service);
1246 vchiq_service_put(service);
1247 }
1248
1249 /* ...and wait for them to die */
1250 i = 0;
1251 while ((service = next_service_by_instance(state, instance, &i))) {
1252 struct user_service *user_service = service->base.userdata;
1253
1254 wait_for_completion(&service->remove_event);
1255
1256 if (WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1257 vchiq_service_put(service);
1258 break;
1259 }
1260
1261 spin_lock(&service->state->msg_queue_spinlock);
1262
1263 while (user_service->msg_remove != user_service->msg_insert) {
1264 struct vchiq_header *header;
1265 int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);
1266
1267 header = user_service->msg_queue[m];
1268 user_service->msg_remove++;
1269 spin_unlock(&service->state->msg_queue_spinlock);
1270
1271 if (header)
1272 vchiq_release_message(instance, service->handle, header);
1273 spin_lock(&service->state->msg_queue_spinlock);
1274 }
1275
1276 spin_unlock(&service->state->msg_queue_spinlock);
1277
1278 vchiq_service_put(service);
1279 }
1280
1281 /* Release any closed services */
1282 while (instance->completion_remove != instance->completion_insert) {
1283 struct vchiq_completion_data_kernel *completion;
1284 struct vchiq_service *service;
1285
1286 completion = &instance->completions[instance->completion_remove
1287 & (MAX_COMPLETIONS - 1)];
1288 service = completion->service_userdata;
1289 if (completion->reason == VCHIQ_SERVICE_CLOSED) {
1290 struct user_service *user_service =
1291 service->base.userdata;
1292
1293 /* Wake any blocked user-thread */
1294 if (instance->use_close_delivered)
1295 complete(&user_service->close_event);
1296 vchiq_service_put(service);
1297 }
1298 instance->completion_remove++;
1299 }
1300
1301 /* Release the PEER service count. */
1302 vchiq_release_internal(instance->state, NULL);
1303
1304 free_bulk_waiter(instance);
1305
1306 vchiq_debugfs_remove_instance(instance);
1307
1308 kfree(instance);
1309 file->private_data = NULL;
1310
1311 out:
1312 return ret;
1313 }
1314
1315 static const struct file_operations
1316 vchiq_fops = {
1317 .owner = THIS_MODULE,
1318 .unlocked_ioctl = vchiq_ioctl,
1319 #if defined(CONFIG_COMPAT)
1320 .compat_ioctl = vchiq_compat_ioctl,
1321 #endif
1322 .open = vchiq_open,
1323 .release = vchiq_release,
1324 };
1325
1326 static struct miscdevice vchiq_miscdev = {
1327 .fops = &vchiq_fops,
1328 .minor = MISC_DYNAMIC_MINOR,
1329 .name = "vchiq",
1330
1331 };
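/*
 * Registered with a dynamic minor under the name "vchiq"; with the usual
 * udev setup the node typically appears as /dev/vchiq.
 */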
1332
1333 /**
1334 * vchiq_register_chrdev - Register the char driver for vchiq
1335 * and create the necessary class and
1336 * device files in userspace.
1337 * @parent: The parent of the char device.
1338 *
1339 * Return: 0 on success, otherwise an error code.
1340 */
1341 int vchiq_register_chrdev(struct device *parent)
1342 {
1343 vchiq_miscdev.parent = parent;
1344
1345 return misc_register(&vchiq_miscdev);
1346 }
1347
1348 /**
1349 * vchiq_deregister_chrdev - Deregister and cleanup the vchiq char
1350 * driver and device files
1351 */
1352 void vchiq_deregister_chrdev(void)
1353 {
1354 misc_deregister(&vchiq_miscdev);
1355 }
1356