// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/device/bus.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#include <linux/raspberrypi/vchiq_core.h>
#include <linux/raspberrypi/vchiq_arm.h>
#include <linux/raspberrypi/vchiq_bus.h>
#include <linux/raspberrypi/vchiq_debugfs.h>

#include "vchiq_ioctl.h"

#define DEVICE_NAME "vchiq"

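/* Slot-zero header slots plus 2 * 32 data slots (one set of 32 per direction) */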
#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1

#define BELL0 0x00

#define ARM_DS_ACTIVE BIT(2)

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER

/*
 * The devices implemented in the VCHIQ firmware are not discoverable,
 * so we need to maintain a list of them in order to register them with
 * the interface.
 */
static struct vchiq_device *bcm2835_audio;

static const struct vchiq_platform_info bcm2835_info = {
	.cache_line_size = 32,
};

static const struct vchiq_platform_info bcm2836_info = {
	.cache_line_size = 64,
};

struct vchiq_arm_state {
	/*
	 * Keepalive-related data
	 *
	 * The keepalive mechanism was retro-fitted to VCHIQ to allow active
	 * services to prevent the system from suspending.
	 * This feature is not used on Raspberry Pi devices.
	 */
	struct task_struct *ka_thread;
	struct completion ka_evt;
	atomic_t ka_use_count;
	atomic_t ka_use_ack_count;
	atomic_t ka_release_count;

	rwlock_t susp_res_lock;

	struct vchiq_state *state;

	/*
	 * Global use count for videocore.
	 * This is equal to the sum of the use counts for all services. When
	 * this hits zero the videocore suspend procedure will be initiated.
	 */
	int videocore_use_count;

	/*
	 * Use count to track requests from videocore peer.
	 * This use count is not associated with a service, so needs to be
	 * tracked separately from the state.
	 */
	int peer_use_count;

	/*
	 * Flag to indicate that the first vchiq connect has made it through.
	 * This means that both sides should be fully ready, and we should
	 * be able to suspend after this point.
	 */
	int first_connect;
};

static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
			     struct vchiq_bulk *bulk_params);

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
{
	struct vchiq_state *state = dev_id;
	struct vchiq_drv_mgmt *mgmt;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status;

	mgmt = dev_get_drvdata(state->dev);

	/* Read (and clear) the doorbell */
	status = readl(mgmt->regs + BELL0);

	if (status & ARM_DS_ACTIVE) { /* Was the doorbell rung? */
		remote_event_pollall(state);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/*
 * This function is called by the vchiq stack once it has been connected to
 * the videocore and clients can start to use the stack.
 */
static void vchiq_call_connected_callbacks(struct vchiq_drv_mgmt *drv_mgmt)
{
	int i;

	if (mutex_lock_killable(&drv_mgmt->connected_mutex))
		return;

	for (i = 0; i < drv_mgmt->num_deferred_callbacks; i++)
		drv_mgmt->deferred_callback[i]();

	drv_mgmt->num_deferred_callbacks = 0;
	drv_mgmt->connected = true;
	mutex_unlock(&drv_mgmt->connected_mutex);
}

/*
 * This function is used to defer initialization until the vchiq stack is
 * initialized. If the stack is already initialized, then the callback will
 * be made immediately, otherwise it will be deferred until
 * vchiq_call_connected_callbacks is called.
 */
void vchiq_add_connected_callback(struct vchiq_device *device, void (*callback)(void))
{
	struct vchiq_drv_mgmt *drv_mgmt = device->drv_mgmt;

	if (mutex_lock_killable(&drv_mgmt->connected_mutex))
		return;

	if (drv_mgmt->connected) {
		/* We're already connected. Call the callback immediately. */
		callback();
	} else {
		if (drv_mgmt->num_deferred_callbacks >= VCHIQ_DRV_MAX_CALLBACKS) {
			dev_err(&device->dev,
				"core: deferred callbacks(%d) exceeded the maximum limit(%d)\n",
				drv_mgmt->num_deferred_callbacks, VCHIQ_DRV_MAX_CALLBACKS);
		} else {
			drv_mgmt->deferred_callback[drv_mgmt->num_deferred_callbacks] =
				callback;
			drv_mgmt->num_deferred_callbacks++;
		}
	}
	mutex_unlock(&drv_mgmt->connected_mutex);
}
EXPORT_SYMBOL(vchiq_add_connected_callback);

static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
	struct device *dev = &pdev->dev;
	struct vchiq_drv_mgmt *drv_mgmt = platform_get_drvdata(pdev);
	struct rpi_firmware *fw = drv_mgmt->fw;
	struct vchiq_slot_zero *vchiq_slot_zero;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	drv_mgmt->fragments_size = 2 * drv_mgmt->info->cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(drv_mgmt->fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

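	/* dmam_alloc_coherent() should return page-aligned memory; the slot layout relies on it */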
	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(dev, slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -ENOMEM;

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	drv_mgmt->fragments_base = (char *)slot_mem + slot_mem_size;

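	/*
	 * Build the free-fragment list: the first pointer-sized word of each
	 * free fragment holds the address of the next one, terminated by NULL.
	 */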
	drv_mgmt->free_fragments = drv_mgmt->fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] =
			&drv_mgmt->fragments_base[(i + 1) * drv_mgmt->fragments_size];
	}
	*(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] = NULL;
	sema_init(&drv_mgmt->free_fragments_sema, MAX_FRAGMENTS);
	sema_init(&drv_mgmt->free_fragments_mutex, 1);

	err = vchiq_init_state(state, vchiq_slot_zero, dev);
	if (err)
		return err;

	drv_mgmt->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(drv_mgmt->regs))
		return PTR_ERR(drv_mgmt->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	if (err) {
		dev_err(dev, "failed to send firmware property: %d\n", err);
		return err;
	}

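	/* The firmware writes zero back on success; a nonzero response means it rejected the address */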
	if (channelbase) {
		dev_err(dev, "failed to set channelbase (response: %x)\n",
			channelbase);
		return -ENXIO;
	}

	dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %p, phys %pad)\n",
		vchiq_slot_zero, &slot_phys);

	mutex_init(&drv_mgmt->connected_mutex);
	vchiq_call_connected_callbacks(drv_mgmt);

	return 0;
}

int
vchiq_platform_init_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *platform_state;

	platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
	if (!platform_state)
		return -ENOMEM;

	rwlock_init(&platform_state->susp_res_lock);

	init_completion(&platform_state->ka_evt);
	atomic_set(&platform_state->ka_use_count, 0);
	atomic_set(&platform_state->ka_use_ack_count, 0);
	atomic_set(&platform_state->ka_release_count, 0);

	platform_state->state = state;

	state->platform_state = (struct opaque_platform_state *)platform_state;

	return 0;
}

static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
{
	return (struct vchiq_arm_state *)state->platform_state;
}

static void
vchiq_platform_uninit(struct vchiq_drv_mgmt *mgmt)
{
	struct vchiq_arm_state *arm_state;

	kthread_stop(mgmt->state.sync_thread);
	kthread_stop(mgmt->state.recycle_thread);
	kthread_stop(mgmt->state.slot_handler_thread);

	arm_state = vchiq_platform_get_arm_state(&mgmt->state);
	if (!IS_ERR_OR_NULL(arm_state->ka_thread))
		kthread_stop(arm_state->ka_thread);
}

void vchiq_dump_platform_state(struct seq_file *f)
{
	seq_puts(f, " Platform: 2835 (VC master)\n");
}

#define VCHIQ_INIT_RETRIES 10
int vchiq_initialise(struct vchiq_state *state, struct vchiq_instance **instance_out)
{
	struct vchiq_instance *instance = NULL;
	int i, ret;

	/*
	 * VideoCore may not be ready due to boot up timing.
	 * It may never be ready if kernel and firmware are mismatched, so don't
	 * block forever.
	 */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		if (vchiq_remote_initialised(state))
			break;
		usleep_range(500, 600);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		dev_err(state->dev, "core: %s: Videocore not initialized\n", __func__);
		ret = -ENOTCONN;
		goto failed;
	} else if (i > 0) {
		dev_warn(state->dev, "core: %s: videocore initialized after %d retries\n",
			 __func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		ret = -ENOMEM;
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;

	ret = 0;

failed:
	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_initialise);

void free_bulk_waiter(struct vchiq_instance *instance)
{
	struct bulk_waiter_node *waiter, *next;

	list_for_each_entry_safe(waiter, next,
				 &instance->bulk_waiter_list, list) {
		list_del(&waiter->list);
		dev_dbg(instance->state->dev,
			"arm: bulk_waiter - cleaned up %p for pid %d\n",
			waiter, waiter->pid);
		kfree(waiter);
	}
}

int vchiq_shutdown(struct vchiq_instance *instance)
{
	struct vchiq_state *state = instance->state;
	int ret = 0;

	mutex_lock(&state->mutex);

	/* Remove all services */
	vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	free_bulk_waiter(instance);
	kfree(instance);

	return ret;
}
EXPORT_SYMBOL(vchiq_shutdown);

static int vchiq_is_connected(struct vchiq_instance *instance)
{
	return instance->connected;
}

int vchiq_connect(struct vchiq_instance *instance)
{
	struct vchiq_state *state = instance->state;
	int ret;

	if (mutex_lock_killable(&state->mutex)) {
		dev_dbg(state->dev,
			"core: call to mutex_lock failed\n");
		ret = -EAGAIN;
		goto failed;
	}
	ret = vchiq_connect_internal(state, instance);

	if (!ret)
		instance->connected = 1;

	mutex_unlock(&state->mutex);

failed:
	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_connect);

static int
vchiq_add_service(struct vchiq_instance *instance,
		  const struct vchiq_service_params_kernel *params,
		  unsigned int *phandle)
{
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;
	int srvstate, ret;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	srvstate = vchiq_is_connected(instance)
		? VCHIQ_SRVSTATE_LISTENING
		: VCHIQ_SRVSTATE_HIDDEN;

	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);

	if (service) {
		*phandle = service->handle;
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	return ret;
}

int
vchiq_open_service(struct vchiq_instance *instance,
		   const struct vchiq_service_params_kernel *params,
		   unsigned int *phandle)
{
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;
	int ret = -EINVAL;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);

	if (service) {
		*phandle = service->handle;
		ret = vchiq_open_service_internal(service, current->pid);
		if (ret) {
			vchiq_remove_service(instance, service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_open_service);

int
vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
		    unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
	struct vchiq_bulk bulk_params = {};
	int ret;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:

		bulk_params.offset = (void *)data;
		bulk_params.mode = mode;
		bulk_params.size = size;
		bulk_params.cb_data = userdata;
		bulk_params.dir = VCHIQ_BULK_TRANSMIT;

		ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params);
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		bulk_params.offset = (void *)data;
		bulk_params.mode = mode;
		bulk_params.size = size;
		bulk_params.dir = VCHIQ_BULK_TRANSMIT;

		ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);

int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
		       void *data, unsigned int size, void *userdata,
		       enum vchiq_bulk_mode mode)
{
	struct vchiq_bulk bulk_params = {};
	int ret;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:

		bulk_params.offset = (void *)data;
		bulk_params.mode = mode;
		bulk_params.size = size;
		bulk_params.cb_data = userdata;
		bulk_params.dir = VCHIQ_BULK_RECEIVE;

		ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params);
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		bulk_params.offset = (void *)data;
		bulk_params.mode = mode;
		bulk_params.size = size;
		bulk_params.dir = VCHIQ_BULK_RECEIVE;

		ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(vchiq_bulk_receive);

static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
			     struct vchiq_bulk *bulk_params)
{
	struct vchiq_service *service;
	struct bulk_waiter_node *waiter = NULL, *iter;
	int ret;

	service = find_service_by_handle(instance, handle);
	if (!service)
		return -EINVAL;

	vchiq_service_put(service);

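	/*
	 * Look for a bulk_waiter left behind by a previous (interrupted)
	 * blocking transfer from this thread, so it can be reused.
	 */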
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
		if (iter->pid == current->pid) {
			list_del(&iter->list);
			waiter = iter;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->dma_addr != (dma_addr_t)(uintptr_t)bulk_params->dma_addr) ||
			    (bulk->size != bulk_params->size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&service->state->bulk_waiter_spinlock);
				bulk->waiter = NULL;
				spin_unlock(&service->state->bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter)
			return -ENOMEM;
	}

	bulk_params->waiter = &waiter->bulk_waiter;

	ret = vchiq_bulk_xfer_blocking(instance, handle, bulk_params);
	if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&service->state->bulk_waiter_spinlock);
			bulk->waiter = NULL;
			spin_unlock(&service->state->bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		dev_dbg(instance->state->dev, "arm: saved bulk_waiter %p for pid %d\n",
			waiter, current->pid);
	}

	return ret;
}

static int
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *cb_data, void __user *cb_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
	int insert;

	DEBUG_INITIALISE(mgmt->state.local);

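	/*
	 * completion_insert/completion_remove are free-running counters; their
	 * difference is the queue depth, and the ring index is taken modulo
	 * MAX_COMPLETIONS (a power of two) at insertion.
	 */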
	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		dev_dbg(instance->state->dev, "core: completion queue full\n");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			dev_dbg(instance->state->dev, "arm: service_callback interrupted\n");
			return -EAGAIN;
		} else if (instance->closing) {
			dev_dbg(instance->state->dev, "arm: service_callback closing\n");
			return 0;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->cb_data = cb_data;
	completion->cb_userdata = cb_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return 0;
}

static int
service_single_message(struct vchiq_instance *instance,
		       enum vchiq_reason reason, struct vchiq_service *service,
		       void *cb_data, void __user *cb_userdata)
{
	struct user_service *user_service;

	user_service = (struct user_service *)service->base.userdata;

	dev_dbg(service->state->dev, "arm: msg queue full\n");
	/*
	 * If there is no MESSAGE_AVAILABLE in the completion
	 * queue, add one
	 */
	if ((user_service->message_available_pos -
	     instance->completion_remove) < 0) {
		int ret;

		dev_dbg(instance->state->dev,
			"arm: Inserting extra MESSAGE_AVAILABLE\n");
		ret = add_completion(instance, reason, NULL, user_service,
				     cb_data, cb_userdata);
		if (ret)
			return ret;
	}

	if (wait_for_completion_interruptible(&user_service->remove_event)) {
		dev_dbg(instance->state->dev, "arm: interrupted\n");
		return -EAGAIN;
	} else if (instance->closing) {
		dev_dbg(instance->state->dev, "arm: closing\n");
		return -EINVAL;
	}

	return 0;
}

int
service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
		 struct vchiq_header *header, unsigned int handle,
		 void *cb_data, void __user *cb_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
	struct user_service *user_service;
	struct vchiq_service *service;
	bool skip_completion = false;

	DEBUG_INITIALISE(mgmt->state.local);

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (WARN_ON(!service)) {
		rcu_read_unlock();
		return 0;
	}

	user_service = (struct user_service *)service->base.userdata;

	if (instance->closing) {
		rcu_read_unlock();
		return 0;
	}

	/*
	 * Since we hop between different synchronization mechanisms here,
	 * taking an extra reference keeps the implementation simpler.
	 */
	vchiq_service_get(service);
	rcu_read_unlock();

	dev_dbg(service->state->dev,
		"arm: service %p(%d,%p), reason %d, header %p, instance %p, cb_data %p, cb_userdata %p\n",
		user_service, service->localport, user_service->userdata,
		reason, header, instance, cb_data, cb_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&service->state->msg_queue_spinlock);
		while (user_service->msg_insert ==
		       (user_service->msg_remove + MSG_QUEUE_SIZE)) {
			int ret;

			spin_unlock(&service->state->msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);

			ret = service_single_message(instance, reason, service,
						     cb_data, cb_userdata);
			if (ret) {
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return ret;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&service->state->msg_queue_spinlock);
		}

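		/* There is now space; queue the message (MSG_QUEUE_SIZE is a power of two) */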
		user_service->msg_queue[user_service->msg_insert &
					(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
		      instance->completion_remove) >= 0) ||
		    user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&service->state->msg_queue_spinlock);
		complete(&user_service->insert_event);

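		/* The message is now on the service's queue; don't pass it to add_completion() */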
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	vchiq_service_put(service);

	if (skip_completion)
		return 0;

	return add_completion(instance, reason, header, user_service,
			      cb_data, cb_userdata);
}

void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f)
{
	int i;

	if (!vchiq_remote_initialised(state))
		return;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		seq_printf(f, "Instance %pK: pid %d,%s completions %d/%d\n",
			   instance, instance->pid,
			   instance->connected ? " connected, " :
			   "",
			   instance->completion_insert -
			   instance->completion_remove,
			   MAX_COMPLETIONS);
		instance->mark = 1;
	}
}

void vchiq_dump_platform_service_state(struct seq_file *f,
				       struct vchiq_service *service)
{
	struct user_service *user_service =
		(struct user_service *)service->base.userdata;

	seq_printf(f, " instance %pK", service->instance);

	if ((service->base.callback == service_callback) && user_service->is_vchi) {
		seq_printf(f, ", %d/%d messages",
			   user_service->msg_insert - user_service->msg_remove,
			   MSG_QUEUE_SIZE);

		if (user_service->dequeue_pending)
			seq_puts(f, " (dequeue pending)");
	}

	seq_puts(f, "\n");
}

/*
 * Autosuspend related functionality
 */

static int
vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
			       enum vchiq_reason reason,
			       struct vchiq_header *header,
			       unsigned int service_user,
			       void *cb_data, void __user *cb_userdata)
{
	dev_err(instance->state->dev, "suspend: %s: callback reason %d\n",
		__func__, reason);
	return 0;
}

static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback = vchiq_keepalive_vchiq_callback,
		.version = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(state, &instance);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_initialise failed %d\n", __func__, ret);
		goto exit;
	}

	ret = vchiq_connect(instance);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_connect failed %d\n", __func__, ret);
		goto shutdown;
	}

	ret = vchiq_add_service(instance, &params, &ka_handle);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_add_service failed %d\n",
			__func__, ret);
		goto shutdown;
	}

	while (!kthread_should_stop()) {
		long rc = 0, uc = 0;

		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			dev_dbg(state->dev, "suspend: %s: interrupted\n", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters. Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			ret = vchiq_use_service(instance, ka_handle);
			if (ret) {
				dev_err(state->dev, "suspend: %s: vchiq_use_service error %d\n",
					__func__, ret);
			}
		}
		while (rc--) {
			ret = vchiq_release_service(instance, ka_handle);
			if (ret) {
				dev_err(state->dev, "suspend: %s: vchiq_release_service error %d\n",
					__func__, ret);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}

int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
		   enum USE_TYPE_E use_type)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[64];
	int *entity_uc;
	int local_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (use_type == USE_TYPE_VCHIQ) {
		snprintf(entity, sizeof(entity), "VCHIQ: ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		snprintf(entity, sizeof(entity), "%p4cc:%03d",
			 &service->base.fourcc,
			 service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		dev_err(state->dev, "suspend: %s: null service ptr\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	local_uc = ++arm_state->videocore_use_count;
	++(*entity_uc);

	dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
		entity, *entity_uc, local_uc);

	write_unlock_bh(&arm_state->susp_res_lock);

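	/*
	 * Note: the inner 'ret' below shadows the outer one, so a failed
	 * use-notify leaves this function's return value unchanged.
	 */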
	if (!ret) {
		int ret = 0;
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);

		while (ack_cnt && !ret) {
			/* Send the use notify to videocore */
			ret = vchiq_send_remote_use_active(state);
			if (!ret)
				ack_cnt--;
			else
				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
		}
	}

out:
	dev_dbg(state->dev, "suspend: exit %d\n", ret);
	return ret;
}

int
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[64];
	int *entity_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (service) {
		snprintf(entity, sizeof(entity), "%p4cc:%03d",
			 &service->base.fourcc,
			 service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		snprintf(entity, sizeof(entity), "PEER: ");
		entity_uc = &arm_state->peer_use_count;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
		ret = -EINVAL;
		goto unlock;
	}
	--arm_state->videocore_use_count;
	--(*entity_uc);

	dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
		entity, *entity_uc, arm_state->videocore_use_count);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	dev_dbg(state->dev, "suspend: exit %d\n", ret);
	return ret;
}

void
vchiq_on_remote_use(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}

void
vchiq_on_remote_release(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}

int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}

int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}

struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}

int
vchiq_instance_get_use_count(struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int use_count = 0, i;

	i = 0;
	rcu_read_lock();
	while ((service = __next_service_by_instance(instance->state,
						     instance, &i)))
		use_count += service->service_use_count;
	rcu_read_unlock();
	return use_count;
}

int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}

int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}

void
vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
{
	struct vchiq_service *service;
	int i;

	i = 0;
	rcu_read_lock();
	while ((service = __next_service_by_instance(instance->state,
						     instance, &i)))
		service->trace = trace;
	rcu_read_unlock();
	instance->trace = (trace != 0);
}

int
vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
{
	int ret = -EINVAL;
	struct vchiq_service *service = find_service_by_handle(instance, handle);

	if (service) {
		ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
		vchiq_service_put(service);
	}
	return ret;
}
EXPORT_SYMBOL(vchiq_use_service);

int
vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
{
	int ret = -EINVAL;
	struct vchiq_service *service = find_service_by_handle(instance, handle);

	if (service) {
		ret = vchiq_release_internal(service->state, service);
		vchiq_service_put(service);
	}
	return ret;
}
EXPORT_SYMBOL(vchiq_release_service);

struct service_data_struct {
	int fourcc;
	int clientid;
	int use_count;
};

void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		dev_warn(state->dev,
			 "suspend: Too many active services (%d). Only dumping up to first %d services with non-zero use-count\n",
			 active_services, found);

	for (i = 0; i < found; i++) {
		dev_warn(state->dev,
			 "suspend: %p4cc:%d service count %d %s\n",
			 &service_data[i].fourcc,
			 service_data[i].clientid, service_data[i].use_count,
			 service_data[i].use_count ? nz : "");
	}
	dev_warn(state->dev, "suspend: VCHIQ use count %d\n", peer_count);
	dev_warn(state->dev, "suspend: Overall vchiq instance use count %d\n", vc_use_count);

	kfree(service_data);
}

int
vchiq_check_service(struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state;
	int ret = -EINVAL;

	if (!service || !service->state)
		goto out;

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = 0;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret) {
		dev_err(service->state->dev,
			"suspend: %s: %p4cc:%d service count %d, state count %d\n",
			__func__, &service->base.fourcc, service->client_id,
			service->service_use_count, arm_state->videocore_use_count);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}

void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	dev_dbg(state->dev, "suspend: %d: %s->%s\n",
		state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

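	/* On the first transition to CONNECTED, start the keepalive thread (once only) */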
	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		dev_err(state->dev, "suspend: Couldn't create thread %s\n",
			threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}

static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_info },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_info },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);

static int vchiq_probe(struct platform_device *pdev)
{
	const struct vchiq_platform_info *info;
	struct vchiq_drv_mgmt *mgmt;
	int ret;

	info = of_device_get_match_data(&pdev->dev);
	if (!info)
		return -EINVAL;

	struct device_node *fw_node __free(device_node) =
		of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
	if (!mgmt)
		return -ENOMEM;

	mgmt->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	if (!mgmt->fw)
		return -EPROBE_DEFER;

	mgmt->info = info;
	platform_set_drvdata(pdev, mgmt);

	ret = vchiq_platform_init(pdev, &mgmt->state);
	if (ret) {
		dev_err(&pdev->dev, "arm: Could not initialize vchiq platform\n");
		return ret;
	}

	dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	/*
	 * Simply exit on error since the function handles cleanup in
	 * cases of failure.
	 */
	ret = vchiq_register_chrdev(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "arm: Failed to initialize vchiq cdev\n");
		vchiq_platform_uninit(mgmt);
		return ret;
	}

	vchiq_debugfs_init(&mgmt->state);

	bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");

	return 0;
}

static void vchiq_remove(struct platform_device *pdev)
{
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(&pdev->dev);

	vchiq_device_unregister(bcm2835_audio);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();
	vchiq_platform_uninit(mgmt);
}

static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};

static int __init vchiq_driver_init(void)
{
	int ret;

	ret = bus_register(&vchiq_bus_type);
	if (ret) {
		pr_err("Failed to register %s\n", vchiq_bus_type.name);
		return ret;
	}

	ret = platform_driver_register(&vchiq_driver);
	if (ret) {
		pr_err("Failed to register vchiq driver\n");
		bus_unregister(&vchiq_bus_type);
	}

	return ret;
}
module_init(vchiq_driver_init);

static void __exit vchiq_driver_exit(void)
{
	bus_unregister(&vchiq_bus_type);
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
