xref: /freebsd/sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c (revision c2546a2196c23f63a553602584813757b7240e36)
1 /**
2  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
3  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions, and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The names of the above-listed copyright holders may not be used
15  *    to endorse or promote products derived from this software without
16  *    specific prior written permission.
17  *
18  * ALTERNATIVELY, this software may be distributed under the terms of the
19  * GNU General Public License ("GPL") version 2, as published by the Free
20  * Software Foundation.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
23  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
26  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 
36 #include "vchiq_core.h"
37 #include "vchiq_ioctl.h"
38 #include "vchiq_arm.h"
39 
40 #define DEVICE_NAME "vchiq"
41 
42 /* Override the default prefix, which would be vchiq_arm (from the filename) */
43 #undef MODULE_PARAM_PREFIX
44 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
45 
46 #define VCHIQ_MINOR 0
47 
48 /* Some per-instance constants */
49 #define MAX_COMPLETIONS 16
50 #define MAX_SERVICES 64
51 #define MAX_ELEMENTS 8
52 #define MSG_QUEUE_SIZE 64
53 
54 #define KEEPALIVE_VER 1
55 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
56 
57 MALLOC_DEFINE(M_VCHIQ, "vchiq_cdev", "VideoCore cdev memroy");
58 
/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

#define SUSPEND_TIMER_TIMEOUT_MS 100
#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000

/* Human-readable names for the videocore suspend states.  The tables are
 * presumably indexed by (state + VC_*_NUM_OFFSET), since the first few
 * enum values are negative -- verify against the users of these tables. */
#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
static const char *const suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",
	"VC_SUSPEND_REJECTED",
	"VC_SUSPEND_FAILED",
	"VC_SUSPEND_IDLE",
	"VC_SUSPEND_REQUESTED",
	"VC_SUSPEND_IN_PROGRESS",
	"VC_SUSPEND_SUSPENDED"
};
#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
static const char *const resume_state_names[] = {
	"VC_RESUME_FAILED",
	"VC_RESUME_IDLE",
	"VC_RESUME_REQUESTED",
	"VC_RESUME_IN_PROGRESS",
	"VC_RESUME_RESUMED"
};
/* The number of times we allow force suspend to timeout before actually
** _forcing_ suspend.  This is to cater for SW which fails to release vchiq
** correctly - we don't want to prevent ARM suspend indefinitely in this case.
*/
#define FORCE_SUSPEND_FAIL_MAX 8

/* The time in ms allowed for videocore to go idle when force suspend has been
 * requested */
#define FORCE_SUSPEND_TIMEOUT_MS 200


/* Forward declarations for static helpers defined later in this file. */
static void suspend_timer_callback(unsigned long context);
#ifdef notyet
static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
#endif
100 
101 
/* Per-service user-side state, attached to a vchiq service as its userdata.
 * For vchi-style services it also carries a small circular message queue
 * plus the semaphores used to hand messages and completions to the user
 * thread. */
typedef struct user_service_struct {
	VCHIQ_SERVICE_T *service;	/* the underlying vchiq service */
	void *userdata;			/* the caller's original userdata */
	VCHIQ_INSTANCE_T instance;	/* owning driver instance */
	char is_vchi;			/* non-zero: queue headers in msg_queue */
	char dequeue_pending;		/* a thread waits in DEQUEUE_MESSAGE */
	char close_pending;		/* CLOSED queued, awaiting CLOSE_DELIVERED */
	int message_available_pos;	/* completion_insert position of the most
					   recent MESSAGE_AVAILABLE completion */
	int msg_insert;			/* msg_queue producer index (free-running;
					   masked with MSG_QUEUE_SIZE - 1) */
	int msg_remove;			/* msg_queue consumer index (free-running) */
	struct semaphore insert_event;	/* signalled when a message is queued */
	struct semaphore remove_event;	/* signalled when a message is dequeued */
	struct semaphore close_event;	/* signalled by the CLOSE_DELIVERED ioctl */
	VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];	/* power-of-two ring */
} USER_SERVICE_T;
117 
/* Records a blocking bulk transfer on behalf of a user thread so that a
 * later VCHIQ_BULK_MODE_WAITING request from the same pid can claim it. */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;	/* the low-level waiter itself */
	int pid;			/* pid of the thread that owns this waiter */
	struct list_head list;		/* linkage on instance->bulk_waiter_list */
};
123 
/* Per-open-file driver instance.  Owns a fixed-size circular queue of
 * completion records that the AWAIT_COMPLETION ioctl drains, plus the
 * list of outstanding blocking bulk transfers for this client. */
struct vchiq_instance_struct {
	VCHIQ_STATE_T *state;		/* the global vchiq state */
	/* Circular completion queue; insert/remove are free-running counters
	   masked with (MAX_COMPLETIONS - 1) on access. */
	VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
	int completion_insert;
	int completion_remove;
	struct semaphore insert_event;	/* signalled when a completion is added */
	struct semaphore remove_event;	/* signalled when completions are consumed */
	struct mutex completion_mutex;	/* serialises AWAIT_COMPLETION processing */

	int connected;			/* CONNECT ioctl has succeeded */
	int closing;			/* set at SHUTDOWN to wake/stop waiters */
	int pid;			/* pid passed to vchiq_open_service_internal */
	int mark;
	int use_close_delivered;	/* client acks CLOSED via CLOSE_DELIVERED */
	int trace;

	struct list_head bulk_waiter_list;	/* outstanding bulk_waiter_nodes */
	struct mutex bulk_waiter_list_mutex;	/* protects bulk_waiter_list */

#ifdef notyet
	VCHIQ_DEBUGFS_NODE_T proc_entry;
#endif
};
147 
/* Context carried through a state-dump operation writing formatted text
 * into a user-space buffer.  NOTE(review): field semantics inferred from
 * names only -- the consumers are not visible in this chunk; verify. */
typedef struct dump_context_struct {
	char __user *buf;	/* destination user buffer */
	size_t actual;		/* presumably bytes produced so far */
	size_t space;		/* presumably bytes remaining in buf */
	loff_t offset;
} DUMP_CONTEXT_T;
154 
static struct cdev *  vchiq_cdev;	/* the /dev/vchiq character device */
VCHIQ_STATE_T g_state;			/* the single global vchiq state */
/* Protects every USER_SERVICE_T message queue (shared by all services). */
static DEFINE_SPINLOCK(msg_queue_spinlock);

/* Names used when tracing ioctls; index must match the VCHIQ_IOC_*
 * numbering -- enforced by the static assert below. */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
	(VCHIQ_IOC_MAX + 1));

static eventhandler_tag vchiq_ehtag = NULL;
static d_open_t		vchiq_open;
static d_close_t	vchiq_close;
static d_ioctl_t	vchiq_ioctl;

/* Character-device switch table for /dev/vchiq. */
static struct cdevsw vchiq_cdevsw = {
	.d_version	= D_VERSION,
	.d_ioctl	= vchiq_ioctl,
	.d_open		= vchiq_open,
	.d_close	= vchiq_close,
	.d_name		= DEVICE_NAME,
};

#if 0
static void
dump_phys_mem(void *virt_addr, uint32_t num_bytes);
#endif
200 
/****************************************************************************
*
*   add_completion
*
***************************************************************************/

/* Append a completion record to the instance's circular completion queue
 * (drained by the AWAIT_COMPLETION ioctl).  Blocks while the queue is
 * full.  Returns VCHIQ_RETRY if the wait is interrupted by a signal,
 * VCHIQ_ERROR if the instance is closing, otherwise VCHIQ_SUCCESS. */
static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
	void *bulk_userdata)
{
	VCHIQ_COMPLETION_DATA_T *completion;
	DEBUG_INITIALISE(g_state.local)

	/* insert/remove are free-running counters: a distance of exactly
	   MAX_COMPLETIONS means the ring is full. */
	while (instance->completion_insert ==
		(instance->completion_remove + MAX_COMPLETIONS)) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"add_completion - completion queue full");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (down_interruptible(&instance->remove_event) != 0) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_ERROR;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* MAX_COMPLETIONS is a power of two, so masking wraps the index. */
	completion =
		 &instance->completions[instance->completion_insert &
		 (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
		record is written out before the insert point. */
	wmb();

	/* Remember where the latest MESSAGE_AVAILABLE sits so the callback
	   can tell whether one is still outstanding in the queue. */
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos =
			instance->completion_insert;
	instance->completion_insert++;

	up(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
265 
/****************************************************************************
*
*   service_callback
*
***************************************************************************/

/* Callback invoked by the vchiq core for every event on a user-created
 * service.  For vchi-style services with a message header, the header is
 * stored in the per-service message ring and at most one MESSAGE_AVAILABLE
 * completion is kept outstanding; everything else is forwarded to the
 * instance's completion queue via add_completion. */
static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a USER_SERVICE_T record containing
	** the original callback and the user state structure, which contains a
	** circular buffer for completion records.
	*/
	USER_SERVICE_T *user_service;
	VCHIQ_SERVICE_T *service;
	VCHIQ_INSTANCE_T instance;
	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (USER_SERVICE_T *)service->base.userdata;
	instance = user_service->instance;

	/* Events on a dead or closing instance are silently dropped. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"service_callback - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		(unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* Message ring full: drop the spinlock and wait for the
		   consumer, re-checking the condition after each wakeup. */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;
				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (down_interruptible(&user_service->remove_event)
				!= 0) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback interrupted");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback closing");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;
		spin_unlock(&msg_queue_spinlock);

		up(&user_service->insert_event);

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			user_service->dequeue_pending = 0;
			return VCHIQ_SUCCESS;
		}

		/* The header is now owned by msg_queue; the completion we
		   add below only signals availability. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
373 
374 /****************************************************************************
375 *
376 *   user_service_free
377 *
378 ***************************************************************************/
379 static void
380 user_service_free(void *userdata)
381 {
382 	USER_SERVICE_T *user_service = userdata;
383 
384 	_sema_destroy(&user_service->insert_event);
385 	_sema_destroy(&user_service->remove_event);
386 
387 	kfree(user_service);
388 }
389 
390 /****************************************************************************
391 *
392 *   close_delivered
393 *
394 ***************************************************************************/
395 static void close_delivered(USER_SERVICE_T *user_service)
396 {
397 	vchiq_log_info(vchiq_arm_log_level,
398 		"close_delivered(handle=%x)",
399 		user_service->service->handle);
400 
401 	if (user_service->close_pending) {
402 		/* Allow the underlying service to be culled */
403 		unlock_service(user_service->service);
404 
405 		/* Wake the user-thread blocked in close_ or remove_service */
406 		up(&user_service->close_event);
407 
408 		user_service->close_pending = 0;
409 	}
410 }
411 
412 /****************************************************************************
413 *
414 *   vchiq_ioctl
415 *
416 ***************************************************************************/
417 
418 static int
419 vchiq_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
420    struct thread *td)
421 {
422 	VCHIQ_INSTANCE_T instance;
423 	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
424 	VCHIQ_SERVICE_T *service = NULL;
425 	int ret = 0;
426 	int i, rc;
427 	DEBUG_INITIALISE(g_state.local)
428 
429 	if ((ret = devfs_get_cdevpriv((void**)&instance))) {
430 		printf("vchiq_ioctl: devfs_get_cdevpriv failed: error %d\n", ret);
431 		return (ret);
432 	}
433 
434 /* XXXBSD: HACK! */
435 #define _IOC_NR(x) ((x) & 0xff)
436 #define	_IOC_TYPE(x)	IOCGROUP(x)
437 
438 	vchiq_log_trace(vchiq_arm_log_level,
439 		 "vchiq_ioctl - instance %x, cmd %s, arg %p",
440 		(unsigned int)instance,
441 		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
442 		(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
443 		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
444 
445 	switch (cmd) {
446 	case VCHIQ_IOC_SHUTDOWN:
447 		if (!instance->connected)
448 			break;
449 
450 		/* Remove all services */
451 		i = 0;
452 		while ((service = next_service_by_instance(instance->state,
453 			instance, &i)) != NULL) {
454 			status = vchiq_remove_service(service->handle);
455 			unlock_service(service);
456 			if (status != VCHIQ_SUCCESS)
457 				break;
458 		}
459 		service = NULL;
460 
461 		if (status == VCHIQ_SUCCESS) {
462 			/* Wake the completion thread and ask it to exit */
463 			instance->closing = 1;
464 			up(&instance->insert_event);
465 		}
466 
467 		break;
468 
469 	case VCHIQ_IOC_CONNECT:
470 		if (instance->connected) {
471 			ret = -EINVAL;
472 			break;
473 		}
474 		rc = lmutex_lock_interruptible(&instance->state->mutex);
475 		if (rc != 0) {
476 			vchiq_log_error(vchiq_arm_log_level,
477 				"vchiq: connect: could not lock mutex for "
478 				"state %d: %d",
479 				instance->state->id, rc);
480 			ret = -EINTR;
481 			break;
482 		}
483 		status = vchiq_connect_internal(instance->state, instance);
484 		lmutex_unlock(&instance->state->mutex);
485 
486 		if (status == VCHIQ_SUCCESS)
487 			instance->connected = 1;
488 		else
489 			vchiq_log_error(vchiq_arm_log_level,
490 				"vchiq: could not connect: %d", status);
491 		break;
492 
493 	case VCHIQ_IOC_CREATE_SERVICE: {
494 		VCHIQ_CREATE_SERVICE_T args;
495 		USER_SERVICE_T *user_service = NULL;
496 		void *userdata;
497 		int srvstate;
498 
499 		memcpy(&args, (const void*)arg, sizeof(args));
500 
501 		user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
502 		if (!user_service) {
503 			ret = -ENOMEM;
504 			break;
505 		}
506 
507 		if (args.is_open) {
508 			if (!instance->connected) {
509 				ret = -ENOTCONN;
510 				kfree(user_service);
511 				break;
512 			}
513 			srvstate = VCHIQ_SRVSTATE_OPENING;
514 		} else {
515 			srvstate =
516 				 instance->connected ?
517 				 VCHIQ_SRVSTATE_LISTENING :
518 				 VCHIQ_SRVSTATE_HIDDEN;
519 		}
520 
521 		userdata = args.params.userdata;
522 		args.params.callback = service_callback;
523 		args.params.userdata = user_service;
524 		service = vchiq_add_service_internal(
525 				instance->state,
526 				&args.params, srvstate,
527 				instance, user_service_free);
528 
529 		if (service != NULL) {
530 			user_service->service = service;
531 			user_service->userdata = userdata;
532 			user_service->instance = instance;
533 			user_service->is_vchi = (args.is_vchi != 0);
534 			user_service->dequeue_pending = 0;
535 			user_service->close_pending = 0;
536 			user_service->message_available_pos =
537 				instance->completion_remove - 1;
538 			user_service->msg_insert = 0;
539 			user_service->msg_remove = 0;
540 			_sema_init(&user_service->insert_event, 0);
541 			_sema_init(&user_service->remove_event, 0);
542 			_sema_init(&user_service->close_event, 0);
543 
544 			if (args.is_open) {
545 				status = vchiq_open_service_internal
546 					(service, instance->pid);
547 				if (status != VCHIQ_SUCCESS) {
548 					vchiq_remove_service(service->handle);
549 					service = NULL;
550 					ret = (status == VCHIQ_RETRY) ?
551 						-EINTR : -EIO;
552 					break;
553 				}
554 			}
555 
556 #ifdef VCHIQ_IOCTL_DEBUG
557 			printf("%s: [CREATE SERVICE] handle = %08x\n", __func__, service->handle);
558 #endif
559 			memcpy((void *)
560 				&(((VCHIQ_CREATE_SERVICE_T*)
561 					arg)->handle),
562 				(const void *)&service->handle,
563 				sizeof(service->handle));
564 
565 			service = NULL;
566 		} else {
567 			ret = -EEXIST;
568 			kfree(user_service);
569 		}
570 	} break;
571 
572 	case VCHIQ_IOC_CLOSE_SERVICE: {
573 		VCHIQ_SERVICE_HANDLE_T handle;
574 
575 		memcpy(&handle, (const void*)arg, sizeof(handle));
576 
577 #ifdef VCHIQ_IOCTL_DEBUG
578 		printf("%s: [CLOSE SERVICE] handle = %08x\n", __func__, handle);
579 #endif
580 
581 		service = find_service_for_instance(instance, handle);
582 		if (service != NULL) {
583 			USER_SERVICE_T *user_service =
584 				(USER_SERVICE_T *)service->base.userdata;
585 			/* close_pending is false on first entry, and when the
586                            wait in vchiq_close_service has been interrupted. */
587 			if (!user_service->close_pending) {
588 				status = vchiq_close_service(service->handle);
589 				if (status != VCHIQ_SUCCESS)
590 					break;
591 			}
592 
593 			/* close_pending is true once the underlying service
594 			   has been closed until the client library calls the
595 			   CLOSE_DELIVERED ioctl, signalling close_event. */
596 			if (user_service->close_pending &&
597 				down_interruptible(&user_service->close_event))
598 				status = VCHIQ_RETRY;
599 		}
600 		else
601 			ret = -EINVAL;
602 	} break;
603 
604 	case VCHIQ_IOC_REMOVE_SERVICE: {
605 		VCHIQ_SERVICE_HANDLE_T handle;
606 
607 		memcpy(&handle, (const void*)arg, sizeof(handle));
608 
609 #ifdef VCHIQ_IOCTL_DEBUG
610 		printf("%s: [REMOVE SERVICE] handle = %08x\n", __func__, handle);
611 #endif
612 
613 		service = find_service_for_instance(instance, handle);
614 		if (service != NULL) {
615 			USER_SERVICE_T *user_service =
616 				(USER_SERVICE_T *)service->base.userdata;
617 			/* close_pending is false on first entry, and when the
618                            wait in vchiq_close_service has been interrupted. */
619 			if (!user_service->close_pending) {
620 				status = vchiq_remove_service(service->handle);
621 				if (status != VCHIQ_SUCCESS)
622 					break;
623 			}
624 
625 			/* close_pending is true once the underlying service
626 			   has been closed until the client library calls the
627 			   CLOSE_DELIVERED ioctl, signalling close_event. */
628 			if (user_service->close_pending &&
629 				down_interruptible(&user_service->close_event))
630 				status = VCHIQ_RETRY;
631 		}
632 		else
633 			ret = -EINVAL;
634 	} break;
635 
636 	case VCHIQ_IOC_USE_SERVICE:
637 	case VCHIQ_IOC_RELEASE_SERVICE:	{
638 		VCHIQ_SERVICE_HANDLE_T handle;
639 
640 		memcpy(&handle, (const void*)arg, sizeof(handle));
641 
642 #ifdef VCHIQ_IOCTL_DEBUG
643 		printf("%s: [%s SERVICE] handle = %08x\n", __func__,
644 		    cmd == VCHIQ_IOC_USE_SERVICE ? "USE" : "RELEASE", handle);
645 #endif
646 
647 		service = find_service_for_instance(instance, handle);
648 		if (service != NULL) {
649 			status = (cmd == VCHIQ_IOC_USE_SERVICE)	?
650 				vchiq_use_service_internal(service) :
651 				vchiq_release_service_internal(service);
652 			if (status != VCHIQ_SUCCESS) {
653 				vchiq_log_error(vchiq_susp_log_level,
654 					"%s: cmd %s returned error %d for "
655 					"service %c%c%c%c:%8x",
656 					__func__,
657 					(cmd == VCHIQ_IOC_USE_SERVICE) ?
658 						"VCHIQ_IOC_USE_SERVICE" :
659 						"VCHIQ_IOC_RELEASE_SERVICE",
660 					status,
661 					VCHIQ_FOURCC_AS_4CHARS(
662 						service->base.fourcc),
663 					service->client_id);
664 				ret = -EINVAL;
665 			}
666 		} else
667 			ret = -EINVAL;
668 	} break;
669 
670 	case VCHIQ_IOC_QUEUE_MESSAGE: {
671 		VCHIQ_QUEUE_MESSAGE_T args;
672 		memcpy(&args, (const void*)arg, sizeof(args));
673 
674 #ifdef VCHIQ_IOCTL_DEBUG
675 		printf("%s: [QUEUE MESSAGE] handle = %08x\n", __func__, args.handle);
676 #endif
677 
678 		service = find_service_for_instance(instance, args.handle);
679 
680 		if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
681 			/* Copy elements into kernel space */
682 			VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
683 			if (copy_from_user(elements, args.elements,
684 				args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
685 				status = vchiq_queue_message
686 					(args.handle,
687 					elements, args.count);
688 			else
689 				ret = -EFAULT;
690 		} else {
691 			ret = -EINVAL;
692 		}
693 	} break;
694 
695 	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
696 	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
697 		VCHIQ_QUEUE_BULK_TRANSFER_T args;
698 		struct bulk_waiter_node *waiter = NULL;
699 		VCHIQ_BULK_DIR_T dir =
700 			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
701 			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
702 
703 		memcpy(&args, (const void*)arg, sizeof(args));
704 
705 		service = find_service_for_instance(instance, args.handle);
706 		if (!service) {
707 			ret = -EINVAL;
708 			break;
709 		}
710 
711 		if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
712 			waiter = kzalloc(sizeof(struct bulk_waiter_node),
713 				GFP_KERNEL);
714 			if (!waiter) {
715 				ret = -ENOMEM;
716 				break;
717 			}
718 			args.userdata = &waiter->bulk_waiter;
719 		} else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
720 			struct list_head *pos;
721 			lmutex_lock(&instance->bulk_waiter_list_mutex);
722 			list_for_each(pos, &instance->bulk_waiter_list) {
723 				if (list_entry(pos, struct bulk_waiter_node,
724 					list)->pid == current->p_pid) {
725 					waiter = list_entry(pos,
726 						struct bulk_waiter_node,
727 						list);
728 					list_del(pos);
729 					break;
730 				}
731 
732 			}
733 			lmutex_unlock(&instance->bulk_waiter_list_mutex);
734 			if (!waiter) {
735 				vchiq_log_error(vchiq_arm_log_level,
736 					"no bulk_waiter found for pid %d",
737 					current->p_pid);
738 				ret = -ESRCH;
739 				break;
740 			}
741 			vchiq_log_info(vchiq_arm_log_level,
742 				"found bulk_waiter %x for pid %d",
743 				(unsigned int)waiter, current->p_pid);
744 			args.userdata = &waiter->bulk_waiter;
745 		}
746 		status = vchiq_bulk_transfer
747 			(args.handle,
748 			 VCHI_MEM_HANDLE_INVALID,
749 			 args.data, args.size,
750 			 args.userdata, args.mode,
751 			 dir);
752 		if (!waiter)
753 			break;
754 		if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
755 			!waiter->bulk_waiter.bulk) {
756 			if (waiter->bulk_waiter.bulk) {
757 				/* Cancel the signal when the transfer
758 				** completes. */
759 				spin_lock(&bulk_waiter_spinlock);
760 				waiter->bulk_waiter.bulk->userdata = NULL;
761 				spin_unlock(&bulk_waiter_spinlock);
762 			}
763 			_sema_destroy(&waiter->bulk_waiter.event);
764 			kfree(waiter);
765 		} else {
766 			const VCHIQ_BULK_MODE_T mode_waiting =
767 				VCHIQ_BULK_MODE_WAITING;
768 			waiter->pid = current->p_pid;
769 			lmutex_lock(&instance->bulk_waiter_list_mutex);
770 			list_add(&waiter->list, &instance->bulk_waiter_list);
771 			lmutex_unlock(&instance->bulk_waiter_list_mutex);
772 			vchiq_log_info(vchiq_arm_log_level,
773 				"saved bulk_waiter %x for pid %d",
774 				(unsigned int)waiter, current->p_pid);
775 
776 			memcpy((void *)
777 				&(((VCHIQ_QUEUE_BULK_TRANSFER_T *)
778 					arg)->mode),
779 				(const void *)&mode_waiting,
780 				sizeof(mode_waiting));
781 		}
782 	} break;
783 
784 	case VCHIQ_IOC_AWAIT_COMPLETION: {
785 		VCHIQ_AWAIT_COMPLETION_T args;
786 		int count = 0;
787 
788 		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
789 		if (!instance->connected) {
790 			ret = -ENOTCONN;
791 			break;
792 		}
793 
794 		memcpy(&args, (const void*)arg, sizeof(args));
795 
796 		lmutex_lock(&instance->completion_mutex);
797 
798 		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
799 		while ((instance->completion_remove ==
800 			instance->completion_insert)
801 			&& !instance->closing) {
802 			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
803 			lmutex_unlock(&instance->completion_mutex);
804 			rc = down_interruptible(&instance->insert_event);
805 			lmutex_lock(&instance->completion_mutex);
806 			if (rc != 0) {
807 				DEBUG_TRACE(AWAIT_COMPLETION_LINE);
808 				vchiq_log_info(vchiq_arm_log_level,
809 					"AWAIT_COMPLETION interrupted");
810 				ret = -EINTR;
811 				break;
812 			}
813 		}
814 		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
815 
816 		/* A read memory barrier is needed to stop prefetch of a stale
817 		** completion record
818 		*/
819 		rmb();
820 
821 		if (ret == 0) {
822 			int msgbufcount = args.msgbufcount;
823 			for (count = 0; count < args.count; count++) {
824 				VCHIQ_COMPLETION_DATA_T *completion;
825 				VCHIQ_SERVICE_T *service1;
826 				USER_SERVICE_T *user_service;
827 				VCHIQ_HEADER_T *header;
828 				if (instance->completion_remove ==
829 					instance->completion_insert)
830 					break;
831 				completion = &instance->completions[
832 					instance->completion_remove &
833 					(MAX_COMPLETIONS - 1)];
834 
835 				service1 = completion->service_userdata;
836 				user_service = service1->base.userdata;
837 				completion->service_userdata =
838 					user_service->userdata;
839 
840 				header = completion->header;
841 				if (header) {
842 					void __user *msgbuf;
843 					int msglen;
844 
845 					msglen = header->size +
846 						sizeof(VCHIQ_HEADER_T);
847 					/* This must be a VCHIQ-style service */
848 					if (args.msgbufsize < msglen) {
849 						vchiq_log_error(
850 							vchiq_arm_log_level,
851 							"header %x: msgbufsize"
852 							" %x < msglen %x",
853 							(unsigned int)header,
854 							args.msgbufsize,
855 							msglen);
856 						WARN(1, "invalid message "
857 							"size\n");
858 						if (count == 0)
859 							ret = -EMSGSIZE;
860 						break;
861 					}
862 					if (msgbufcount <= 0)
863 						/* Stall here for lack of a
864 						** buffer for the message. */
865 						break;
866 					/* Get the pointer from user space */
867 					msgbufcount--;
868 					if (copy_from_user(&msgbuf,
869 						(const void __user *)
870 						&args.msgbufs[msgbufcount],
871 						sizeof(msgbuf)) != 0) {
872 						if (count == 0)
873 							ret = -EFAULT;
874 						break;
875 					}
876 
877 					/* Copy the message to user space */
878 					if (copy_to_user(msgbuf, header,
879 						msglen) != 0) {
880 						if (count == 0)
881 							ret = -EFAULT;
882 						break;
883 					}
884 
885 					/* Now it has been copied, the message
886 					** can be released. */
887 					vchiq_release_message(service1->handle,
888 						header);
889 
890 					/* The completion must point to the
891 					** msgbuf. */
892 					completion->header = msgbuf;
893 				}
894 
895 				if ((completion->reason ==
896 					VCHIQ_SERVICE_CLOSED) &&
897 					!instance->use_close_delivered)
898 					unlock_service(service1);
899 
900 				if (copy_to_user((void __user *)(
901 					(size_t)args.buf +
902 					count * sizeof(VCHIQ_COMPLETION_DATA_T)),
903 					completion,
904 					sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
905 						if (ret == 0)
906 							ret = -EFAULT;
907 					break;
908 				}
909 
910 				instance->completion_remove++;
911 			}
912 
913 			if (msgbufcount != args.msgbufcount) {
914 				memcpy((void __user *)
915 					&((VCHIQ_AWAIT_COMPLETION_T *)arg)->
916 						msgbufcount,
917 					&msgbufcount,
918 					sizeof(msgbufcount));
919 			}
920 
921 			 if (count != args.count)
922 			 {
923 				memcpy((void __user *)
924 					&((VCHIQ_AWAIT_COMPLETION_T *)arg)->count,
925 					&count, sizeof(count));
926 			}
927 		}
928 
929 		if (count != 0)
930 			up(&instance->remove_event);
931 
932 		if ((ret == 0) && instance->closing)
933 			ret = -ENOTCONN;
934 		/*
935 		 * XXXBSD: ioctl return codes are not negative as in linux, so
936 		 * we can not indicate success with positive number of passed
937 		 * messages
938 		 */
939 		if (ret > 0)
940 			ret = 0;
941 
942 		lmutex_unlock(&instance->completion_mutex);
943 		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
944 	} break;
945 
946 	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
947 		VCHIQ_DEQUEUE_MESSAGE_T args;
948 		USER_SERVICE_T *user_service;
949 		VCHIQ_HEADER_T *header;
950 
951 		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
952 		memcpy(&args, (const void*)arg, sizeof(args));
953 		service = find_service_for_instance(instance, args.handle);
954 		if (!service) {
955 			ret = -EINVAL;
956 			break;
957 		}
958 		user_service = (USER_SERVICE_T *)service->base.userdata;
959 		if (user_service->is_vchi == 0) {
960 			ret = -EINVAL;
961 			break;
962 		}
963 
964 		spin_lock(&msg_queue_spinlock);
965 		if (user_service->msg_remove == user_service->msg_insert) {
966 			if (!args.blocking) {
967 				spin_unlock(&msg_queue_spinlock);
968 				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
969 				ret = -EWOULDBLOCK;
970 				break;
971 			}
972 			user_service->dequeue_pending = 1;
973 			do {
974 				spin_unlock(&msg_queue_spinlock);
975 				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
976 				if (down_interruptible(
977 					&user_service->insert_event) != 0) {
978 					vchiq_log_info(vchiq_arm_log_level,
979 						"DEQUEUE_MESSAGE interrupted");
980 					ret = -EINTR;
981 					break;
982 				}
983 				spin_lock(&msg_queue_spinlock);
984 			} while (user_service->msg_remove ==
985 				user_service->msg_insert);
986 
987 			if (ret)
988 				break;
989 		}
990 
991 		BUG_ON((int)(user_service->msg_insert -
992 			user_service->msg_remove) < 0);
993 
994 		header = user_service->msg_queue[user_service->msg_remove &
995 			(MSG_QUEUE_SIZE - 1)];
996 		user_service->msg_remove++;
997 		spin_unlock(&msg_queue_spinlock);
998 
999 		up(&user_service->remove_event);
1000 		if (header == NULL)
1001 			ret = -ENOTCONN;
1002 		else if (header->size <= args.bufsize) {
1003 			/* Copy to user space if msgbuf is not NULL */
1004 			if ((args.buf == NULL) ||
1005 				(copy_to_user((void __user *)args.buf,
1006 				header->data,
1007 				header->size) == 0)) {
1008 				args.bufsize = header->size;
1009 				memcpy((void *)arg, &args,
1010 				    sizeof(args));
1011 				vchiq_release_message(
1012 					service->handle,
1013 					header);
1014 			} else
1015 				ret = -EFAULT;
1016 		} else {
1017 			vchiq_log_error(vchiq_arm_log_level,
1018 				"header %x: bufsize %x < size %x",
1019 				(unsigned int)header, args.bufsize,
1020 				header->size);
1021 			WARN(1, "invalid size\n");
1022 			ret = -EMSGSIZE;
1023 		}
1024 		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1025 	} break;
1026 
1027 	case VCHIQ_IOC_GET_CLIENT_ID: {
1028 		VCHIQ_SERVICE_HANDLE_T handle;
1029 
1030 		memcpy(&handle, (const void*)arg, sizeof(handle));
1031 
1032 		ret = vchiq_get_client_id(handle);
1033 	} break;
1034 
1035 	case VCHIQ_IOC_GET_CONFIG: {
1036 		VCHIQ_GET_CONFIG_T args;
1037 		VCHIQ_CONFIG_T config;
1038 
1039 		memcpy(&args, (const void*)arg, sizeof(args));
1040 		if (args.config_size > sizeof(config)) {
1041 			ret = -EINVAL;
1042 			break;
1043 		}
1044 		status = vchiq_get_config(instance, args.config_size, &config);
1045 		if (status == VCHIQ_SUCCESS) {
1046 			if (copy_to_user((void __user *)args.pconfig,
1047 				    &config, args.config_size) != 0) {
1048 				ret = -EFAULT;
1049 				break;
1050 			}
1051 		}
1052 	} break;
1053 
1054 	case VCHIQ_IOC_SET_SERVICE_OPTION: {
1055 		VCHIQ_SET_SERVICE_OPTION_T args;
1056 
1057 		memcpy(&args, (const void*)arg, sizeof(args));
1058 
1059 		service = find_service_for_instance(instance, args.handle);
1060 		if (!service) {
1061 			ret = -EINVAL;
1062 			break;
1063 		}
1064 
1065 		status = vchiq_set_service_option(
1066 				args.handle, args.option, args.value);
1067 	} break;
1068 
1069 	case VCHIQ_IOC_DUMP_PHYS_MEM: {
1070 		VCHIQ_DUMP_MEM_T  args;
1071 
1072 		memcpy(&args, (const void*)arg, sizeof(args));
1073 		printf("IMPLEMENT ME: %s:%d\n", __FILE__, __LINE__);
1074 #if 0
1075 		dump_phys_mem(args.virt_addr, args.num_bytes);
1076 #endif
1077 	} break;
1078 
1079 	case VCHIQ_IOC_LIB_VERSION: {
1080 		unsigned int lib_version = (unsigned int)arg;
1081 
1082 		if (lib_version < VCHIQ_VERSION_MIN)
1083 			ret = -EINVAL;
1084 		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
1085 			instance->use_close_delivered = 1;
1086 	} break;
1087 
1088 	case VCHIQ_IOC_CLOSE_DELIVERED: {
1089 		VCHIQ_SERVICE_HANDLE_T handle;
1090 		memcpy(&handle, (const void*)arg, sizeof(handle));
1091 
1092 		service = find_closed_service_for_instance(instance, handle);
1093 		if (service != NULL) {
1094 			USER_SERVICE_T *user_service =
1095 				(USER_SERVICE_T *)service->base.userdata;
1096 			close_delivered(user_service);
1097 		}
1098 		else
1099 			ret = -EINVAL;
1100 	} break;
1101 
1102 	default:
1103 		ret = -ENOTTY;
1104 		break;
1105 	}
1106 
1107 	if (service)
1108 		unlock_service(service);
1109 
1110 	if (ret == 0) {
1111 		if (status == VCHIQ_ERROR)
1112 			ret = -EIO;
1113 		else if (status == VCHIQ_RETRY)
1114 			ret = -EINTR;
1115 	}
1116 
1117 	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
1118 		(ret != -EWOULDBLOCK))
1119 		vchiq_log_info(vchiq_arm_log_level,
1120 			"  ioctl instance %lx, cmd %s -> status %d, %d",
1121 			(unsigned long)instance,
1122 			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1123 				ioctl_names[_IOC_NR(cmd)] :
1124 				"<invalid>",
1125 			status, ret);
1126 	else
1127 		vchiq_log_trace(vchiq_arm_log_level,
1128 			"  ioctl instance %lx, cmd %s -> status %d, %d",
1129 			(unsigned long)instance,
1130 			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1131 				ioctl_names[_IOC_NR(cmd)] :
1132 				"<invalid>",
1133 			status, ret);
1134 
1135 	/* XXXBSD: report BSD-style error to userland */
1136 	if (ret < 0)
1137 		ret = -ret;
1138 
1139 	return ret;
1140 }
1141 
1142 static void
1143 instance_dtr(void *data)
1144 {
1145 
1146    free(data, M_VCHIQ);
1147 }
1148 
1149 /****************************************************************************
1150 *
1151 *   vchiq_open
1152 *
1153 ***************************************************************************/
1154 
1155 static int
1156 vchiq_open(struct cdev *dev, int flags, int fmt __unused, struct thread *td)
1157 {
1158 	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
1159 	/* XXXBSD: do we really need this check? */
1160 	if (1) {
1161 		VCHIQ_STATE_T *state = vchiq_get_state();
1162 		VCHIQ_INSTANCE_T instance;
1163 
1164 		if (!state) {
1165 			vchiq_log_error(vchiq_arm_log_level,
1166 				"vchiq has no connection to VideoCore");
1167 			return -ENOTCONN;
1168 		}
1169 
1170 		instance = kmalloc(sizeof(*instance), GFP_KERNEL);
1171 		if (!instance)
1172 			return -ENOMEM;
1173 
1174 		instance->state = state;
1175 		/* XXXBSD: PID or thread ID? */
1176 		instance->pid = td->td_proc->p_pid;
1177 
1178 #ifdef notyet
1179 		ret = vchiq_proc_add_instance(instance);
1180 		if (ret != 0) {
1181 			kfree(instance);
1182 			return ret;
1183 		}
1184 #endif
1185 
1186 		_sema_init(&instance->insert_event, 0);
1187 		_sema_init(&instance->remove_event, 0);
1188 		lmutex_init(&instance->completion_mutex);
1189 		lmutex_init(&instance->bulk_waiter_list_mutex);
1190 		INIT_LIST_HEAD(&instance->bulk_waiter_list);
1191 
1192 		devfs_set_cdevpriv(instance, instance_dtr);
1193 	}
1194 	else {
1195 		vchiq_log_error(vchiq_arm_log_level,
1196 			"Unknown minor device");
1197 		return -ENXIO;
1198 	}
1199 
1200 	return 0;
1201 }
1202 
1203 /****************************************************************************
1204 *
1205 *   vchiq_release
1206 *
1207 ***************************************************************************/
1208 
1209 static int
1210 vchiq_close(struct cdev *dev, int flags __unused, int fmt __unused,
1211                 struct thread *td)
1212 {
1213 	int ret = 0;
1214 	if (1) {
1215 		VCHIQ_INSTANCE_T instance;
1216 		VCHIQ_STATE_T *state = vchiq_get_state();
1217 		VCHIQ_SERVICE_T *service;
1218 		int i;
1219 
1220 		if ((ret = devfs_get_cdevpriv((void**)&instance))) {
1221 			printf("devfs_get_cdevpriv failed: error %d\n", ret);
1222 			return (ret);
1223 		}
1224 
1225 		vchiq_log_info(vchiq_arm_log_level,
1226 			"vchiq_release: instance=%lx",
1227 			(unsigned long)instance);
1228 
1229 		if (!state) {
1230 			ret = -EPERM;
1231 			goto out;
1232 		}
1233 
1234 		/* Ensure videocore is awake to allow termination. */
1235 		vchiq_use_internal(instance->state, NULL,
1236 				USE_TYPE_VCHIQ);
1237 
1238 		lmutex_lock(&instance->completion_mutex);
1239 
1240 		/* Wake the completion thread and ask it to exit */
1241 		instance->closing = 1;
1242 		up(&instance->insert_event);
1243 
1244 		lmutex_unlock(&instance->completion_mutex);
1245 
1246 		/* Wake the slot handler if the completion queue is full. */
1247 		up(&instance->remove_event);
1248 
1249 		/* Mark all services for termination... */
1250 		i = 0;
1251 		while ((service = next_service_by_instance(state, instance,
1252 			&i)) !=	NULL) {
1253 			USER_SERVICE_T *user_service = service->base.userdata;
1254 
1255 			/* Wake the slot handler if the msg queue is full. */
1256 			up(&user_service->remove_event);
1257 
1258 			vchiq_terminate_service_internal(service);
1259 			unlock_service(service);
1260 		}
1261 
1262 		/* ...and wait for them to die */
1263 		i = 0;
1264 		while ((service = next_service_by_instance(state, instance, &i))
1265 			!= NULL) {
1266 			USER_SERVICE_T *user_service = service->base.userdata;
1267 
1268 			down(&service->remove_event);
1269 
1270 			BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
1271 
1272 			spin_lock(&msg_queue_spinlock);
1273 
1274 			while (user_service->msg_remove !=
1275 				user_service->msg_insert) {
1276 				VCHIQ_HEADER_T *header = user_service->
1277 					msg_queue[user_service->msg_remove &
1278 						(MSG_QUEUE_SIZE - 1)];
1279 				user_service->msg_remove++;
1280 				spin_unlock(&msg_queue_spinlock);
1281 
1282 				if (header)
1283 					vchiq_release_message(
1284 						service->handle,
1285 						header);
1286 				spin_lock(&msg_queue_spinlock);
1287 			}
1288 
1289 			spin_unlock(&msg_queue_spinlock);
1290 
1291 			unlock_service(service);
1292 		}
1293 
1294 		/* Release any closed services */
1295 		while (instance->completion_remove !=
1296 			instance->completion_insert) {
1297 			VCHIQ_COMPLETION_DATA_T *completion;
1298 			VCHIQ_SERVICE_T *service1;
1299 			completion = &instance->completions[
1300 				instance->completion_remove &
1301 				(MAX_COMPLETIONS - 1)];
1302 			service1 = completion->service_userdata;
1303 			if (completion->reason == VCHIQ_SERVICE_CLOSED)
1304 			{
1305 				USER_SERVICE_T *user_service =
1306 					service->base.userdata;
1307 
1308 				/* Wake any blocked user-thread */
1309 				if (instance->use_close_delivered)
1310 					up(&user_service->close_event);
1311 				unlock_service(service1);
1312 			}
1313 			instance->completion_remove++;
1314 		}
1315 
1316 		/* Release the PEER service count. */
1317 		vchiq_release_internal(instance->state, NULL);
1318 
1319 		{
1320 			struct list_head *pos, *next;
1321 			list_for_each_safe(pos, next,
1322 				&instance->bulk_waiter_list) {
1323 				struct bulk_waiter_node *waiter;
1324 				waiter = list_entry(pos,
1325 					struct bulk_waiter_node,
1326 					list);
1327 				list_del(pos);
1328 				vchiq_log_info(vchiq_arm_log_level,
1329 					"bulk_waiter - cleaned up %x "
1330 					"for pid %d",
1331 					(unsigned int)waiter, waiter->pid);
1332 		                _sema_destroy(&waiter->bulk_waiter.event);
1333 				kfree(waiter);
1334 			}
1335 		}
1336 
1337 	}
1338 	else {
1339 		vchiq_log_error(vchiq_arm_log_level,
1340 			"Unknown minor device");
1341 		ret = -ENXIO;
1342 	}
1343 
1344 out:
1345 	return ret;
1346 }
1347 
1348 /****************************************************************************
1349 *
1350 *   vchiq_dump
1351 *
1352 ***************************************************************************/
1353 
1354 void
1355 vchiq_dump(void *dump_context, const char *str, int len)
1356 {
1357 	DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
1358 
1359 	if (context->actual < context->space) {
1360 		int copy_bytes;
1361 		if (context->offset > 0) {
1362 			int skip_bytes = min(len, (int)context->offset);
1363 			str += skip_bytes;
1364 			len -= skip_bytes;
1365 			context->offset -= skip_bytes;
1366 			if (context->offset > 0)
1367 				return;
1368 		}
1369 		copy_bytes = min(len, (int)(context->space - context->actual));
1370 		if (copy_bytes == 0)
1371 			return;
1372 		memcpy(context->buf + context->actual, str, copy_bytes);
1373 		context->actual += copy_bytes;
1374 		len -= copy_bytes;
1375 
1376 		/* If tne terminating NUL is included in the length, then it
1377 		** marks the end of a line and should be replaced with a
1378 		** carriage return. */
1379 		if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1380 			char cr = '\n';
1381 			memcpy(context->buf + context->actual - 1, &cr, 1);
1382 		}
1383 	}
1384 }
1385 
1386 /****************************************************************************
1387 *
1388 *   vchiq_dump_platform_instance_state
1389 *
1390 ***************************************************************************/
1391 
1392 void
1393 vchiq_dump_platform_instances(void *dump_context)
1394 {
1395 	VCHIQ_STATE_T *state = vchiq_get_state();
1396 	char buf[80];
1397 	int len;
1398 	int i;
1399 
1400 	/* There is no list of instances, so instead scan all services,
1401 		marking those that have been dumped. */
1402 
1403 	for (i = 0; i < state->unused_service; i++) {
1404 		VCHIQ_SERVICE_T *service = state->services[i];
1405 		VCHIQ_INSTANCE_T instance;
1406 
1407 		if (service && (service->base.callback == service_callback)) {
1408 			instance = service->instance;
1409 			if (instance)
1410 				instance->mark = 0;
1411 		}
1412 	}
1413 
1414 	for (i = 0; i < state->unused_service; i++) {
1415 		VCHIQ_SERVICE_T *service = state->services[i];
1416 		VCHIQ_INSTANCE_T instance;
1417 
1418 		if (service && (service->base.callback == service_callback)) {
1419 			instance = service->instance;
1420 			if (instance && !instance->mark) {
1421 				len = snprintf(buf, sizeof(buf),
1422 					"Instance %x: pid %d,%s completions "
1423 						"%d/%d",
1424 					(unsigned int)instance, instance->pid,
1425 					instance->connected ? " connected, " :
1426 						"",
1427 					instance->completion_insert -
1428 						instance->completion_remove,
1429 					MAX_COMPLETIONS);
1430 
1431 				vchiq_dump(dump_context, buf, len + 1);
1432 
1433 				instance->mark = 1;
1434 			}
1435 		}
1436 	}
1437 }
1438 
1439 /****************************************************************************
1440 *
1441 *   vchiq_dump_platform_service_state
1442 *
1443 ***************************************************************************/
1444 
1445 void
1446 vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
1447 {
1448 	USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
1449 	char buf[80];
1450 	int len;
1451 
1452 	len = snprintf(buf, sizeof(buf), "  instance %x",
1453 		(unsigned int)service->instance);
1454 
1455 	if ((service->base.callback == service_callback) &&
1456 		user_service->is_vchi) {
1457 		len += snprintf(buf + len, sizeof(buf) - len,
1458 			", %d/%d messages",
1459 			user_service->msg_insert - user_service->msg_remove,
1460 			MSG_QUEUE_SIZE);
1461 
1462 		if (user_service->dequeue_pending)
1463 			len += snprintf(buf + len, sizeof(buf) - len,
1464 				" (dequeue pending)");
1465 	}
1466 
1467 	vchiq_dump(dump_context, buf, len + 1);
1468 }
1469 
1470 #ifdef notyet
1471 /****************************************************************************
1472 *
1473 *   dump_user_mem
1474 *
1475 ***************************************************************************/
1476 
/*
 * Dump a user-space memory range to the vchiq log, 16 bytes per line.
 * Linux-only: pins the user pages with get_user_pages(), kmaps each
 * page in turn and logs its contents.  This function is compiled out
 * (enclosing "#ifdef notyet") in this FreeBSD port.
 *
 * NOTE(review): the return value of get_user_pages() is never checked;
 * if fewer than num_pages pages were pinned, the loop below would read
 * unpinned entries of pages[].  Also, end_offset is the offset within
 * the *last* page while 'offset' grows monotonically from the first
 * page, so ranges spanning pages look like they terminate early —
 * presumably never exercised; confirm before enabling this code.
 */
static void
dump_phys_mem(void *virt_addr, uint32_t num_bytes)
{
	int            rc;
	uint8_t       *end_virt_addr = virt_addr + num_bytes;
	int            num_pages;
	int            offset;
	int            end_offset;
	int            page_idx;
	int            prev_idx;
	struct page   *page;
	struct page  **pages;
	uint8_t       *kmapped_virt_ptr;

	/* Align virtAddr and endVirtAddr to 16 byte boundaries. */

	virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
	end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
		~0x0fuL);

	offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
	end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);

	num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;

	pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (pages == NULL) {
		vchiq_log_error(vchiq_arm_log_level,
			"Unable to allocation memory for %d pages\n",
			num_pages);
		return;
	}

	/* Pin the user pages so they can be kmapped and read below. */
	down_read(&current->mm->mmap_sem);
	rc = get_user_pages(current,      /* task */
		current->mm,              /* mm */
		(unsigned long)virt_addr, /* start */
		num_pages,                /* len */
		0,                        /* write */
		0,                        /* force */
		pages,                    /* pages (array of page pointers) */
		NULL);                    /* vmas */
	up_read(&current->mm->mmap_sem);

	prev_idx = -1;
	page = NULL;

	while (offset < end_offset) {

		int page_offset = offset % PAGE_SIZE;
		page_idx = offset / PAGE_SIZE;

		/* Map a new page only when the 16-byte cursor crosses one. */
		if (page_idx != prev_idx) {

			if (page != NULL)
				kunmap(page);
			page = pages[page_idx];
			kmapped_virt_ptr = kmap(page);

			prev_idx = page_idx;
		}

		if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
			vchiq_log_dump_mem("ph",
				(uint32_t)(unsigned long)&kmapped_virt_ptr[
					page_offset],
				&kmapped_virt_ptr[page_offset], 16);

		offset += 16;
	}
	if (page != NULL)
		kunmap(page);

	/* Unpin everything that was pinned above. */
	for (page_idx = 0; page_idx < num_pages; page_idx++)
		page_cache_release(pages[page_idx]);

	kfree(pages);
}
1555 
1556 /****************************************************************************
1557 *
1558 *   vchiq_read
1559 *
1560 ***************************************************************************/
1561 
1562 static ssize_t
1563 vchiq_read(struct file *file, char __user *buf,
1564 	size_t count, loff_t *ppos)
1565 {
1566 	DUMP_CONTEXT_T context;
1567 	context.buf = buf;
1568 	context.actual = 0;
1569 	context.space = count;
1570 	context.offset = *ppos;
1571 
1572 	vchiq_dump_state(&context, &g_state);
1573 
1574 	*ppos += context.actual;
1575 
1576 	return context.actual;
1577 }
1578 #endif
1579 
1580 VCHIQ_STATE_T *
1581 vchiq_get_state(void)
1582 {
1583 
1584 	if (g_state.remote == NULL)
1585 		printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
1586 	else if (g_state.remote->initialised != 1)
1587 		printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
1588 			__func__, g_state.remote->initialised);
1589 
1590 	return ((g_state.remote != NULL) &&
1591 		(g_state.remote->initialised == 1)) ? &g_state : NULL;
1592 }
1593 
1594 /*
1595  * Autosuspend related functionality
1596  */
1597 
1598 int
1599 vchiq_videocore_wanted(VCHIQ_STATE_T *state)
1600 {
1601 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1602 	if (!arm_state)
1603 		/* autosuspend not supported - always return wanted */
1604 		return 1;
1605 	else if (arm_state->blocked_count)
1606 		return 1;
1607 	else if (!arm_state->videocore_use_count)
1608 		/* usage count zero - check for override unless we're forcing */
1609 		if (arm_state->resume_blocked)
1610 			return 0;
1611 		else
1612 			return vchiq_platform_videocore_wanted(state);
1613 	else
1614 		/* non-zero usage count - videocore still required */
1615 		return 1;
1616 }
1617 
1618 static VCHIQ_STATUS_T
1619 vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
1620 	VCHIQ_HEADER_T *header,
1621 	VCHIQ_SERVICE_HANDLE_T service_user,
1622 	void *bulk_user)
1623 {
1624 	vchiq_log_error(vchiq_susp_log_level,
1625 		"%s callback reason %d", __func__, reason);
1626 	return 0;
1627 }
1628 
1629 static int
1630 vchiq_keepalive_thread_func(void *v)
1631 {
1632 	VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
1633 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1634 
1635 	VCHIQ_STATUS_T status;
1636 	VCHIQ_INSTANCE_T instance;
1637 	VCHIQ_SERVICE_HANDLE_T ka_handle;
1638 
1639 	VCHIQ_SERVICE_PARAMS_T params = {
1640 		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
1641 		.callback    = vchiq_keepalive_vchiq_callback,
1642 		.version     = KEEPALIVE_VER,
1643 		.version_min = KEEPALIVE_VER_MIN
1644 	};
1645 
1646 	status = vchiq_initialise(&instance);
1647 	if (status != VCHIQ_SUCCESS) {
1648 		vchiq_log_error(vchiq_susp_log_level,
1649 			"%s vchiq_initialise failed %d", __func__, status);
1650 		goto exit;
1651 	}
1652 
1653 	status = vchiq_connect(instance);
1654 	if (status != VCHIQ_SUCCESS) {
1655 		vchiq_log_error(vchiq_susp_log_level,
1656 			"%s vchiq_connect failed %d", __func__, status);
1657 		goto shutdown;
1658 	}
1659 
1660 	status = vchiq_add_service(instance, &params, &ka_handle);
1661 	if (status != VCHIQ_SUCCESS) {
1662 		vchiq_log_error(vchiq_susp_log_level,
1663 			"%s vchiq_open_service failed %d", __func__, status);
1664 		goto shutdown;
1665 	}
1666 
1667 	while (1) {
1668 		long rc = 0, uc = 0;
1669 		if (wait_for_completion_interruptible(&arm_state->ka_evt)
1670 				!= 0) {
1671 			vchiq_log_error(vchiq_susp_log_level,
1672 				"%s interrupted", __func__);
1673 			flush_signals(current);
1674 			continue;
1675 		}
1676 
1677 		/* read and clear counters.  Do release_count then use_count to
1678 		 * prevent getting more releases than uses */
1679 		rc = atomic_xchg(&arm_state->ka_release_count, 0);
1680 		uc = atomic_xchg(&arm_state->ka_use_count, 0);
1681 
1682 		/* Call use/release service the requisite number of times.
1683 		 * Process use before release so use counts don't go negative */
1684 		while (uc--) {
1685 			atomic_inc(&arm_state->ka_use_ack_count);
1686 			status = vchiq_use_service(ka_handle);
1687 			if (status != VCHIQ_SUCCESS) {
1688 				vchiq_log_error(vchiq_susp_log_level,
1689 					"%s vchiq_use_service error %d",
1690 					__func__, status);
1691 			}
1692 		}
1693 		while (rc--) {
1694 			status = vchiq_release_service(ka_handle);
1695 			if (status != VCHIQ_SUCCESS) {
1696 				vchiq_log_error(vchiq_susp_log_level,
1697 					"%s vchiq_release_service error %d",
1698 					__func__, status);
1699 			}
1700 		}
1701 	}
1702 
1703 shutdown:
1704 	vchiq_shutdown(instance);
1705 exit:
1706 	return 0;
1707 }
1708 
1709 VCHIQ_STATUS_T
1710 vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
1711 {
1712 	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1713 
1714 	if (arm_state) {
1715 		rwlock_init(&arm_state->susp_res_lock);
1716 
1717 		init_completion(&arm_state->ka_evt);
1718 		atomic_set(&arm_state->ka_use_count, 0);
1719 		atomic_set(&arm_state->ka_use_ack_count, 0);
1720 		atomic_set(&arm_state->ka_release_count, 0);
1721 
1722 		init_completion(&arm_state->vc_suspend_complete);
1723 
1724 		init_completion(&arm_state->vc_resume_complete);
1725 		/* Initialise to 'done' state.  We only want to block on resume
1726 		 * completion while videocore is suspended. */
1727 		set_resume_state(arm_state, VC_RESUME_RESUMED);
1728 
1729 		init_completion(&arm_state->resume_blocker);
1730 		/* Initialise to 'done' state.  We only want to block on this
1731 		 * completion while resume is blocked */
1732 		complete_all(&arm_state->resume_blocker);
1733 
1734 		init_completion(&arm_state->blocked_blocker);
1735 		/* Initialise to 'done' state.  We only want to block on this
1736 		 * completion while things are waiting on the resume blocker */
1737 		complete_all(&arm_state->blocked_blocker);
1738 
1739 		arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
1740 		arm_state->suspend_timer_running = 0;
1741 		init_timer(&arm_state->suspend_timer);
1742 		arm_state->suspend_timer.data = (unsigned long)(state);
1743 		arm_state->suspend_timer.function = suspend_timer_callback;
1744 
1745 		arm_state->first_connect = 0;
1746 
1747 	}
1748 	return status;
1749 }
1750 
1751 /*
1752 ** Functions to modify the state variables;
1753 **	set_suspend_state
1754 **	set_resume_state
1755 **
1756 ** There are more state variables than we might like, so ensure they remain in
1757 ** step.  Suspend and resume state are maintained separately, since most of
1758 ** these state machines can operate independently.  However, there are a few
1759 ** states where state transitions in one state machine cause a reset to the
1760 ** other state machine.  In addition, there are some completion events which
1761 ** need to occur on state machine reset and end-state(s), so these are also
1762 ** dealt with in these functions.
1763 **
1764 ** In all states we set the state variable according to the input, but in some
1765 ** cases we perform additional steps outlined below;
1766 **
1767 ** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
1768 **			The suspend completion is completed after any suspend
1769 **			attempt.  When we reset the state machine we also reset
1770 **			the completion.  This reset occurs when videocore is
1771 **			resumed, and also if we initiate suspend after a suspend
1772 **			failure.
1773 **
1774 ** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
1775 **			suspend - ie from this point on we must try to suspend
1776 **			before resuming can occur.  We therefore also reset the
1777 **			resume state machine to VC_RESUME_IDLE in this state.
1778 **
1779 ** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
1780 **			complete_all on the suspend completion to notify
1781 **			anything waiting for suspend to happen.
1782 **
1783 ** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
1784 **			initiate resume, so no need to alter resume state.
1785 **			We call complete_all on the suspend completion to notify
1786 **			of suspend rejection.
1787 **
1788 ** VC_SUSPEND_FAILED - We failed to initiate videocore suspend.  We notify the
1789 **			suspend completion and reset the resume state machine.
1790 **
1791 ** VC_RESUME_IDLE - Initialise the resume completion at the same time.  The
**			resume completion is in its 'done' state whenever
**			videocore is running.  Therefore, the VC_RESUME_IDLE state
1794 **			implies that videocore is suspended.
1795 **			Hence, any thread which needs to wait until videocore is
1796 **			running can wait on this completion - it will only block
1797 **			if videocore is suspended.
1798 **
1799 ** VC_RESUME_RESUMED - Resume has completed successfully.  Videocore is running.
1800 **			Call complete_all on the resume completion to unblock
1801 **			any threads waiting for resume.	 Also reset the suspend
**			state machine to its idle state.
1803 **
1804 ** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
1805 */
1806 
1807 void
1808 set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
1809 	enum vc_suspend_status new_state)
1810 {
1811 	/* set the state in all cases */
1812 	arm_state->vc_suspend_state = new_state;
1813 
1814 	/* state specific additional actions */
1815 	switch (new_state) {
1816 	case VC_SUSPEND_FORCE_CANCELED:
1817 		complete_all(&arm_state->vc_suspend_complete);
1818 		break;
1819 	case VC_SUSPEND_REJECTED:
1820 		complete_all(&arm_state->vc_suspend_complete);
1821 		break;
1822 	case VC_SUSPEND_FAILED:
1823 		complete_all(&arm_state->vc_suspend_complete);
1824 		arm_state->vc_resume_state = VC_RESUME_RESUMED;
1825 		complete_all(&arm_state->vc_resume_complete);
1826 		break;
1827 	case VC_SUSPEND_IDLE:
1828 		/* TODO: reinit_completion */
1829 		INIT_COMPLETION(arm_state->vc_suspend_complete);
1830 		break;
1831 	case VC_SUSPEND_REQUESTED:
1832 		break;
1833 	case VC_SUSPEND_IN_PROGRESS:
1834 		set_resume_state(arm_state, VC_RESUME_IDLE);
1835 		break;
1836 	case VC_SUSPEND_SUSPENDED:
1837 		complete_all(&arm_state->vc_suspend_complete);
1838 		break;
1839 	default:
1840 		BUG();
1841 		break;
1842 	}
1843 }
1844 
1845 void
1846 set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
1847 	enum vc_resume_status new_state)
1848 {
1849 	/* set the state in all cases */
1850 	arm_state->vc_resume_state = new_state;
1851 
1852 	/* state specific additional actions */
1853 	switch (new_state) {
1854 	case VC_RESUME_FAILED:
1855 		break;
1856 	case VC_RESUME_IDLE:
1857 		/* TODO: reinit_completion */
1858 		INIT_COMPLETION(arm_state->vc_resume_complete);
1859 		break;
1860 	case VC_RESUME_REQUESTED:
1861 		break;
1862 	case VC_RESUME_IN_PROGRESS:
1863 		break;
1864 	case VC_RESUME_RESUMED:
1865 		complete_all(&arm_state->vc_resume_complete);
1866 		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
1867 		break;
1868 	default:
1869 		BUG();
1870 		break;
1871 	}
1872 }
1873 
1874 
1875 /* should be called with the write lock held */
1876 inline void
1877 start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1878 {
1879 	del_timer(&arm_state->suspend_timer);
1880 	arm_state->suspend_timer.expires = jiffies +
1881 		msecs_to_jiffies(arm_state->
1882 			suspend_timer_timeout);
1883 	add_timer(&arm_state->suspend_timer);
1884 	arm_state->suspend_timer_running = 1;
1885 }
1886 
1887 /* should be called with the write lock held */
1888 static inline void
1889 stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1890 {
1891 	if (arm_state->suspend_timer_running) {
1892 		del_timer(&arm_state->suspend_timer);
1893 		arm_state->suspend_timer_running = 0;
1894 	}
1895 }
1896 
1897 static inline int
1898 need_resume(VCHIQ_STATE_T *state)
1899 {
1900 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1901 	return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
1902 			(arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
1903 			vchiq_videocore_wanted(state);
1904 }
1905 
/*
 * Prevent videocore from resuming, as part of a force-suspend attempt.
 * Entered and exited with susp_res_lock held for writing; the lock is
 * dropped around each wait below and reacquired before returning, so
 * callers must re-validate any state read before calling.
 *
 * Waits (bounded by FORCE_SUSPEND_TIMEOUT_MS) for clients blocked by a
 * previous force suspend, then for any in-progress resume, before
 * finally resetting resume_blocker and setting resume_blocked.
 * Returns VCHIQ_SUCCESS or VCHIQ_ERROR on timeout/interrupt.
 * (Declared int but carries VCHIQ_STATUS_T values.)
 */
static int
block_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	int status = VCHIQ_SUCCESS;
	const unsigned long timeout_val =
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
	int resume_count = 0;

	/* Allow any threads which were blocked by the last force suspend to
	 * complete if they haven't already.  Only give this one shot; if
	 * blocked_count is incremented after blocked_blocker is completed
	 * (which only happens when blocked_count hits 0) then those threads
	 * will have to wait until next time around */
	if (arm_state->blocked_count) {
		/* TODO: reinit_completion */
		INIT_COMPLETION(arm_state->blocked_blocker);
		/* Drop the lock while sleeping on blocked_blocker. */
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
			"blocked clients", __func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->blocked_blocker, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"previously blocked clients failed" , __func__);
			status = VCHIQ_ERROR;
			/* Reacquire the lock before returning to caller. */
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
			"clients resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
	}

	/* We need to wait for resume to complete if it's in process */
	while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
			arm_state->vc_resume_state > VC_RESUME_IDLE) {
		/* Give up after two trips through the wait below. */
		if (resume_count > 1) {
			status = VCHIQ_ERROR;
			vchiq_log_error(vchiq_susp_log_level, "%s waited too "
				"many times for resume" , __func__);
			goto out;
		}
		/* Drop the lock while sleeping on vc_resume_complete. */
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
			__func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->vc_resume_complete, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"resume failed (%s)", __func__,
				resume_state_names[arm_state->vc_resume_state +
							VC_RESUME_NUM_OFFSET]);
			status = VCHIQ_ERROR;
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
		resume_count++;
	}
	/* TODO: reinit_completion */
	INIT_COMPLETION(arm_state->resume_blocker);
	arm_state->resume_blocked = 1;

out:
	return status;
}
1973 
/*
 * Undo block_resume(): wake anything waiting on the resume blocker and
 * clear the resume_blocked flag.  Callers presumably hold susp_res_lock
 * for writing, matching block_resume() - TODO confirm at call sites.
 */
static inline void
unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	complete_all(&arm_state->resume_blocker);
	arm_state->resume_blocked = 0;
}
1980 
1981 /* Initiate suspend via slot handler. Should be called with the write lock
1982  * held */
/*
 * Request a videocore suspend.  If a suspend is already requested or in
 * progress this is a no-op; otherwise the suspend machine is moved to
 * VC_SUSPEND_REQUESTED and the slot handler is poked to carry it out.
 * Returns VCHIQ_ERROR only when there is no arm state.
 *
 * NOTE: the switch below relies on deliberate fall-through — default
 * falls into REJECTED/FAILED (to rerun the idle-state actions), which
 * fall into IDLE (the path that actually initiates the suspend).
 */
VCHIQ_STATUS_T
vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	status = VCHIQ_SUCCESS;


	switch (arm_state->vc_suspend_state) {
	case VC_SUSPEND_REQUESTED:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
			"requested", __func__);
		break;
	case VC_SUSPEND_IN_PROGRESS:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
			"progress", __func__);
		break;

	default:
		/* We don't expect to be in other states, so log but continue
		 * anyway */
		vchiq_log_error(vchiq_susp_log_level,
			"%s unexpected suspend state %s", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* fall through */
	case VC_SUSPEND_REJECTED:
	case VC_SUSPEND_FAILED:
		/* Ensure any idle state actions have been run */
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
		/* fall through */
	case VC_SUSPEND_IDLE:
		vchiq_log_info(vchiq_susp_log_level,
			"%s: suspending", __func__);
		set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
		/* kick the slot handler thread to initiate suspend */
		request_poll(state, NULL, 0);
		break;
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}
2032 
2033 void
2034 vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
2035 {
2036 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2037 	int susp = 0;
2038 
2039 	if (!arm_state)
2040 		goto out;
2041 
2042 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2043 
2044 	write_lock_bh(&arm_state->susp_res_lock);
2045 	if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
2046 			arm_state->vc_resume_state == VC_RESUME_RESUMED) {
2047 		set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
2048 		susp = 1;
2049 	}
2050 	write_unlock_bh(&arm_state->susp_res_lock);
2051 
2052 	if (susp)
2053 		vchiq_platform_suspend(state);
2054 
2055 out:
2056 	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2057 	return;
2058 }
2059 
2060 
2061 static void
2062 output_timeout_error(VCHIQ_STATE_T *state)
2063 {
2064 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2065 	char service_err[50] = "";
2066 	int vc_use_count = arm_state->videocore_use_count;
2067 	int active_services = state->unused_service;
2068 	int i;
2069 
2070 	if (!arm_state->videocore_use_count) {
2071 		snprintf(service_err, 50, " Videocore usecount is 0");
2072 		goto output_msg;
2073 	}
2074 	for (i = 0; i < active_services; i++) {
2075 		VCHIQ_SERVICE_T *service_ptr = state->services[i];
2076 		if (service_ptr && service_ptr->service_use_count &&
2077 			(service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
2078 			snprintf(service_err, 50, " %c%c%c%c(%8x) service has "
2079 				"use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
2080 					service_ptr->base.fourcc),
2081 				 service_ptr->client_id,
2082 				 service_ptr->service_use_count,
2083 				 service_ptr->service_use_count ==
2084 					 vc_use_count ? "" : " (+ more)");
2085 			break;
2086 		}
2087 	}
2088 
2089 output_msg:
2090 	vchiq_log_error(vchiq_susp_log_level,
2091 		"timed out waiting for vc suspend (%d).%s",
2092 		 arm_state->autosuspend_override, service_err);
2093 
2094 }
2095 
/* Try to get videocore into suspended state, regardless of autosuspend state.
** We don't actually force suspend, since videocore may get into a bad state
** if we force suspend at a bad time.  Instead, we wait for autosuspend to
** determine a good point to suspend.  If this doesn't happen within 100ms we
** report failure.
**
** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
*/
VCHIQ_STATUS_T
vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	long rc = 0;
	int repeat = -1;	/* -1: no timeout yet; 1: retry once */

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);

	/* Prevent any resume from racing with the suspend attempt. */
	status = block_resume(arm_state);
	if (status != VCHIQ_SUCCESS)
		goto unlock;
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		/* Already suspended - just block resume and exit */
		vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
			__func__);
		status = VCHIQ_SUCCESS;
		goto unlock;
	} else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
		/* initiate suspend immediately in the case that we're waiting
		 * for the timeout */
		stop_suspend_timer(arm_state);
		if (!vchiq_videocore_wanted(state)) {
			vchiq_log_info(vchiq_susp_log_level, "%s videocore "
				"idle, initiating suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		} else if (arm_state->autosuspend_override <
						FORCE_SUSPEND_FAIL_MAX) {
			/* Videocore is still wanted - give autosuspend a
			 * chance to pick a safe moment instead of forcing. */
			vchiq_log_info(vchiq_susp_log_level, "%s letting "
				"videocore go idle", __func__);
			status = VCHIQ_SUCCESS;
		} else {
			/* Too many prior failures; request suspend anyway. */
			vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
				"many times - attempting suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		}
	} else {
		vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
			"in progress - wait for completion", __func__);
		status = VCHIQ_SUCCESS;
	}

	/* Wait for suspend to happen due to system idle (not forced..) */
	if (status != VCHIQ_SUCCESS)
		goto unblock_resume;

	do {
		/* Drop the lock while sleeping so the slot handler can make
		 * progress; reacquire it before inspecting state. */
		write_unlock_bh(&arm_state->susp_res_lock);

		rc = wait_for_completion_interruptible_timeout(
				&arm_state->vc_suspend_complete,
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));

		write_lock_bh(&arm_state->susp_res_lock);
		if (rc < 0) {
			/* Interrupted by a signal. */
			vchiq_log_warning(vchiq_susp_log_level, "%s "
				"interrupted waiting for suspend", __func__);
			status = VCHIQ_ERROR;
			goto unblock_resume;
		} else if (rc == 0) {
			if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
				/* Repeat timeout once if in progress */
				if (repeat < 0) {
					repeat = 1;
					continue;
				}
			}
			/* Timed out for good - record it for the
			 * FORCE_SUSPEND_FAIL_MAX escalation above. */
			arm_state->autosuspend_override++;
			output_timeout_error(state);

			status = VCHIQ_RETRY;
			goto unblock_resume;
		}
	} while (0 < (repeat--));

	/* Check and report state in case we need to abort ARM suspend */
	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
		status = VCHIQ_RETRY;
		vchiq_log_error(vchiq_susp_log_level,
			"%s videocore suspend failed (state %s)", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* Reset the state only if it's still in an error state.
		 * Something could have already initiated another suspend. */
		if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
			set_suspend_state(arm_state, VC_SUSPEND_IDLE);

		goto unblock_resume;
	}

	/* successfully suspended - unlock and exit */
	goto unlock;

unblock_resume:
	/* all error states need to unblock resume before exit */
	unblock_resume(arm_state);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}
2215 
2216 void
2217 vchiq_check_suspend(VCHIQ_STATE_T *state)
2218 {
2219 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2220 
2221 	if (!arm_state)
2222 		goto out;
2223 
2224 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2225 
2226 	write_lock_bh(&arm_state->susp_res_lock);
2227 	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
2228 			arm_state->first_connect &&
2229 			!vchiq_videocore_wanted(state)) {
2230 		vchiq_arm_vcsuspend(state);
2231 	}
2232 	write_unlock_bh(&arm_state->susp_res_lock);
2233 
2234 out:
2235 	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2236 	return;
2237 }
2238 
2239 
/* Lift a resume block installed by block_resume()/force-suspend and, if a
 * resume is needed, wait for it to complete.
 *
 * Returns 0 if videocore ends up resumed, -1 if it remains suspended or if
 * the wait was interrupted. */
int
vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int resume = 0;
	int ret = -1;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	unblock_resume(arm_state);
	resume = vchiq_check_resume(state);
	write_unlock_bh(&arm_state->susp_res_lock);

	if (resume) {
		/* A resume was requested; wait (outside the lock) until the
		 * slot handler signals completion. */
		if (wait_for_completion_interruptible(
			&arm_state->vc_resume_complete) < 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			/* failed, cannot accurately derive suspend
			 * state, so exit early. */
			goto out;
		}
	}

	/* Report the final state; only a read lock is needed here. */
	read_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		vchiq_log_info(vchiq_susp_log_level,
				"%s: Videocore remains suspended", __func__);
	} else {
		vchiq_log_info(vchiq_susp_log_level,
				"%s: Videocore resumed", __func__);
		ret = 0;
	}
	read_unlock_bh(&arm_state->susp_res_lock);
out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2282 
2283 /* This function should be called with the write lock held */
2284 int
2285 vchiq_check_resume(VCHIQ_STATE_T *state)
2286 {
2287 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2288 	int resume = 0;
2289 
2290 	if (!arm_state)
2291 		goto out;
2292 
2293 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2294 
2295 	if (need_resume(state)) {
2296 		set_resume_state(arm_state, VC_RESUME_REQUESTED);
2297 		request_poll(state, NULL, 0);
2298 		resume = 1;
2299 	}
2300 
2301 out:
2302 	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2303 	return resume;
2304 }
2305 
2306 #ifdef notyet
/* Perform a platform resume if one has been requested.  Currently compiled
 * out (#ifdef notyet) on this platform.
 *
 * NOTE(review): relies on arm_state->wake_address being non-zero to mean
 * "videocore is asleep" - confirm against the platform suspend code if this
 * is ever enabled. */
void
vchiq_platform_check_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int res = 0;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->wake_address == 0) {
		vchiq_log_info(vchiq_susp_log_level,
					"%s: already awake", __func__);
		goto unlock;
	}
	if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
		vchiq_log_info(vchiq_susp_log_level,
					"%s: already resuming", __func__);
		goto unlock;
	}

	if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
		set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
		res = 1;
	} else
		vchiq_log_trace(vchiq_susp_log_level,
				"%s: not resuming (resume state %s)", __func__,
				resume_state_names[arm_state->vc_resume_state +
							VC_RESUME_NUM_OFFSET]);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

	/* Do the platform work outside the lock. */
	if (res)
		vchiq_platform_resume(state);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
	return;

}
2350 #endif
2351 
2352 
2353 
/* Take a "use" count on videocore on behalf of either a service or the
 * VCHIQ peer, resuming videocore if necessary.
 *
 * state    - global vchiq state
 * service  - the service taking the use count (may be NULL only when
 *            use_type == USE_TYPE_VCHIQ)
 * use_type - USE_TYPE_VCHIQ, USE_TYPE_SERVICE or
 *            USE_TYPE_SERVICE_NO_RESUME (the latter skips requesting a
 *            resume)
 *
 * Returns VCHIQ_SUCCESS, or VCHIQ_ERROR on a NULL service pointer or if a
 * wait was interrupted.  May block while a force-suspend is in progress or
 * while waiting for videocore to resume. */
VCHIQ_STATUS_T
vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
		enum USE_TYPE_E use_type)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
	char entity[16];	/* human-readable name for log messages */
	int *entity_uc;		/* per-entity use counter to bump */
	int local_uc, local_entity_uc;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	if (use_type == USE_TYPE_VCHIQ) {
		snprintf(entity, sizeof(entity), "VCHIQ:   ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		vchiq_log_error(vchiq_susp_log_level, "%s null service "
				"ptr", __func__);
		ret = VCHIQ_ERROR;
		goto out;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	while (arm_state->resume_blocked) {
		/* If we call 'use' while force suspend is waiting for suspend,
		 * then we're about to block the thread which the force is
		 * waiting to complete, so we're bound to just time out. In this
		 * case, set the suspend state such that the wait will be
		 * canceled, so we can complete as quickly as possible. */
		if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
				VC_SUSPEND_IDLE) {
			set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
			break;
		}
		/* If suspend is already in progress then we need to block */
		if (!try_wait_for_completion(&arm_state->resume_blocker)) {
			/* Indicate that there are threads waiting on the resume
			 * blocker.  These need to be allowed to complete before
			 * a _second_ call to force suspend can complete,
			 * otherwise low priority threads might never actually
			 * continue */
			arm_state->blocked_count++;
			write_unlock_bh(&arm_state->susp_res_lock);
			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
				"blocked - waiting...", __func__, entity);
			if (wait_for_completion_killable(
					&arm_state->resume_blocker) != 0) {
				vchiq_log_error(vchiq_susp_log_level, "%s %s "
					"wait for resume blocker interrupted",
					__func__, entity);
				ret = VCHIQ_ERROR;
				/* Re-take the lock just to drop our
				 * blocked_count contribution. */
				write_lock_bh(&arm_state->susp_res_lock);
				arm_state->blocked_count--;
				write_unlock_bh(&arm_state->susp_res_lock);
				goto out;
			}
			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
				"unblocked", __func__, entity);
			write_lock_bh(&arm_state->susp_res_lock);
			/* Last waiter out signals the blocked_blocker. */
			if (--arm_state->blocked_count == 0)
				complete_all(&arm_state->blocked_blocker);
		}
	}

	/* A new user means any pending idle-timeout suspend is off. */
	stop_suspend_timer(arm_state);

	local_uc = ++arm_state->videocore_use_count;
	local_entity_uc = ++(*entity_uc);

	/* If there's a pending request which hasn't yet been serviced then
	 * just clear it.  If we're past VC_SUSPEND_REQUESTED state then
	 * vc_resume_complete will block until we either resume or fail to
	 * suspend */
	if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);

	if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
		set_resume_state(arm_state, VC_RESUME_REQUESTED);
		vchiq_log_info(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, local_entity_uc, local_uc);
		/* Kick the slot handler to perform the resume. */
		request_poll(state, NULL, 0);
	} else
		vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc, local_uc);


	write_unlock_bh(&arm_state->susp_res_lock);

	/* Completion is in a done state when we're not suspended, so this won't
	 * block for the non-suspended case. */
	if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
		vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
			__func__, entity);
		if (wait_for_completion_killable(
				&arm_state->vc_resume_complete) != 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
				"resume interrupted", __func__, entity);
			ret = VCHIQ_ERROR;
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
			entity);
	}

	if (ret == VCHIQ_SUCCESS) {
		VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
		/* Drain any deferred "use" acks accumulated by the keepalive
		 * thread; on send failure, put the remainder back. */
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
			/* Send the use notify to videocore */
			status = vchiq_send_remote_use_active(state);
			if (status == VCHIQ_SUCCESS)
				ack_cnt--;
			else
				atomic_add(ack_cnt,
					&arm_state->ka_use_ack_count);
		}
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2486 
/* Drop a "use" count previously taken by vchiq_use_internal(), on behalf of
 * either a service (service != NULL) or the peer.  When the last user goes
 * away, either start the suspend timer or initiate a suspend directly.
 *
 * Returns VCHIQ_SUCCESS, or VCHIQ_ERROR on a use-count underflow (which is
 * warned about rather than crashing the kernel). */
VCHIQ_STATUS_T
vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
	char entity[16];	/* human-readable name for log messages */
	int *entity_uc;		/* per-entity use counter to decrement */

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	if (service) {
		snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		snprintf(entity, sizeof(entity), "PEER:   ");
		entity_uc = &arm_state->peer_use_count;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		/* Don't use BUG_ON - don't allow user thread to crash kernel */
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
		ret = VCHIQ_ERROR;
		goto unlock;
	}
	--arm_state->videocore_use_count;
	--(*entity_uc);

	if (!vchiq_videocore_wanted(state)) {
		if (vchiq_platform_use_suspend_timer() &&
				!arm_state->resume_blocked) {
			/* Only use the timer if we're not trying to force
			 * suspend (=> resume_blocked) */
			start_suspend_timer(arm_state);
		} else {
			vchiq_log_info(vchiq_susp_log_level,
				"%s %s count %d, state count %d - suspending",
				__func__, entity, *entity_uc,
				arm_state->videocore_use_count);
			/* Write lock is held, as vchiq_arm_vcsuspend
			 * requires. */
			vchiq_arm_vcsuspend(state);
		}
	} else
		vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc,
			arm_state->videocore_use_count);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2547 
2548 void
2549 vchiq_on_remote_use(VCHIQ_STATE_T *state)
2550 {
2551 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2552 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2553 	atomic_inc(&arm_state->ka_use_count);
2554 	complete(&arm_state->ka_evt);
2555 }
2556 
2557 void
2558 vchiq_on_remote_release(VCHIQ_STATE_T *state)
2559 {
2560 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2561 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2562 	atomic_inc(&arm_state->ka_release_count);
2563 	complete(&arm_state->ka_evt);
2564 }
2565 
2566 VCHIQ_STATUS_T
2567 vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
2568 {
2569 	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
2570 }
2571 
2572 VCHIQ_STATUS_T
2573 vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
2574 {
2575 	return vchiq_release_internal(service->state, service);
2576 }
2577 
2578 static void suspend_timer_callback(unsigned long context)
2579 {
2580 	VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
2581 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2582 	if (!arm_state)
2583 		goto out;
2584 	vchiq_log_info(vchiq_susp_log_level,
2585 		"%s - suspend timer expired - check suspend", __func__);
2586 	vchiq_check_suspend(state);
2587 out:
2588 	return;
2589 }
2590 
2591 VCHIQ_STATUS_T
2592 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
2593 {
2594 	VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2595 	VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2596 	if (service) {
2597 		ret = vchiq_use_internal(service->state, service,
2598 				USE_TYPE_SERVICE_NO_RESUME);
2599 		unlock_service(service);
2600 	}
2601 	return ret;
2602 }
2603 
2604 VCHIQ_STATUS_T
2605 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
2606 {
2607 	VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2608 	VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2609 	if (service) {
2610 		ret = vchiq_use_internal(service->state, service,
2611 				USE_TYPE_SERVICE);
2612 		unlock_service(service);
2613 	}
2614 	return ret;
2615 }
2616 
2617 VCHIQ_STATUS_T
2618 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
2619 {
2620 	VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2621 	VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2622 	if (service) {
2623 		ret = vchiq_release_internal(service->state, service);
2624 		unlock_service(service);
2625 	}
2626 	return ret;
2627 }
2628 
2629 void
2630 vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
2631 {
2632 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2633 	int i, j = 0;
2634 	/* Only dump 64 services */
2635 	static const int local_max_services = 64;
2636 	/* If there's more than 64 services, only dump ones with
2637 	 * non-zero counts */
2638 	int only_nonzero = 0;
2639 	static const char *nz = "<-- preventing suspend";
2640 
2641 	enum vc_suspend_status vc_suspend_state;
2642 	enum vc_resume_status  vc_resume_state;
2643 	int peer_count;
2644 	int vc_use_count;
2645 	int active_services;
2646 	struct service_data_struct {
2647 		int fourcc;
2648 		int clientid;
2649 		int use_count;
2650 	} service_data[local_max_services];
2651 
2652 	if (!arm_state)
2653 		return;
2654 
2655 	read_lock_bh(&arm_state->susp_res_lock);
2656 	vc_suspend_state = arm_state->vc_suspend_state;
2657 	vc_resume_state  = arm_state->vc_resume_state;
2658 	peer_count = arm_state->peer_use_count;
2659 	vc_use_count = arm_state->videocore_use_count;
2660 	active_services = state->unused_service;
2661 	if (active_services > local_max_services)
2662 		only_nonzero = 1;
2663 
2664 	for (i = 0; (i < active_services) && (j < local_max_services); i++) {
2665 		VCHIQ_SERVICE_T *service_ptr = state->services[i];
2666 		if (!service_ptr)
2667 			continue;
2668 
2669 		if (only_nonzero && !service_ptr->service_use_count)
2670 			continue;
2671 
2672 		if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
2673 			service_data[j].fourcc = service_ptr->base.fourcc;
2674 			service_data[j].clientid = service_ptr->client_id;
2675 			service_data[j++].use_count = service_ptr->
2676 							service_use_count;
2677 		}
2678 	}
2679 
2680 	read_unlock_bh(&arm_state->susp_res_lock);
2681 
2682 	vchiq_log_warning(vchiq_susp_log_level,
2683 		"-- Videcore suspend state: %s --",
2684 		suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
2685 	vchiq_log_warning(vchiq_susp_log_level,
2686 		"-- Videcore resume state: %s --",
2687 		resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
2688 
2689 	if (only_nonzero)
2690 		vchiq_log_warning(vchiq_susp_log_level, "Too many active "
2691 			"services (%d).  Only dumping up to first %d services "
2692 			"with non-zero use-count", active_services,
2693 			local_max_services);
2694 
2695 	for (i = 0; i < j; i++) {
2696 		vchiq_log_warning(vchiq_susp_log_level,
2697 			"----- %c%c%c%c:%d service count %d %s",
2698 			VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
2699 			service_data[i].clientid,
2700 			service_data[i].use_count,
2701 			service_data[i].use_count ? nz : "");
2702 	}
2703 	vchiq_log_warning(vchiq_susp_log_level,
2704 		"----- VCHIQ use count count %d", peer_count);
2705 	vchiq_log_warning(vchiq_susp_log_level,
2706 		"--- Overall vchiq instance use count %d", vc_use_count);
2707 
2708 	vchiq_dump_platform_use_state(state);
2709 }
2710 
2711 VCHIQ_STATUS_T
2712 vchiq_check_service(VCHIQ_SERVICE_T *service)
2713 {
2714 	VCHIQ_ARM_STATE_T *arm_state;
2715 	VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2716 
2717 	if (!service || !service->state)
2718 		goto out;
2719 
2720 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2721 
2722 	arm_state = vchiq_platform_get_arm_state(service->state);
2723 
2724 	read_lock_bh(&arm_state->susp_res_lock);
2725 	if (service->service_use_count)
2726 		ret = VCHIQ_SUCCESS;
2727 	read_unlock_bh(&arm_state->susp_res_lock);
2728 
2729 	if (ret == VCHIQ_ERROR) {
2730 		vchiq_log_error(vchiq_susp_log_level,
2731 			"%s ERROR - %c%c%c%c:%8x service count %d, "
2732 			"state count %d, videocore suspend state %s", __func__,
2733 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2734 			service->client_id, service->service_use_count,
2735 			arm_state->videocore_use_count,
2736 			suspend_state_names[arm_state->vc_suspend_state +
2737 						VC_SUSPEND_NUM_OFFSET]);
2738 		vchiq_dump_service_use_state(service->state);
2739 	}
2740 out:
2741 	return ret;
2742 }
2743 
2744 /* stub functions */
void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
{
	/* Stub on this platform: the notification is deliberately ignored;
	 * the cast silences the unused-parameter warning. */
	(void)state;
}
2749 
2750 void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
2751 	VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
2752 {
2753 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2754 	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
2755 		get_conn_state_name(oldstate), get_conn_state_name(newstate));
2756 	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
2757 		write_lock_bh(&arm_state->susp_res_lock);
2758 		if (!arm_state->first_connect) {
2759 			char threadname[10];
2760 			arm_state->first_connect = 1;
2761 			write_unlock_bh(&arm_state->susp_res_lock);
2762 			snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
2763 				state->id);
2764 			arm_state->ka_thread = vchiq_thread_create(
2765 				&vchiq_keepalive_thread_func,
2766 				(void *)state,
2767 				threadname);
2768 			if (arm_state->ka_thread == NULL) {
2769 				vchiq_log_error(vchiq_susp_log_level,
2770 					"vchiq: FATAL: couldn't create thread %s",
2771 					threadname);
2772 			} else {
2773 				wake_up_process(arm_state->ka_thread);
2774 			}
2775 		} else
2776 			write_unlock_bh(&arm_state->susp_res_lock);
2777 	}
2778 }
2779 
2780 /****************************************************************************
2781 *
2782 *   vchiq_init - called when the module is loaded.
2783 *
2784 ***************************************************************************/
2785 
int __init vchiq_init(void);
/* Module load entry point: create /dev/vchiq, initialise the message-queue
 * spinlock and the platform layer.  Returns 0 on success, -ENXIO if the
 * device node cannot be created, or the platform-init error code. */
int __init
vchiq_init(void)
{
	int err;

#ifdef notyet
	/* create proc entries */
	/* NOTE(review): the failed_proc_init label referenced here does not
	 * exist in this function - this path will not compile if 'notyet'
	 * is ever defined; confirm before enabling. */
	err = vchiq_proc_init();
	if (err != 0)
		goto failed_proc_init;
#endif

	vchiq_cdev = make_dev(&vchiq_cdevsw, 0,
	    UID_ROOT, GID_WHEEL, 0600, "vchiq");
	if (!vchiq_cdev) {
		printf("Failed to create /dev/vchiq");
		return (-ENXIO);
	}

	spin_lock_init(&msg_queue_spinlock);

	err = vchiq_platform_init(&g_state);
	if (err != 0)
		goto failed_platform_init;

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq: initialised - version %d (min %d)",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	return 0;

failed_platform_init:
	/* Unwind the device node created above. */
	if (vchiq_cdev) {
		destroy_dev(vchiq_cdev);
		vchiq_cdev = NULL;
	}
	vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
	return err;
}
2826 
2827 #ifdef notyet
/* Sum the service_use_count of every service owned by this instance.
 * (Dead code: compiled only under #ifdef notyet.) */
static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
{
	VCHIQ_SERVICE_T *service;
	int use_count = 0, i;
	i = 0;
	/* next_service_by_instance advances i; each service returned is
	 * locked and must be unlocked after use. */
	while ((service = next_service_by_instance(instance->state,
		instance, &i)) != NULL) {
		use_count += service->service_use_count;
		unlock_service(service);
	}
	return use_count;
}
2840 
2841 /* read the per-process use-count */
2842 static int proc_read_use_count(char *page, char **start,
2843 			       off_t off, int count,
2844 			       int *eof, void *data)
2845 {
2846 	VCHIQ_INSTANCE_T instance = data;
2847 	int len, use_count;
2848 
2849 	use_count = vchiq_instance_get_use_count(instance);
2850 	len = snprintf(page+off, count, "%d\n", use_count);
2851 
2852 	return len;
2853 }
2854 
2855 /* add an instance (process) to the proc entries */
2856 static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
2857 {
2858 	char pidstr[32];
2859 	struct proc_dir_entry *top, *use_count;
2860 	struct proc_dir_entry *clients = vchiq_clients_top();
2861 	int pid = instance->pid;
2862 
2863 	snprintf(pidstr, sizeof(pidstr), "%d", pid);
2864 	top = proc_mkdir(pidstr, clients);
2865 	if (!top)
2866 		goto fail_top;
2867 
2868 	use_count = create_proc_read_entry("use_count",
2869 					   0444, top,
2870 					   proc_read_use_count,
2871 					   instance);
2872 	if (!use_count)
2873 		goto fail_use_count;
2874 
2875 	instance->proc_entry = top;
2876 
2877 	return 0;
2878 
2879 fail_use_count:
2880 	remove_proc_entry(top->name, clients);
2881 fail_top:
2882 	return -ENOMEM;
2883 }
2884 
/* Remove the proc entries created by vchiq_proc_add_instance(): first the
 * use_count file, then the per-pid directory.  (Dead code under
 * #ifdef notyet.) */
static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
{
	struct proc_dir_entry *clients = vchiq_clients_top();
	remove_proc_entry("use_count", instance->proc_entry);
	remove_proc_entry(instance->proc_entry->name, clients);
}
2891 
2892 #endif
2893 
2894 /****************************************************************************
2895 *
2896 *   vchiq_exit - called when the module is unloaded.
2897 *
2898 ***************************************************************************/
2899 
2900 void vchiq_exit(void);
2901 void
2902 vchiq_exit(void)
2903 {
2904 	if (vchiq_ehtag == NULL)
2905 		EVENTHANDLER_DEREGISTER(dev_clone, vchiq_ehtag);
2906 	vchiq_ehtag = NULL;
2907 
2908 	vchiq_platform_exit(&g_state);
2909 	if (vchiq_cdev) {
2910 		destroy_dev(vchiq_cdev);
2911 		vchiq_cdev = NULL;
2912 	}
2913 }
2914