1 /** 2 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. 3 * Copyright (c) 2010-2012 Broadcom. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. The names of the above-listed copyright holders may not be used 15 * to endorse or promote products derived from this software without 16 * specific prior written permission. 17 * 18 * ALTERNATIVELY, this software may be distributed under the terms of the 19 * GNU General Public License ("GPL") version 2, as published by the Free 20 * Software Foundation. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 23 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 27 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 29 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 30 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 31 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 36 #include "vchiq_core.h" 37 #include "vchiq_ioctl.h" 38 #include "vchiq_arm.h" 39 40 #define DEVICE_NAME "vchiq" 41 42 /* Override the default prefix, which would be vchiq_arm (from the filename) */ 43 #undef MODULE_PARAM_PREFIX 44 #define MODULE_PARAM_PREFIX DEVICE_NAME "." 45 46 #define VCHIQ_MINOR 0 47 48 /* Some per-instance constants */ 49 #define MAX_COMPLETIONS 128 50 #define MAX_SERVICES 64 51 #define MAX_ELEMENTS 8 52 #define MSG_QUEUE_SIZE 128 53 54 #define KEEPALIVE_VER 1 55 #define KEEPALIVE_VER_MIN KEEPALIVE_VER 56 57 /* Run time control of log level, based on KERN_XXX level. */ 58 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT; 59 int vchiq_susp_log_level = VCHIQ_LOG_ERROR; 60 61 #define SUSPEND_TIMER_TIMEOUT_MS 100 62 #define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000 63 64 #define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */ 65 static const char *const suspend_state_names[] = { 66 "VC_SUSPEND_FORCE_CANCELED", 67 "VC_SUSPEND_REJECTED", 68 "VC_SUSPEND_FAILED", 69 "VC_SUSPEND_IDLE", 70 "VC_SUSPEND_REQUESTED", 71 "VC_SUSPEND_IN_PROGRESS", 72 "VC_SUSPEND_SUSPENDED" 73 }; 74 #define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */ 75 static const char *const resume_state_names[] = { 76 "VC_RESUME_FAILED", 77 "VC_RESUME_IDLE", 78 "VC_RESUME_REQUESTED", 79 "VC_RESUME_IN_PROGRESS", 80 "VC_RESUME_RESUMED" 81 }; 82 /* The number of times we allow force suspend to timeout before actually 83 ** _forcing_ suspend. This is to cater for SW which fails to release vchiq 84 ** correctly - we don't want to prevent ARM suspend indefinitely in this case. 85 */ 86 #define FORCE_SUSPEND_FAIL_MAX 8 87 88 /* The time in ms allowed for videocore to go idle when force suspend has been 89 * requested */ 90 #define FORCE_SUSPEND_TIMEOUT_MS 200 91 92 93 static void suspend_timer_callback(unsigned long context); 94 #ifdef notyet 95 static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance); 96 static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance); 97 #endif 98 99 100 typedef struct user_service_struct { 101 VCHIQ_SERVICE_T *service; 102 void *userdata; 103 VCHIQ_INSTANCE_T instance; 104 char is_vchi; 105 char dequeue_pending; 106 char close_pending; 107 int message_available_pos; 108 int msg_insert; 109 int msg_remove; 110 struct semaphore insert_event; 111 struct semaphore remove_event; 112 struct semaphore close_event; 113 VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE]; 114 } USER_SERVICE_T; 115 116 struct bulk_waiter_node { 117 struct bulk_waiter bulk_waiter; 118 int pid; 119 struct list_head list; 120 }; 121 122 struct vchiq_instance_struct { 123 VCHIQ_STATE_T *state; 124 VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS]; 125 int completion_insert; 126 int completion_remove; 127 struct semaphore insert_event; 128 struct semaphore remove_event; 129 struct mutex completion_mutex; 130 131 int connected; 132 int closing; 133 int pid; 134 int mark; 135 int use_close_delivered; 136 int trace; 137 138 struct list_head bulk_waiter_list; 139 struct mutex bulk_waiter_list_mutex; 140 141 #ifdef notyet 142 VCHIQ_DEBUGFS_NODE_T proc_entry; 143 #endif 144 }; 145 146 typedef struct dump_context_struct { 147 char __user *buf; 148 size_t actual; 149 size_t space; 150 loff_t offset; 151 } DUMP_CONTEXT_T; 152 153 static struct cdev * vchiq_cdev; 154 VCHIQ_STATE_T g_state; 155 static DEFINE_SPINLOCK(msg_queue_spinlock); 156 157 static const char *const ioctl_names[] = { 158 "CONNECT", 159 "SHUTDOWN", 160 "CREATE_SERVICE", 161 "REMOVE_SERVICE", 162 "QUEUE_MESSAGE", 163 "QUEUE_BULK_TRANSMIT", 164 "QUEUE_BULK_RECEIVE", 165 "AWAIT_COMPLETION", 166 "DEQUEUE_MESSAGE", 167 "GET_CLIENT_ID", 168 "GET_CONFIG", 169 "CLOSE_SERVICE", 170 "USE_SERVICE", 171 "RELEASE_SERVICE", 172 "SET_SERVICE_OPTION", 173 "DUMP_PHYS_MEM", 174 "LIB_VERSION", 175 "CLOSE_DELIVERED" 176 }; 177 178 vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) == 179 (VCHIQ_IOC_MAX + 1)); 180 181 static d_open_t vchiq_open; 182 static d_close_t vchiq_close; 183 static d_ioctl_t vchiq_ioctl; 184 185 static struct cdevsw vchiq_cdevsw = { 186 .d_version = D_VERSION, 187 .d_ioctl = vchiq_ioctl, 188 .d_open = vchiq_open, 189 .d_close = vchiq_close, 190 .d_name = DEVICE_NAME, 191 }; 192 193 #if 0 194 static void 195 dump_phys_mem(void *virt_addr, uint32_t num_bytes); 196 #endif 197 198 /**************************************************************************** 199 * 200 * add_completion 201 * 202 ***************************************************************************/ 203 204 static VCHIQ_STATUS_T 205 add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason, 206 VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service, 207 void *bulk_userdata) 208 { 209 VCHIQ_COMPLETION_DATA_T *completion; 210 int insert; 211 DEBUG_INITIALISE(g_state.local) 212 213 insert = instance->completion_insert; 214 while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) { 215 /* Out of space - wait for the client */ 216 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 217 vchiq_log_trace(vchiq_arm_log_level, 218 "add_completion - completion queue full"); 219 DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT); 220 221 if (down_interruptible(&instance->remove_event) != 0) { 222 vchiq_log_info(vchiq_arm_log_level, 223 "service_callback interrupted"); 224 return VCHIQ_RETRY; 225 } 226 227 if (instance->closing) { 228 vchiq_log_info(vchiq_arm_log_level, 229 "service_callback closing"); 230 return VCHIQ_SUCCESS; 231 } 232 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 233 } 234 235 completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)]; 236 237 completion->header = header; 238 completion->reason = reason; 239 /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */ 240 completion->service_userdata = user_service->service; 241 completion->bulk_userdata = bulk_userdata; 242 243 if (reason == VCHIQ_SERVICE_CLOSED) { 244 /* Take an extra reference, to be held until 245 this CLOSED notification is delivered. */ 246 lock_service(user_service->service); 247 if (instance->use_close_delivered) 248 user_service->close_pending = 1; 249 } 250 251 /* A write barrier is needed here to ensure that the entire completion 252 record is written out before the insert point. */ 253 wmb(); 254 255 if (reason == VCHIQ_MESSAGE_AVAILABLE) 256 user_service->message_available_pos = insert; 257 258 instance->completion_insert = ++insert; 259 260 up(&instance->insert_event); 261 262 return VCHIQ_SUCCESS; 263 } 264 265 /**************************************************************************** 266 * 267 * service_callback 268 * 269 ***************************************************************************/ 270 271 static VCHIQ_STATUS_T 272 service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header, 273 VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata) 274 { 275 /* How do we ensure the callback goes to the right client? 276 ** The service_user data points to a USER_SERVICE_T record containing 277 ** the original callback and the user state structure, which contains a 278 ** circular buffer for completion records. 279 */ 280 USER_SERVICE_T *user_service; 281 VCHIQ_SERVICE_T *service; 282 VCHIQ_INSTANCE_T instance; 283 int skip_completion = 0; 284 DEBUG_INITIALISE(g_state.local) 285 286 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 287 288 service = handle_to_service(handle); 289 BUG_ON(!service); 290 user_service = (USER_SERVICE_T *)service->base.userdata; 291 instance = user_service->instance; 292 293 if (!instance || instance->closing) 294 return VCHIQ_SUCCESS; 295 296 vchiq_log_trace(vchiq_arm_log_level, 297 "service_callback - service %lx(%d,%p), reason %d, header %lx, " 298 "instance %lx, bulk_userdata %lx", 299 (unsigned long)user_service, 300 service->localport, user_service->userdata, 301 reason, (unsigned long)header, 302 (unsigned long)instance, (unsigned long)bulk_userdata); 303 304 if (header && user_service->is_vchi) { 305 spin_lock(&msg_queue_spinlock); 306 while (user_service->msg_insert == 307 (user_service->msg_remove + MSG_QUEUE_SIZE)) { 308 spin_unlock(&msg_queue_spinlock); 309 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 310 DEBUG_COUNT(MSG_QUEUE_FULL_COUNT); 311 vchiq_log_trace(vchiq_arm_log_level, 312 "service_callback - msg queue full"); 313 /* If there is no MESSAGE_AVAILABLE in the completion 314 ** queue, add one 315 */ 316 if ((user_service->message_available_pos - 317 instance->completion_remove) < 0) { 318 VCHIQ_STATUS_T status; 319 vchiq_log_info(vchiq_arm_log_level, 320 "Inserting extra MESSAGE_AVAILABLE"); 321 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 322 status = add_completion(instance, reason, 323 NULL, user_service, bulk_userdata); 324 if (status != VCHIQ_SUCCESS) { 325 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 326 return status; 327 } 328 } 329 330 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 331 if (down_interruptible(&user_service->remove_event) 332 != 0) { 333 vchiq_log_info(vchiq_arm_log_level, 334 "service_callback interrupted"); 335 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 336 return VCHIQ_RETRY; 337 } else if (instance->closing) { 338 vchiq_log_info(vchiq_arm_log_level, 339 "service_callback closing"); 340 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 341 return VCHIQ_ERROR; 342 } 343 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 344 spin_lock(&msg_queue_spinlock); 345 } 346 347 user_service->msg_queue[user_service->msg_insert & 348 (MSG_QUEUE_SIZE - 1)] = header; 349 user_service->msg_insert++; 350 351 /* If there is a thread waiting in DEQUEUE_MESSAGE, or if 352 ** there is a MESSAGE_AVAILABLE in the completion queue then 353 ** bypass the completion queue. 354 */ 355 if (((user_service->message_available_pos - 356 instance->completion_remove) >= 0) || 357 user_service->dequeue_pending) { 358 user_service->dequeue_pending = 0; 359 skip_completion = 1; 360 } 361 362 spin_unlock(&msg_queue_spinlock); 363 364 up(&user_service->insert_event); 365 366 header = NULL; 367 } 368 369 if (skip_completion) { 370 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 371 return VCHIQ_SUCCESS; 372 } 373 374 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 375 376 return add_completion(instance, reason, header, user_service, 377 bulk_userdata); 378 } 379 380 /**************************************************************************** 381 * 382 * user_service_free 383 * 384 ***************************************************************************/ 385 static void 386 user_service_free(void *userdata) 387 { 388 USER_SERVICE_T *user_service = userdata; 389 390 _sema_destroy(&user_service->insert_event); 391 _sema_destroy(&user_service->remove_event); 392 393 kfree(user_service); 394 } 395 396 /**************************************************************************** 397 * 398 * close_delivered 399 * 400 ***************************************************************************/ 401 static void close_delivered(USER_SERVICE_T *user_service) 402 { 403 vchiq_log_info(vchiq_arm_log_level, 404 "close_delivered(handle=%x)", 405 user_service->service->handle); 406 407 if (user_service->close_pending) { 408 /* Allow the underlying service to be culled */ 409 unlock_service(user_service->service); 410 411 /* Wake the user-thread blocked in close_ or remove_service */ 412 up(&user_service->close_event); 413 414 user_service->close_pending = 0; 415 } 416 } 417 418 /**************************************************************************** 419 * 420 * vchiq_ioctl 421 * 422 ***************************************************************************/ 423 424 static int 425 vchiq_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag, 426 struct thread *td) 427 { 428 VCHIQ_INSTANCE_T instance; 429 VCHIQ_STATUS_T status = VCHIQ_SUCCESS; 430 VCHIQ_SERVICE_T *service = NULL; 431 int ret = 0; 432 int i, rc; 433 DEBUG_INITIALISE(g_state.local) 434 435 if ((ret = devfs_get_cdevpriv((void**)&instance))) { 436 printf("vchiq_ioctl: devfs_get_cdevpriv failed: error %d\n", ret); 437 return (ret); 438 } 439 440 /* XXXBSD: HACK! */ 441 #define _IOC_NR(x) ((x) & 0xff) 442 #define _IOC_TYPE(x) IOCGROUP(x) 443 444 vchiq_log_trace(vchiq_arm_log_level, 445 "vchiq_ioctl - instance %zx, cmd %s, arg %p", 446 (size_t)instance, 447 ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && 448 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ? 449 ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg); 450 451 #ifdef COMPAT_FREEBSD32 452 /* A fork in the road to freebsd32 compatibilty */ 453 #define _CF32_FORK(compat_c, native_c) { \ 454 int _____dont_call_your_vars_this = 0; \ 455 switch (cmd) { \ 456 _CF32_CASE {_____dont_call_your_vars_this = 1;} \ 457 break; \ 458 } \ 459 if (_____dont_call_your_vars_this) \ 460 { compat_c } \ 461 else \ 462 { native_c } \ 463 } 464 #else 465 #define _CF32_FORK(compat_c, native_c) { native_c } 466 #endif 467 switch (cmd) { 468 case VCHIQ_IOC_SHUTDOWN: 469 if (!instance->connected) 470 break; 471 472 /* Remove all services */ 473 i = 0; 474 while ((service = next_service_by_instance(instance->state, 475 instance, &i)) != NULL) { 476 status = vchiq_remove_service(service->handle); 477 unlock_service(service); 478 if (status != VCHIQ_SUCCESS) 479 break; 480 } 481 service = NULL; 482 483 if (status == VCHIQ_SUCCESS) { 484 /* Wake the completion thread and ask it to exit */ 485 instance->closing = 1; 486 up(&instance->insert_event); 487 } 488 489 break; 490 491 case VCHIQ_IOC_CONNECT: 492 if (instance->connected) { 493 ret = -EINVAL; 494 break; 495 } 496 rc = lmutex_lock_interruptible(&instance->state->mutex); 497 if (rc != 0) { 498 vchiq_log_error(vchiq_arm_log_level, 499 "vchiq: connect: could not lock mutex for " 500 "state %d: %d", 501 instance->state->id, rc); 502 ret = -EINTR; 503 break; 504 } 505 status = vchiq_connect_internal(instance->state, instance); 506 lmutex_unlock(&instance->state->mutex); 507 508 if (status == VCHIQ_SUCCESS) 509 instance->connected = 1; 510 else 511 vchiq_log_error(vchiq_arm_log_level, 512 "vchiq: could not connect: %d", status); 513 break; 514 515 #ifdef COMPAT_FREEBSD32 516 #define _CF32_CASE \ 517 case VCHIQ_IOC_CREATE_SERVICE32: 518 _CF32_CASE 519 #endif 520 case VCHIQ_IOC_CREATE_SERVICE: { 521 VCHIQ_CREATE_SERVICE_T args; 522 USER_SERVICE_T *user_service = NULL; 523 void *userdata; 524 int srvstate; 525 526 _CF32_FORK( 527 VCHIQ_CREATE_SERVICE32_T args32; 528 memcpy(&args32, (const void*)arg, sizeof(args32)); 529 args.params.fourcc = args32.params.fourcc; 530 /* XXXMDC not actually used? overwritten straight away */ 531 args.params.callback = 532 (VCHIQ_CALLBACK_T)(uintptr_t) args32.params.callback; 533 args.params.userdata = (void*)(uintptr_t)args32.params.userdata; 534 args.params.version = args32.params.version; 535 args.params.version_min = args32.params.version_min; 536 args.is_open = args32.is_open; 537 args.is_vchi = args32.is_vchi; 538 args.handle = args32.handle; 539 , 540 memcpy(&args, (const void*)arg, sizeof(args)); 541 ) 542 543 user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL); 544 if (!user_service) { 545 ret = -ENOMEM; 546 break; 547 } 548 549 if (args.is_open) { 550 if (!instance->connected) { 551 ret = -ENOTCONN; 552 kfree(user_service); 553 break; 554 } 555 srvstate = VCHIQ_SRVSTATE_OPENING; 556 } else { 557 srvstate = 558 instance->connected ? 559 VCHIQ_SRVSTATE_LISTENING : 560 VCHIQ_SRVSTATE_HIDDEN; 561 } 562 563 userdata = args.params.userdata; 564 args.params.callback = service_callback; 565 args.params.userdata = user_service; 566 service = vchiq_add_service_internal( 567 instance->state, 568 &args.params, srvstate, 569 instance, user_service_free); 570 571 if (service != NULL) { 572 user_service->service = service; 573 user_service->userdata = userdata; 574 user_service->instance = instance; 575 user_service->is_vchi = (args.is_vchi != 0); 576 user_service->dequeue_pending = 0; 577 user_service->close_pending = 0; 578 user_service->message_available_pos = 579 instance->completion_remove - 1; 580 user_service->msg_insert = 0; 581 user_service->msg_remove = 0; 582 _sema_init(&user_service->insert_event, 0); 583 _sema_init(&user_service->remove_event, 0); 584 _sema_init(&user_service->close_event, 0); 585 586 if (args.is_open) { 587 status = vchiq_open_service_internal 588 (service, instance->pid); 589 if (status != VCHIQ_SUCCESS) { 590 vchiq_remove_service(service->handle); 591 service = NULL; 592 ret = (status == VCHIQ_RETRY) ? 593 -EINTR : -EIO; 594 break; 595 } 596 } 597 #ifdef VCHIQ_IOCTL_DEBUG 598 printf("%s: [CREATE SERVICE] handle = %08x\n", __func__, service->handle); 599 #endif 600 _CF32_FORK( 601 memcpy((void *) 602 &(((VCHIQ_CREATE_SERVICE32_T*) 603 arg)->handle), 604 (const void *)&service->handle, 605 sizeof(service->handle)); 606 , 607 memcpy((void *) 608 &(((VCHIQ_CREATE_SERVICE_T*) 609 arg)->handle), 610 (const void *)&service->handle, 611 sizeof(service->handle)); 612 ); 613 614 service = NULL; 615 } else { 616 ret = -EEXIST; 617 kfree(user_service); 618 } 619 } break; 620 #undef _CF32_CASE 621 622 case VCHIQ_IOC_CLOSE_SERVICE: { 623 VCHIQ_SERVICE_HANDLE_T handle; 624 625 memcpy(&handle, (const void*)arg, sizeof(handle)); 626 627 #ifdef VCHIQ_IOCTL_DEBUG 628 printf("%s: [CLOSE SERVICE] handle = %08x\n", __func__, handle); 629 #endif 630 631 service = find_service_for_instance(instance, handle); 632 if (service != NULL) { 633 USER_SERVICE_T *user_service = 634 (USER_SERVICE_T *)service->base.userdata; 635 /* close_pending is false on first entry, and when the 636 wait in vchiq_close_service has been interrupted. */ 637 if (!user_service->close_pending) { 638 status = vchiq_close_service(service->handle); 639 if (status != VCHIQ_SUCCESS) 640 break; 641 } 642 643 /* close_pending is true once the underlying service 644 has been closed until the client library calls the 645 CLOSE_DELIVERED ioctl, signalling close_event. */ 646 if (user_service->close_pending && 647 down_interruptible(&user_service->close_event)) 648 status = VCHIQ_RETRY; 649 } 650 else 651 ret = -EINVAL; 652 } break; 653 654 case VCHIQ_IOC_REMOVE_SERVICE: { 655 VCHIQ_SERVICE_HANDLE_T handle; 656 657 memcpy(&handle, (const void*)arg, sizeof(handle)); 658 659 #ifdef VCHIQ_IOCTL_DEBUG 660 printf("%s: [REMOVE SERVICE] handle = %08x\n", __func__, handle); 661 #endif 662 663 service = find_service_for_instance(instance, handle); 664 if (service != NULL) { 665 USER_SERVICE_T *user_service = 666 (USER_SERVICE_T *)service->base.userdata; 667 /* close_pending is false on first entry, and when the 668 wait in vchiq_close_service has been interrupted. */ 669 if (!user_service->close_pending) { 670 status = vchiq_remove_service(service->handle); 671 if (status != VCHIQ_SUCCESS) 672 break; 673 } 674 675 /* close_pending is true once the underlying service 676 has been closed until the client library calls the 677 CLOSE_DELIVERED ioctl, signalling close_event. */ 678 if (user_service->close_pending && 679 down_interruptible(&user_service->close_event)) 680 status = VCHIQ_RETRY; 681 } 682 else 683 ret = -EINVAL; 684 } break; 685 686 case VCHIQ_IOC_USE_SERVICE: 687 case VCHIQ_IOC_RELEASE_SERVICE: { 688 VCHIQ_SERVICE_HANDLE_T handle; 689 690 memcpy(&handle, (const void*)arg, sizeof(handle)); 691 692 #ifdef VCHIQ_IOCTL_DEBUG 693 printf("%s: [%s SERVICE] handle = %08x\n", __func__, 694 cmd == VCHIQ_IOC_USE_SERVICE ? "USE" : "RELEASE", handle); 695 #endif 696 697 service = find_service_for_instance(instance, handle); 698 if (service != NULL) { 699 status = (cmd == VCHIQ_IOC_USE_SERVICE) ? 700 vchiq_use_service_internal(service) : 701 vchiq_release_service_internal(service); 702 if (status != VCHIQ_SUCCESS) { 703 vchiq_log_error(vchiq_susp_log_level, 704 "%s: cmd %s returned error %d for " 705 "service %c%c%c%c:%8x", 706 __func__, 707 (cmd == VCHIQ_IOC_USE_SERVICE) ? 708 "VCHIQ_IOC_USE_SERVICE" : 709 "VCHIQ_IOC_RELEASE_SERVICE", 710 status, 711 VCHIQ_FOURCC_AS_4CHARS( 712 service->base.fourcc), 713 service->client_id); 714 ret = -EINVAL; 715 } 716 } else 717 ret = -EINVAL; 718 } break; 719 720 #ifdef COMPAT_FREEBSD32 721 #define _CF32_CASE \ 722 case VCHIQ_IOC_QUEUE_MESSAGE32: 723 _CF32_CASE 724 #endif 725 case VCHIQ_IOC_QUEUE_MESSAGE: { 726 VCHIQ_QUEUE_MESSAGE_T args; 727 _CF32_FORK( 728 VCHIQ_QUEUE_MESSAGE32_T args32; 729 memcpy(&args32, (const void*)arg, sizeof(args32)); 730 args.handle = args32.handle; 731 args.count = args32.count; 732 args.elements = (VCHIQ_ELEMENT_T *)(uintptr_t)args32.elements; 733 , 734 memcpy(&args, (const void*)arg, sizeof(args)); 735 ) 736 737 #ifdef VCHIQ_IOCTL_DEBUG 738 printf("%s: [QUEUE MESSAGE] handle = %08x\n", __func__, args.handle); 739 #endif 740 741 service = find_service_for_instance(instance, args.handle); 742 743 if ((service != NULL) && (args.count <= MAX_ELEMENTS)) { 744 /* Copy elements into kernel space */ 745 VCHIQ_ELEMENT_T elements[MAX_ELEMENTS]; 746 long cp_ret; 747 _CF32_FORK( 748 VCHIQ_ELEMENT32_T elements32[MAX_ELEMENTS]; 749 cp_ret = copy_from_user(elements32, args.elements, 750 args.count * sizeof(VCHIQ_ELEMENT32_T)); 751 for(int i=0;cp_ret == 0 && i < args.count;++i){ 752 elements[i].data = 753 (void *)(uintptr_t)elements32[i].data; 754 elements[i].size = elements32[i].size; 755 } 756 757 , 758 cp_ret = copy_from_user(elements, args.elements, 759 args.count * sizeof(VCHIQ_ELEMENT_T)); 760 ) 761 if (cp_ret == 0) 762 status = vchiq_queue_message 763 (args.handle, 764 elements, args.count); 765 else 766 ret = -EFAULT; 767 } else { 768 ret = -EINVAL; 769 } 770 } break; 771 #undef _CF32_CASE 772 773 #ifdef COMPAT_FREEBSD32 774 #define _CF32_CASE \ 775 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32: \ 776 case VCHIQ_IOC_QUEUE_BULK_RECEIVE32: 777 _CF32_CASE 778 #endif 779 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT: 780 case VCHIQ_IOC_QUEUE_BULK_RECEIVE: { 781 VCHIQ_QUEUE_BULK_TRANSFER_T args; 782 783 struct bulk_waiter_node *waiter = NULL; 784 VCHIQ_BULK_DIR_T dir = 785 (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) || 786 (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32)? 787 VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE; 788 789 _CF32_FORK( 790 VCHIQ_QUEUE_BULK_TRANSFER32_T args32; 791 memcpy(&args32, (const void*)arg, sizeof(args32)); 792 /* XXXMDC parens needed (macro parsing?) */ 793 args = ((VCHIQ_QUEUE_BULK_TRANSFER_T) { 794 .handle = args32.handle, 795 .data = (void *)(uintptr_t) args32.data, 796 .size = args32.size, 797 .userdata = (void *)(uintptr_t) args32.userdata, 798 .mode = args32.mode, 799 }); 800 , 801 memcpy(&args, (const void*)arg, sizeof(args)); 802 ) 803 804 service = find_service_for_instance(instance, args.handle); 805 if (!service) { 806 ret = -EINVAL; 807 break; 808 } 809 810 if (args.mode == VCHIQ_BULK_MODE_BLOCKING) { 811 waiter = kzalloc(sizeof(struct bulk_waiter_node), 812 GFP_KERNEL); 813 if (!waiter) { 814 ret = -ENOMEM; 815 break; 816 } 817 args.userdata = &waiter->bulk_waiter; 818 } else if (args.mode == VCHIQ_BULK_MODE_WAITING) { 819 struct list_head *pos; 820 lmutex_lock(&instance->bulk_waiter_list_mutex); 821 list_for_each(pos, &instance->bulk_waiter_list) { 822 if (list_entry(pos, struct bulk_waiter_node, 823 list)->pid == current->p_pid) { 824 waiter = list_entry(pos, 825 struct bulk_waiter_node, 826 list); 827 list_del(pos); 828 break; 829 } 830 } 831 lmutex_unlock(&instance->bulk_waiter_list_mutex); 832 if (!waiter) { 833 vchiq_log_error(vchiq_arm_log_level, 834 "no bulk_waiter found for pid %d", 835 current->p_pid); 836 ret = -ESRCH; 837 break; 838 } 839 vchiq_log_info(vchiq_arm_log_level, 840 "found bulk_waiter %zx for pid %d", 841 (size_t)waiter, current->p_pid); 842 args.userdata = &waiter->bulk_waiter; 843 } 844 845 status = vchiq_bulk_transfer 846 (args.handle, 847 VCHI_MEM_HANDLE_INVALID, 848 args.data, args.size, 849 args.userdata, args.mode, 850 dir); 851 if (!waiter) 852 break; 853 if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || 854 !waiter->bulk_waiter.bulk) { 855 if (waiter->bulk_waiter.bulk) { 856 /* Cancel the signal when the transfer 857 ** completes. */ 858 spin_lock(&bulk_waiter_spinlock); 859 waiter->bulk_waiter.bulk->userdata = NULL; 860 spin_unlock(&bulk_waiter_spinlock); 861 } 862 _sema_destroy(&waiter->bulk_waiter.event); 863 kfree(waiter); 864 } else { 865 const VCHIQ_BULK_MODE_T mode_waiting = 866 VCHIQ_BULK_MODE_WAITING; 867 waiter->pid = current->p_pid; 868 lmutex_lock(&instance->bulk_waiter_list_mutex); 869 list_add(&waiter->list, &instance->bulk_waiter_list); 870 lmutex_unlock(&instance->bulk_waiter_list_mutex); 871 vchiq_log_info(vchiq_arm_log_level, 872 "saved bulk_waiter %zx for pid %d", 873 (size_t)waiter, current->p_pid); 874 875 _CF32_FORK( 876 memcpy((void *) 877 &(((VCHIQ_QUEUE_BULK_TRANSFER32_T *) 878 arg)->mode), 879 (const void *)&mode_waiting, 880 sizeof(mode_waiting)); 881 , 882 memcpy((void *) 883 &(((VCHIQ_QUEUE_BULK_TRANSFER_T *) 884 arg)->mode), 885 (const void *)&mode_waiting, 886 sizeof(mode_waiting)); 887 ) 888 } 889 } break; 890 #undef _CF32_CASE 891 892 #ifdef COMPAT_FREEBSD32 893 #define _CF32_CASE \ 894 case VCHIQ_IOC_AWAIT_COMPLETION32: 895 _CF32_CASE 896 #endif 897 case VCHIQ_IOC_AWAIT_COMPLETION: { 898 VCHIQ_AWAIT_COMPLETION_T args; 899 int count = 0; 900 901 DEBUG_TRACE(AWAIT_COMPLETION_LINE); 902 if (!instance->connected) { 903 ret = -ENOTCONN; 904 break; 905 } 906 907 _CF32_FORK( 908 VCHIQ_AWAIT_COMPLETION32_T args32; 909 memcpy(&args32, (const void*)arg, sizeof(args32)); 910 args.count = args32.count; 911 args.buf = (VCHIQ_COMPLETION_DATA_T *)(uintptr_t)args32.buf; 912 args.msgbufsize = args32.msgbufsize; 913 args.msgbufcount = args32.msgbufcount; 914 args.msgbufs = (void **)(uintptr_t)args32.msgbufs; 915 , 916 memcpy(&args, (const void*)arg, sizeof(args)); 917 ) 918 919 lmutex_lock(&instance->completion_mutex); 920 921 DEBUG_TRACE(AWAIT_COMPLETION_LINE); 922 while ((instance->completion_remove == 923 instance->completion_insert) 924 && !instance->closing) { 925 926 DEBUG_TRACE(AWAIT_COMPLETION_LINE); 927 lmutex_unlock(&instance->completion_mutex); 928 rc = down_interruptible(&instance->insert_event); 929 lmutex_lock(&instance->completion_mutex); 930 if (rc != 0) { 931 DEBUG_TRACE(AWAIT_COMPLETION_LINE); 932 vchiq_log_info(vchiq_arm_log_level, 933 "AWAIT_COMPLETION interrupted"); 934 ret = -EINTR; 935 break; 936 } 937 } 938 DEBUG_TRACE(AWAIT_COMPLETION_LINE); 939 940 if (ret == 0) { 941 int msgbufcount = args.msgbufcount; 942 int remove; 943 944 remove = instance->completion_remove; 945 946 for (count = 0; count < args.count; count++) { 947 VCHIQ_COMPLETION_DATA_T *completion; 948 VCHIQ_SERVICE_T *service1; 949 USER_SERVICE_T *user_service; 950 VCHIQ_HEADER_T *header; 951 952 if (remove == instance->completion_insert) 953 break; 954 955 completion = &instance->completions[ 956 remove & (MAX_COMPLETIONS - 1)]; 957 958 959 /* A read memory barrier is needed to prevent 960 ** the prefetch of a stale completion record 961 */ 962 rmb(); 963 964 service1 = completion->service_userdata; 965 user_service = service1->base.userdata; 966 completion->service_userdata = 967 user_service->userdata; 968 969 header = completion->header; 970 if (header) { 971 void __user *msgbuf; 972 int msglen; 973 974 msglen = header->size + 975 sizeof(VCHIQ_HEADER_T); 976 /* This must be a VCHIQ-style service */ 977 if (args.msgbufsize < msglen) { 978 vchiq_log_error( 979 vchiq_arm_log_level, 980 "header %zx: msgbufsize" 981 " %x < msglen %x", 982 (size_t)header, 983 args.msgbufsize, 984 msglen); 985 WARN(1, "invalid message " 986 "size\n"); 987 if (count == 0) 988 ret = -EMSGSIZE; 989 break; 990 } 991 if (msgbufcount <= 0) 992 /* Stall here for lack of a 993 ** buffer for the message. */ 994 break; 995 /* Get the pointer from user space */ 996 msgbufcount--; 997 _CF32_FORK( 998 uint32_t *msgbufs32 = 999 (uint32_t *) args.msgbufs; 1000 uint32_t msgbuf32 = 0; 1001 if (copy_from_user(&msgbuf32, 1002 (const uint32_t __user *) 1003 &msgbufs32[msgbufcount], 1004 sizeof(msgbuf32)) != 0) { 1005 if (count == 0) 1006 ret = -EFAULT; 1007 break; 1008 } 1009 msgbuf = (void __user *)(uintptr_t)msgbuf32; 1010 , 1011 if (copy_from_user(&msgbuf, 1012 (const void __user *) 1013 &args.msgbufs[msgbufcount], 1014 sizeof(msgbuf)) != 0) { 1015 if (count == 0) 1016 ret = -EFAULT; 1017 break; 1018 } 1019 ) 1020 1021 /* Copy the message to user space */ 1022 if (copy_to_user(msgbuf, header, 1023 msglen) != 0) { 1024 if (count == 0) 1025 ret = -EFAULT; 1026 break; 1027 } 1028 1029 /* Now it has been copied, the message 1030 ** can be released. */ 1031 vchiq_release_message(service1->handle, 1032 header); 1033 1034 /* The completion must point to the 1035 ** msgbuf. */ 1036 completion->header = msgbuf; 1037 } 1038 1039 if ((completion->reason == 1040 VCHIQ_SERVICE_CLOSED) && 1041 !instance->use_close_delivered) 1042 unlock_service(service1); 1043 _CF32_FORK( 1044 VCHIQ_COMPLETION_DATA32_T comp32 = {0}; 1045 comp32.reason = 1046 (uint32_t)(size_t) completion->reason; 1047 comp32.service_userdata = 1048 (uint32_t)(size_t) 1049 completion->service_userdata; 1050 comp32.bulk_userdata = 1051 (uint32_t)(size_t) 1052 completion->bulk_userdata; 1053 comp32.header = 1054 (uint32_t)(size_t)completion->header; 1055 1056 VCHIQ_COMPLETION_DATA32_T __user *buf_loc; 1057 buf_loc = (VCHIQ_COMPLETION_DATA32_T __user *) 1058 args.buf; 1059 buf_loc += count; 1060 if (copy_to_user( 1061 buf_loc, &comp32, sizeof(comp32) 1062 ) != 0) { 1063 if (ret == 0) 1064 ret = -EFAULT; 1065 } 1066 , 1067 if (copy_to_user((void __user *)( 1068 (size_t)args.buf + 1069 count * sizeof(VCHIQ_COMPLETION_DATA_T)), 1070 completion, 1071 sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) { 1072 if (ret == 0) 1073 ret = -EFAULT; 1074 break; 1075 } 1076 ) 1077 1078 /* Ensure that the above copy has completed 1079 ** before advancing the remove pointer. */ 1080 mb(); 1081 1082 instance->completion_remove = ++remove; 1083 } 1084 1085 if (msgbufcount != args.msgbufcount) { 1086 _CF32_FORK( 1087 memcpy( 1088 (void __user *) 1089 &((VCHIQ_AWAIT_COMPLETION32_T *)arg)-> 1090 msgbufcount, 1091 &msgbufcount, 1092 sizeof(msgbufcount)); 1093 , 1094 memcpy((void __user *) 1095 &((VCHIQ_AWAIT_COMPLETION_T *)arg)-> 1096 msgbufcount, 1097 &msgbufcount, 1098 sizeof(msgbufcount)); 1099 ) 1100 } 1101 1102 if (count != args.count) 1103 { 1104 _CF32_FORK( 1105 memcpy((void __user *) 1106 &((VCHIQ_AWAIT_COMPLETION32_T *)arg)->count, 1107 &count, sizeof(count)); 1108 , 1109 memcpy((void __user *) 1110 &((VCHIQ_AWAIT_COMPLETION_T *)arg)->count, 1111 &count, sizeof(count)); 1112 ) 1113 } 1114 } 1115 1116 if (count != 0) 1117 up(&instance->remove_event); 1118 1119 if ((ret == 0) && instance->closing) 1120 ret = -ENOTCONN; 1121 /* 1122 * XXXBSD: ioctl return codes are not negative as in linux, so 1123 * we can not indicate success with positive number of passed 1124 * messages 1125 */ 1126 if (ret > 0) 1127 ret = 0; 1128 1129 lmutex_unlock(&instance->completion_mutex); 1130 DEBUG_TRACE(AWAIT_COMPLETION_LINE); 1131 } break; 1132 #undef _CF32_CASE 1133 1134 #ifdef COMPAT_FREEBSD32 1135 #define _CF32_CASE \ 1136 case VCHIQ_IOC_DEQUEUE_MESSAGE32: 1137 _CF32_CASE 1138 #endif 1139 case VCHIQ_IOC_DEQUEUE_MESSAGE: { 1140 VCHIQ_DEQUEUE_MESSAGE_T args; 1141 USER_SERVICE_T *user_service; 1142 VCHIQ_HEADER_T *header; 1143 1144 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); 1145 _CF32_FORK( 1146 VCHIQ_DEQUEUE_MESSAGE32_T args32; 1147 memcpy(&args32, (const void*)arg, sizeof(args32)); 1148 args.handle = args32.handle; 1149 args.blocking = args32.blocking; 1150 args.bufsize = args32.bufsize; 1151 args.buf = (void *)(uintptr_t)args32.buf; 1152 , 1153 memcpy(&args, (const void*)arg, sizeof(args)); 1154 ) 1155 service = find_service_for_instance(instance, args.handle); 1156 if (!service) { 1157 ret = -EINVAL; 1158 break; 1159 } 1160 user_service = (USER_SERVICE_T *)service->base.userdata; 1161 if (user_service->is_vchi == 0) { 1162 ret = -EINVAL; 1163 break; 1164 } 1165 1166 spin_lock(&msg_queue_spinlock); 1167 if (user_service->msg_remove == user_service->msg_insert) { 1168 if (!args.blocking) { 1169 spin_unlock(&msg_queue_spinlock); 1170 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); 1171 ret = -EWOULDBLOCK; 1172 break; 1173 } 1174 user_service->dequeue_pending = 1; 1175 do { 1176 spin_unlock(&msg_queue_spinlock); 1177 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); 1178 if (down_interruptible( 1179 &user_service->insert_event) != 0) { 1180 vchiq_log_info(vchiq_arm_log_level, 1181 "DEQUEUE_MESSAGE interrupted"); 1182 ret = -EINTR; 1183 break; 1184 } 1185 spin_lock(&msg_queue_spinlock); 1186 } while (user_service->msg_remove == 1187 user_service->msg_insert); 1188 1189 if (ret) 1190 break; 1191 } 1192 1193 BUG_ON((int)(user_service->msg_insert - 1194 user_service->msg_remove) < 0); 1195 1196 header = user_service->msg_queue[user_service->msg_remove & 1197 (MSG_QUEUE_SIZE - 1)]; 1198 user_service->msg_remove++; 1199 spin_unlock(&msg_queue_spinlock); 1200 1201 up(&user_service->remove_event); 1202 if (header == NULL) 1203 ret = -ENOTCONN; 1204 else if (header->size <= args.bufsize) { 1205 /* Copy to user space if msgbuf is not NULL */ 1206 if ((args.buf == NULL) || 1207 (copy_to_user((void __user *)args.buf, 1208 header->data, 1209 header->size) == 0)) { 1210 args.bufsize = header->size; 1211 _CF32_FORK( 1212 VCHIQ_DEQUEUE_MESSAGE32_T args32; 1213 args32.handle = args.handle; 1214 args32.blocking = args.blocking; 1215 args32.bufsize = args.bufsize; 1216 args32.buf = (uintptr_t)(void *)args.buf; 1217 1218 memcpy((void *)arg, &args32, 1219 sizeof(args32)); 1220 , 1221 memcpy((void *)arg, &args, 1222 sizeof(args)); 1223 ) 1224 vchiq_release_message( 1225 service->handle, 1226 header); 1227 } else 1228 ret = -EFAULT; 1229 } else { 1230 vchiq_log_error(vchiq_arm_log_level, 1231 "header %zx: bufsize %x < size %x", 1232 (size_t)header, args.bufsize, 1233 header->size); 1234 WARN(1, "invalid size\n"); 1235 ret = -EMSGSIZE; 1236 } 1237 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); 1238 } break; 1239 #undef _CF32_CASE 1240 1241 case VCHIQ_IOC_GET_CLIENT_ID: { 1242 VCHIQ_SERVICE_HANDLE_T handle; 1243 1244 memcpy(&handle, (const void*)arg, sizeof(handle)); 1245 1246 ret = vchiq_get_client_id(handle); 1247 } break; 1248 1249 #ifdef COMPAT_FREEBSD32 1250 #define _CF32_CASE \ 1251 case VCHIQ_IOC_GET_CONFIG32: 1252 _CF32_CASE 1253 #endif 1254 case VCHIQ_IOC_GET_CONFIG: { 1255 VCHIQ_GET_CONFIG_T args; 1256 VCHIQ_CONFIG_T config; 1257 _CF32_FORK( 1258 VCHIQ_GET_CONFIG32_T args32; 1259 1260 memcpy(&args32, (const void*)arg, sizeof(args32)); 1261 args.config_size = args32.config_size; 1262 args.pconfig = (VCHIQ_CONFIG_T *) 1263 (uintptr_t)args32.pconfig; 1264 , 1265 memcpy(&args, (const void*)arg, sizeof(args)); 1266 ) 1267 if (args.config_size > sizeof(config)) { 1268 ret = -EINVAL; 1269 break; 1270 } 1271 status = vchiq_get_config(instance, args.config_size, &config); 1272 if (status == VCHIQ_SUCCESS) { 1273 if (copy_to_user((void __user *)args.pconfig, 1274 &config, args.config_size) != 0) { 1275 ret = -EFAULT; 1276 break; 1277 } 1278 } 1279 } break; 1280 #undef _CF32_CASE 1281 1282 case VCHIQ_IOC_SET_SERVICE_OPTION: { 1283 VCHIQ_SET_SERVICE_OPTION_T args; 1284 1285 memcpy(&args, (const void*)arg, sizeof(args)); 1286 1287 service = find_service_for_instance(instance, args.handle); 1288 if (!service) { 1289 ret = -EINVAL; 1290 break; 1291 } 1292 1293 status = vchiq_set_service_option( 1294 args.handle, args.option, args.value); 1295 } break; 1296 1297 #ifdef COMPAT_FREEBSD32 1298 #define _CF32_CASE \ 1299 case VCHIQ_IOC_DUMP_PHYS_MEM32: 1300 _CF32_CASE 1301 #endif 1302 case VCHIQ_IOC_DUMP_PHYS_MEM: { 1303 VCHIQ_DUMP_MEM_T args; 1304 1305 _CF32_FORK( 1306 VCHIQ_DUMP_MEM32_T args32; 1307 memcpy(&args32, (const void*)arg, sizeof(args32)); 1308 args.virt_addr = (void *)(uintptr_t)args32.virt_addr; 1309 args.num_bytes = (size_t)args32.num_bytes; 1310 , 1311 memcpy(&args, (const void*)arg, sizeof(args)); 1312 ) 1313 printf("IMPLEMENT ME: %s:%d\n", __FILE__, __LINE__); 1314 #if 0 1315 dump_phys_mem(args.virt_addr, args.num_bytes); 1316 #endif 1317 } break; 1318 #undef _CF32_CASE 1319 1320 case VCHIQ_IOC_LIB_VERSION: { 1321 size_t lib_version = (size_t)arg; 1322 1323 if (lib_version < VCHIQ_VERSION_MIN) 1324 ret = -EINVAL; 1325 else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED) 1326 instance->use_close_delivered = 1; 1327 } break; 1328 1329 case VCHIQ_IOC_CLOSE_DELIVERED: { 1330 VCHIQ_SERVICE_HANDLE_T handle; 1331 memcpy(&handle, (const void*)arg, sizeof(handle)); 1332 1333 service = find_closed_service_for_instance(instance, handle); 1334 if (service != NULL) { 1335 USER_SERVICE_T *user_service = 1336 (USER_SERVICE_T *)service->base.userdata; 1337 close_delivered(user_service); 1338 } 1339 else 1340 ret = -EINVAL; 1341 } break; 1342 1343 default: 1344 ret = -ENOTTY; 1345 break; 1346 } 1347 #undef _CF32_FORK 1348 1349 if (service) 1350 unlock_service(service); 1351 1352 if (ret == 0) { 1353 if (status == VCHIQ_ERROR) 1354 ret = -EIO; 1355 else if (status == VCHIQ_RETRY) 1356 ret = -EINTR; 1357 } 1358 1359 if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) && 1360 (ret != -EWOULDBLOCK)) 1361 vchiq_log_info(vchiq_arm_log_level, 1362 " ioctl instance %lx, cmd %s -> status %d, %d", 1363 (unsigned long)instance, 1364 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ? 1365 ioctl_names[_IOC_NR(cmd)] : 1366 "<invalid>", 1367 status, ret); 1368 else 1369 vchiq_log_trace(vchiq_arm_log_level, 1370 " ioctl instance %lx, cmd %s -> status %d, %d", 1371 (unsigned long)instance, 1372 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ? 1373 ioctl_names[_IOC_NR(cmd)] : 1374 "<invalid>", 1375 status, ret); 1376 1377 /* XXXBSD: report BSD-style error to userland */ 1378 if (ret < 0) 1379 ret = -ret; 1380 1381 return ret; 1382 } 1383 1384 1385 1386 /**************************************************************************** 1387 * 1388 * vchiq_open 1389 * 1390 ***************************************************************************/ 1391 static void instance_dtr(void *data); 1392 1393 static int 1394 vchiq_open(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 1395 { 1396 vchiq_log_info(vchiq_arm_log_level, "vchiq_open"); 1397 /* XXXBSD: do we really need this check? */ 1398 if (1) { 1399 VCHIQ_STATE_T *state = vchiq_get_state(); 1400 VCHIQ_INSTANCE_T instance; 1401 1402 if (!state) { 1403 vchiq_log_error(vchiq_arm_log_level, 1404 "vchiq has no connection to VideoCore"); 1405 return -ENOTCONN; 1406 } 1407 1408 instance = kmalloc(sizeof(*instance), GFP_KERNEL); 1409 if (!instance) 1410 return -ENOMEM; 1411 1412 instance->state = state; 1413 /* XXXBSD: PID or thread ID? */ 1414 instance->pid = td->td_proc->p_pid; 1415 1416 #ifdef notyet 1417 ret = vchiq_proc_add_instance(instance); 1418 if (ret != 0) { 1419 kfree(instance); 1420 return ret; 1421 } 1422 #endif 1423 1424 _sema_init(&instance->insert_event, 0); 1425 _sema_init(&instance->remove_event, 0); 1426 lmutex_init(&instance->completion_mutex); 1427 lmutex_init(&instance->bulk_waiter_list_mutex); 1428 INIT_LIST_HEAD(&instance->bulk_waiter_list); 1429 1430 devfs_set_cdevpriv(instance, instance_dtr); 1431 } 1432 else { 1433 vchiq_log_error(vchiq_arm_log_level, 1434 "Unknown minor device"); 1435 return -ENXIO; 1436 } 1437 1438 return 0; 1439 } 1440 1441 /**************************************************************************** 1442 * 1443 * vchiq_release 1444 * 1445 ***************************************************************************/ 1446 1447 1448 static int 1449 _vchiq_close_instance(VCHIQ_INSTANCE_T instance) 1450 { 1451 int ret = 0; 1452 VCHIQ_STATE_T *state = vchiq_get_state(); 1453 VCHIQ_SERVICE_T *service; 1454 int i; 1455 1456 vchiq_log_info(vchiq_arm_log_level, 1457 "vchiq_release: instance=%lx", 1458 (unsigned long)instance); 1459 1460 if (!state) { 1461 ret = -EPERM; 1462 goto out; 1463 } 1464 1465 /* Ensure videocore is awake to allow termination. */ 1466 vchiq_use_internal(instance->state, NULL, 1467 USE_TYPE_VCHIQ); 1468 1469 lmutex_lock(&instance->completion_mutex); 1470 1471 /* Wake the completion thread and ask it to exit */ 1472 instance->closing = 1; 1473 up(&instance->insert_event); 1474 1475 lmutex_unlock(&instance->completion_mutex); 1476 1477 /* Wake the slot handler if the completion queue is full. */ 1478 up(&instance->remove_event); 1479 1480 /* Mark all services for termination... */ 1481 i = 0; 1482 while ((service = next_service_by_instance(state, instance, 1483 &i)) != NULL) { 1484 USER_SERVICE_T *user_service = service->base.userdata; 1485 1486 /* Wake the slot handler if the msg queue is full. */ 1487 up(&user_service->remove_event); 1488 1489 vchiq_terminate_service_internal(service); 1490 unlock_service(service); 1491 } 1492 1493 /* ...and wait for them to die */ 1494 i = 0; 1495 while ((service = next_service_by_instance(state, instance, &i)) 1496 != NULL) { 1497 USER_SERVICE_T *user_service = service->base.userdata; 1498 1499 down(&service->remove_event); 1500 1501 BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE); 1502 1503 spin_lock(&msg_queue_spinlock); 1504 1505 while (user_service->msg_remove != 1506 user_service->msg_insert) { 1507 VCHIQ_HEADER_T *header = user_service-> 1508 msg_queue[user_service->msg_remove & 1509 (MSG_QUEUE_SIZE - 1)]; 1510 user_service->msg_remove++; 1511 spin_unlock(&msg_queue_spinlock); 1512 1513 if (header) 1514 vchiq_release_message( 1515 service->handle, 1516 header); 1517 spin_lock(&msg_queue_spinlock); 1518 } 1519 1520 spin_unlock(&msg_queue_spinlock); 1521 1522 unlock_service(service); 1523 } 1524 1525 /* Release any closed services */ 1526 while (instance->completion_remove != 1527 instance->completion_insert) { 1528 VCHIQ_COMPLETION_DATA_T *completion; 1529 VCHIQ_SERVICE_T *service; 1530 completion = &instance->completions[ 1531 instance->completion_remove & 1532 (MAX_COMPLETIONS - 1)]; 1533 service = completion->service_userdata; 1534 if (completion->reason == VCHIQ_SERVICE_CLOSED) 1535 { 1536 USER_SERVICE_T *user_service = 1537 service->base.userdata; 1538 1539 /* Wake any blocked user-thread */ 1540 if (instance->use_close_delivered) 1541 up(&user_service->close_event); 1542 1543 unlock_service(service); 1544 } 1545 instance->completion_remove++; 1546 } 1547 1548 /* Release the PEER service count. */ 1549 vchiq_release_internal(instance->state, NULL); 1550 1551 { 1552 struct list_head *pos, *next; 1553 list_for_each_safe(pos, next, 1554 &instance->bulk_waiter_list) { 1555 struct bulk_waiter_node *waiter; 1556 waiter = list_entry(pos, 1557 struct bulk_waiter_node, 1558 list); 1559 list_del(pos); 1560 vchiq_log_info(vchiq_arm_log_level, 1561 "bulk_waiter - cleaned up %zx " 1562 "for pid %d", 1563 (size_t)waiter, waiter->pid); 1564 _sema_destroy(&waiter->bulk_waiter.event); 1565 kfree(waiter); 1566 } 1567 } 1568 1569 out: 1570 return ret; 1571 1572 } 1573 1574 static void 1575 instance_dtr(void *data) 1576 { 1577 VCHIQ_INSTANCE_T instance = data; 1578 _vchiq_close_instance(instance); 1579 kfree(data); 1580 } 1581 1582 static int 1583 vchiq_close(struct cdev *dev, int flags __unused, int fmt __unused, 1584 struct thread *td) 1585 { 1586 1587 /* XXXMDC it's privdata that tracks opens */ 1588 /* XXXMDC only get closes when there are no more open fds on a vnode */ 1589 1590 return(0); 1591 1592 } 1593 1594 /**************************************************************************** 1595 * 1596 * vchiq_dump 1597 * 1598 ***************************************************************************/ 1599 1600 void 1601 vchiq_dump(void *dump_context, const char *str, int len) 1602 { 1603 DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context; 1604 1605 if (context->actual < context->space) { 1606 int copy_bytes; 1607 if (context->offset > 0) { 1608 int skip_bytes = min(len, (int)context->offset); 1609 str += skip_bytes; 1610 len -= skip_bytes; 1611 context->offset -= skip_bytes; 1612 if (context->offset > 0) 1613 return; 1614 } 1615 copy_bytes = min(len, (int)(context->space - context->actual)); 1616 if (copy_bytes == 0) 1617 return; 1618 memcpy(context->buf + context->actual, str, copy_bytes); 1619 context->actual += copy_bytes; 1620 len -= copy_bytes; 1621 1622 /* If tne terminating NUL is included in the length, then it 1623 ** marks the end of a line and should be replaced with a 1624 ** carriage return. */ 1625 if ((len == 0) && (str[copy_bytes - 1] == '\0')) { 1626 char cr = '\n'; 1627 memcpy(context->buf + context->actual - 1, &cr, 1); 1628 } 1629 } 1630 } 1631 1632 /**************************************************************************** 1633 * 1634 * vchiq_dump_platform_instance_state 1635 * 1636 ***************************************************************************/ 1637 1638 void 1639 vchiq_dump_platform_instances(void *dump_context) 1640 { 1641 VCHIQ_STATE_T *state = vchiq_get_state(); 1642 char buf[80]; 1643 int len; 1644 int i; 1645 1646 /* There is no list of instances, so instead scan all services, 1647 marking those that have been dumped. */ 1648 1649 for (i = 0; i < state->unused_service; i++) { 1650 VCHIQ_SERVICE_T *service = state->services[i]; 1651 VCHIQ_INSTANCE_T instance; 1652 1653 if (service && (service->base.callback == service_callback)) { 1654 instance = service->instance; 1655 if (instance) 1656 instance->mark = 0; 1657 } 1658 } 1659 1660 for (i = 0; i < state->unused_service; i++) { 1661 VCHIQ_SERVICE_T *service = state->services[i]; 1662 VCHIQ_INSTANCE_T instance; 1663 1664 if (service && (service->base.callback == service_callback)) { 1665 instance = service->instance; 1666 if (instance && !instance->mark) { 1667 len = snprintf(buf, sizeof(buf), 1668 "Instance %zx: pid %d,%s completions " 1669 "%d/%d", 1670 (size_t)instance, instance->pid, 1671 instance->connected ? " connected, " : 1672 "", 1673 instance->completion_insert - 1674 instance->completion_remove, 1675 MAX_COMPLETIONS); 1676 1677 vchiq_dump(dump_context, buf, len + 1); 1678 1679 instance->mark = 1; 1680 } 1681 } 1682 } 1683 } 1684 1685 /**************************************************************************** 1686 * 1687 * vchiq_dump_platform_service_state 1688 * 1689 ***************************************************************************/ 1690 1691 void 1692 vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service) 1693 { 1694 USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata; 1695 char buf[80]; 1696 int len; 1697 1698 len = snprintf(buf, sizeof(buf), " instance %zx", 1699 (size_t)service->instance); 1700 1701 if ((service->base.callback == service_callback) && 1702 user_service->is_vchi) { 1703 len += snprintf(buf + len, sizeof(buf) - len, 1704 ", %d/%d messages", 1705 user_service->msg_insert - user_service->msg_remove, 1706 MSG_QUEUE_SIZE); 1707 1708 if (user_service->dequeue_pending) 1709 len += snprintf(buf + len, sizeof(buf) - len, 1710 " (dequeue pending)"); 1711 } 1712 1713 vchiq_dump(dump_context, buf, len + 1); 1714 } 1715 1716 #ifdef notyet 1717 /**************************************************************************** 1718 * 1719 * dump_user_mem 1720 * 1721 ***************************************************************************/ 1722 1723 static void 1724 dump_phys_mem(void *virt_addr, uint32_t num_bytes) 1725 { 1726 int rc; 1727 uint8_t *end_virt_addr = virt_addr + num_bytes; 1728 int num_pages; 1729 int offset; 1730 int end_offset; 1731 int page_idx; 1732 int prev_idx; 1733 struct page *page; 1734 struct page **pages; 1735 uint8_t *kmapped_virt_ptr; 1736 1737 /* Align virtAddr and endVirtAddr to 16 byte boundaries. */ 1738 1739 virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL); 1740 end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) & 1741 ~0x0fuL); 1742 1743 offset = (int)(long)virt_addr & (PAGE_SIZE - 1); 1744 end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1); 1745 1746 num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE; 1747 1748 pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL); 1749 if (pages == NULL) { 1750 vchiq_log_error(vchiq_arm_log_level, 1751 "Unable to allocation memory for %d pages\n", 1752 num_pages); 1753 return; 1754 } 1755 1756 down_read(¤t->mm->mmap_sem); 1757 rc = get_user_pages(current, /* task */ 1758 current->mm, /* mm */ 1759 (unsigned long)virt_addr, /* start */ 1760 num_pages, /* len */ 1761 0, /* write */ 1762 0, /* force */ 1763 pages, /* pages (array of page pointers) */ 1764 NULL); /* vmas */ 1765 up_read(¤t->mm->mmap_sem); 1766 1767 prev_idx = -1; 1768 page = NULL; 1769 1770 while (offset < end_offset) { 1771 1772 int page_offset = offset % PAGE_SIZE; 1773 page_idx = offset / PAGE_SIZE; 1774 1775 if (page_idx != prev_idx) { 1776 1777 if (page != NULL) 1778 kunmap(page); 1779 page = pages[page_idx]; 1780 kmapped_virt_ptr = kmap(page); 1781 1782 prev_idx = page_idx; 1783 } 1784 1785 if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE) 1786 vchiq_log_dump_mem("ph", 1787 (uint32_t)(unsigned long)&kmapped_virt_ptr[ 1788 page_offset], 1789 &kmapped_virt_ptr[page_offset], 16); 1790 1791 offset += 16; 1792 } 1793 if (page != NULL) 1794 kunmap(page); 1795 1796 for (page_idx = 0; page_idx < num_pages; page_idx++) 1797 page_cache_release(pages[page_idx]); 1798 1799 kfree(pages); 1800 } 1801 1802 /**************************************************************************** 1803 * 1804 * vchiq_read 1805 * 1806 ***************************************************************************/ 1807 1808 static ssize_t 1809 vchiq_read(struct file *file, char __user *buf, 1810 size_t count, loff_t *ppos) 1811 { 1812 DUMP_CONTEXT_T context; 1813 context.buf = buf; 1814 context.actual = 0; 1815 context.space = count; 1816 context.offset = *ppos; 1817 1818 vchiq_dump_state(&context, &g_state); 1819 1820 *ppos += context.actual; 1821 1822 return context.actual; 1823 } 1824 #endif 1825 1826 VCHIQ_STATE_T * 1827 vchiq_get_state(void) 1828 { 1829 1830 if (g_state.remote == NULL) 1831 printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__); 1832 else if (g_state.remote->initialised != 1) 1833 printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n", 1834 __func__, g_state.remote->initialised); 1835 1836 return ((g_state.remote != NULL) && 1837 (g_state.remote->initialised == 1)) ? &g_state : NULL; 1838 } 1839 1840 /* 1841 * Autosuspend related functionality 1842 */ 1843 1844 int 1845 vchiq_videocore_wanted(VCHIQ_STATE_T *state) 1846 { 1847 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 1848 if (!arm_state) 1849 /* autosuspend not supported - always return wanted */ 1850 return 1; 1851 else if (arm_state->blocked_count) 1852 return 1; 1853 else if (!arm_state->videocore_use_count) 1854 /* usage count zero - check for override unless we're forcing */ 1855 if (arm_state->resume_blocked) 1856 return 0; 1857 else 1858 return vchiq_platform_videocore_wanted(state); 1859 else 1860 /* non-zero usage count - videocore still required */ 1861 return 1; 1862 } 1863 1864 static VCHIQ_STATUS_T 1865 vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason, 1866 VCHIQ_HEADER_T *header, 1867 VCHIQ_SERVICE_HANDLE_T service_user, 1868 void *bulk_user) 1869 { 1870 vchiq_log_error(vchiq_susp_log_level, 1871 "%s callback reason %d", __func__, reason); 1872 return 0; 1873 } 1874 1875 static int 1876 vchiq_keepalive_thread_func(void *v) 1877 { 1878 VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v; 1879 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 1880 1881 VCHIQ_STATUS_T status; 1882 VCHIQ_INSTANCE_T instance; 1883 VCHIQ_SERVICE_HANDLE_T ka_handle; 1884 1885 VCHIQ_SERVICE_PARAMS_T params = { 1886 .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'), 1887 .callback = vchiq_keepalive_vchiq_callback, 1888 .version = KEEPALIVE_VER, 1889 .version_min = KEEPALIVE_VER_MIN 1890 }; 1891 1892 status = vchiq_initialise(&instance); 1893 if (status != VCHIQ_SUCCESS) { 1894 vchiq_log_error(vchiq_susp_log_level, 1895 "%s vchiq_initialise failed %d", __func__, status); 1896 goto exit; 1897 } 1898 1899 status = vchiq_connect(instance); 1900 if (status != VCHIQ_SUCCESS) { 1901 vchiq_log_error(vchiq_susp_log_level, 1902 "%s vchiq_connect failed %d", __func__, status); 1903 goto shutdown; 1904 } 1905 1906 status = vchiq_add_service(instance, ¶ms, &ka_handle); 1907 if (status != VCHIQ_SUCCESS) { 1908 vchiq_log_error(vchiq_susp_log_level, 1909 "%s vchiq_open_service failed %d", __func__, status); 1910 goto shutdown; 1911 } 1912 1913 while (1) { 1914 long rc = 0, uc = 0; 1915 if (wait_for_completion_interruptible(&arm_state->ka_evt) 1916 != 0) { 1917 vchiq_log_error(vchiq_susp_log_level, 1918 "%s interrupted", __func__); 1919 flush_signals(current); 1920 continue; 1921 } 1922 1923 /* read and clear counters. Do release_count then use_count to 1924 * prevent getting more releases than uses */ 1925 rc = atomic_xchg(&arm_state->ka_release_count, 0); 1926 uc = atomic_xchg(&arm_state->ka_use_count, 0); 1927 1928 /* Call use/release service the requisite number of times. 1929 * Process use before release so use counts don't go negative */ 1930 while (uc--) { 1931 atomic_inc(&arm_state->ka_use_ack_count); 1932 status = vchiq_use_service(ka_handle); 1933 if (status != VCHIQ_SUCCESS) { 1934 vchiq_log_error(vchiq_susp_log_level, 1935 "%s vchiq_use_service error %d", 1936 __func__, status); 1937 } 1938 } 1939 while (rc--) { 1940 status = vchiq_release_service(ka_handle); 1941 if (status != VCHIQ_SUCCESS) { 1942 vchiq_log_error(vchiq_susp_log_level, 1943 "%s vchiq_release_service error %d", 1944 __func__, status); 1945 } 1946 } 1947 } 1948 1949 shutdown: 1950 vchiq_shutdown(instance); 1951 exit: 1952 return 0; 1953 } 1954 1955 VCHIQ_STATUS_T 1956 vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state) 1957 { 1958 VCHIQ_STATUS_T status = VCHIQ_SUCCESS; 1959 1960 if (arm_state) { 1961 rwlock_init(&arm_state->susp_res_lock); 1962 1963 init_completion(&arm_state->ka_evt); 1964 atomic_set(&arm_state->ka_use_count, 0); 1965 atomic_set(&arm_state->ka_use_ack_count, 0); 1966 atomic_set(&arm_state->ka_release_count, 0); 1967 1968 init_completion(&arm_state->vc_suspend_complete); 1969 1970 init_completion(&arm_state->vc_resume_complete); 1971 /* Initialise to 'done' state. We only want to block on resume 1972 * completion while videocore is suspended. */ 1973 set_resume_state(arm_state, VC_RESUME_RESUMED); 1974 1975 init_completion(&arm_state->resume_blocker); 1976 /* Initialise to 'done' state. We only want to block on this 1977 * completion while resume is blocked */ 1978 complete_all(&arm_state->resume_blocker); 1979 1980 init_completion(&arm_state->blocked_blocker); 1981 /* Initialise to 'done' state. We only want to block on this 1982 * completion while things are waiting on the resume blocker */ 1983 complete_all(&arm_state->blocked_blocker); 1984 1985 arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS; 1986 arm_state->suspend_timer_running = 0; 1987 vchiq_init_timer(&arm_state->suspend_timer); 1988 arm_state->suspend_timer.data = (unsigned long)(state); 1989 arm_state->suspend_timer.function = suspend_timer_callback; 1990 1991 arm_state->first_connect = 0; 1992 1993 } 1994 return status; 1995 } 1996 1997 /* 1998 ** Functions to modify the state variables; 1999 ** set_suspend_state 2000 ** set_resume_state 2001 ** 2002 ** There are more state variables than we might like, so ensure they remain in 2003 ** step. Suspend and resume state are maintained separately, since most of 2004 ** these state machines can operate independently. However, there are a few 2005 ** states where state transitions in one state machine cause a reset to the 2006 ** other state machine. In addition, there are some completion events which 2007 ** need to occur on state machine reset and end-state(s), so these are also 2008 ** dealt with in these functions. 2009 ** 2010 ** In all states we set the state variable according to the input, but in some 2011 ** cases we perform additional steps outlined below; 2012 ** 2013 ** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time. 2014 ** The suspend completion is completed after any suspend 2015 ** attempt. When we reset the state machine we also reset 2016 ** the completion. This reset occurs when videocore is 2017 ** resumed, and also if we initiate suspend after a suspend 2018 ** failure. 2019 ** 2020 ** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for 2021 ** suspend - ie from this point on we must try to suspend 2022 ** before resuming can occur. We therefore also reset the 2023 ** resume state machine to VC_RESUME_IDLE in this state. 2024 ** 2025 ** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call 2026 ** complete_all on the suspend completion to notify 2027 ** anything waiting for suspend to happen. 2028 ** 2029 ** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also 2030 ** initiate resume, so no need to alter resume state. 2031 ** We call complete_all on the suspend completion to notify 2032 ** of suspend rejection. 2033 ** 2034 ** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the 2035 ** suspend completion and reset the resume state machine. 2036 ** 2037 ** VC_RESUME_IDLE - Initialise the resume completion at the same time. The 2038 ** resume completion is in its 'done' state whenever 2039 ** videcore is running. Therfore, the VC_RESUME_IDLE state 2040 ** implies that videocore is suspended. 2041 ** Hence, any thread which needs to wait until videocore is 2042 ** running can wait on this completion - it will only block 2043 ** if videocore is suspended. 2044 ** 2045 ** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running. 2046 ** Call complete_all on the resume completion to unblock 2047 ** any threads waiting for resume. Also reset the suspend 2048 ** state machine to it's idle state. 2049 ** 2050 ** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists. 2051 */ 2052 2053 void 2054 set_suspend_state(VCHIQ_ARM_STATE_T *arm_state, 2055 enum vc_suspend_status new_state) 2056 { 2057 /* set the state in all cases */ 2058 arm_state->vc_suspend_state = new_state; 2059 2060 /* state specific additional actions */ 2061 switch (new_state) { 2062 case VC_SUSPEND_FORCE_CANCELED: 2063 complete_all(&arm_state->vc_suspend_complete); 2064 break; 2065 case VC_SUSPEND_REJECTED: 2066 complete_all(&arm_state->vc_suspend_complete); 2067 break; 2068 case VC_SUSPEND_FAILED: 2069 complete_all(&arm_state->vc_suspend_complete); 2070 arm_state->vc_resume_state = VC_RESUME_RESUMED; 2071 complete_all(&arm_state->vc_resume_complete); 2072 break; 2073 case VC_SUSPEND_IDLE: 2074 /* TODO: reinit_completion */ 2075 INIT_COMPLETION(arm_state->vc_suspend_complete); 2076 break; 2077 case VC_SUSPEND_REQUESTED: 2078 break; 2079 case VC_SUSPEND_IN_PROGRESS: 2080 set_resume_state(arm_state, VC_RESUME_IDLE); 2081 break; 2082 case VC_SUSPEND_SUSPENDED: 2083 complete_all(&arm_state->vc_suspend_complete); 2084 break; 2085 default: 2086 BUG(); 2087 break; 2088 } 2089 } 2090 2091 void 2092 set_resume_state(VCHIQ_ARM_STATE_T *arm_state, 2093 enum vc_resume_status new_state) 2094 { 2095 /* set the state in all cases */ 2096 arm_state->vc_resume_state = new_state; 2097 2098 /* state specific additional actions */ 2099 switch (new_state) { 2100 case VC_RESUME_FAILED: 2101 break; 2102 case VC_RESUME_IDLE: 2103 /* TODO: reinit_completion */ 2104 INIT_COMPLETION(arm_state->vc_resume_complete); 2105 break; 2106 case VC_RESUME_REQUESTED: 2107 break; 2108 case VC_RESUME_IN_PROGRESS: 2109 break; 2110 case VC_RESUME_RESUMED: 2111 complete_all(&arm_state->vc_resume_complete); 2112 set_suspend_state(arm_state, VC_SUSPEND_IDLE); 2113 break; 2114 default: 2115 BUG(); 2116 break; 2117 } 2118 } 2119 2120 2121 /* should be called with the write lock held */ 2122 inline void 2123 start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state) 2124 { 2125 vchiq_del_timer(&arm_state->suspend_timer); 2126 arm_state->suspend_timer.expires = jiffies + 2127 msecs_to_jiffies(arm_state-> 2128 suspend_timer_timeout); 2129 vchiq_add_timer(&arm_state->suspend_timer); 2130 arm_state->suspend_timer_running = 1; 2131 } 2132 2133 /* should be called with the write lock held */ 2134 static inline void 2135 stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state) 2136 { 2137 if (arm_state->suspend_timer_running) { 2138 vchiq_del_timer(&arm_state->suspend_timer); 2139 arm_state->suspend_timer_running = 0; 2140 } 2141 } 2142 2143 static inline int 2144 need_resume(VCHIQ_STATE_T *state) 2145 { 2146 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2147 return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) && 2148 (arm_state->vc_resume_state < VC_RESUME_REQUESTED) && 2149 vchiq_videocore_wanted(state); 2150 } 2151 2152 static int 2153 block_resume(VCHIQ_ARM_STATE_T *arm_state) 2154 { 2155 int status = VCHIQ_SUCCESS; 2156 const unsigned long timeout_val = 2157 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS); 2158 int resume_count = 0; 2159 2160 /* Allow any threads which were blocked by the last force suspend to 2161 * complete if they haven't already. Only give this one shot; if 2162 * blocked_count is incremented after blocked_blocker is completed 2163 * (which only happens when blocked_count hits 0) then those threads 2164 * will have to wait until next time around */ 2165 if (arm_state->blocked_count) { 2166 /* TODO: reinit_completion */ 2167 INIT_COMPLETION(arm_state->blocked_blocker); 2168 write_unlock_bh(&arm_state->susp_res_lock); 2169 vchiq_log_info(vchiq_susp_log_level, "%s wait for previously " 2170 "blocked clients", __func__); 2171 if (wait_for_completion_interruptible_timeout( 2172 &arm_state->blocked_blocker, timeout_val) 2173 <= 0) { 2174 vchiq_log_error(vchiq_susp_log_level, "%s wait for " 2175 "previously blocked clients failed" , __func__); 2176 status = VCHIQ_ERROR; 2177 write_lock_bh(&arm_state->susp_res_lock); 2178 goto out; 2179 } 2180 vchiq_log_info(vchiq_susp_log_level, "%s previously blocked " 2181 "clients resumed", __func__); 2182 write_lock_bh(&arm_state->susp_res_lock); 2183 } 2184 2185 /* We need to wait for resume to complete if it's in process */ 2186 while (arm_state->vc_resume_state != VC_RESUME_RESUMED && 2187 arm_state->vc_resume_state > VC_RESUME_IDLE) { 2188 if (resume_count > 1) { 2189 status = VCHIQ_ERROR; 2190 vchiq_log_error(vchiq_susp_log_level, "%s waited too " 2191 "many times for resume" , __func__); 2192 goto out; 2193 } 2194 write_unlock_bh(&arm_state->susp_res_lock); 2195 vchiq_log_info(vchiq_susp_log_level, "%s wait for resume", 2196 __func__); 2197 if (wait_for_completion_interruptible_timeout( 2198 &arm_state->vc_resume_complete, timeout_val) 2199 <= 0) { 2200 vchiq_log_error(vchiq_susp_log_level, "%s wait for " 2201 "resume failed (%s)", __func__, 2202 resume_state_names[arm_state->vc_resume_state + 2203 VC_RESUME_NUM_OFFSET]); 2204 status = VCHIQ_ERROR; 2205 write_lock_bh(&arm_state->susp_res_lock); 2206 goto out; 2207 } 2208 vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__); 2209 write_lock_bh(&arm_state->susp_res_lock); 2210 resume_count++; 2211 } 2212 /* TODO: reinit_completion */ 2213 INIT_COMPLETION(arm_state->resume_blocker); 2214 arm_state->resume_blocked = 1; 2215 2216 out: 2217 return status; 2218 } 2219 2220 static inline void 2221 unblock_resume(VCHIQ_ARM_STATE_T *arm_state) 2222 { 2223 complete_all(&arm_state->resume_blocker); 2224 arm_state->resume_blocked = 0; 2225 } 2226 2227 /* Initiate suspend via slot handler. Should be called with the write lock 2228 * held */ 2229 VCHIQ_STATUS_T 2230 vchiq_arm_vcsuspend(VCHIQ_STATE_T *state) 2231 { 2232 VCHIQ_STATUS_T status = VCHIQ_ERROR; 2233 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2234 2235 if (!arm_state) 2236 goto out; 2237 2238 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2239 status = VCHIQ_SUCCESS; 2240 2241 2242 switch (arm_state->vc_suspend_state) { 2243 case VC_SUSPEND_REQUESTED: 2244 vchiq_log_info(vchiq_susp_log_level, "%s: suspend already " 2245 "requested", __func__); 2246 break; 2247 case VC_SUSPEND_IN_PROGRESS: 2248 vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in " 2249 "progress", __func__); 2250 break; 2251 2252 default: 2253 /* We don't expect to be in other states, so log but continue 2254 * anyway */ 2255 vchiq_log_error(vchiq_susp_log_level, 2256 "%s unexpected suspend state %s", __func__, 2257 suspend_state_names[arm_state->vc_suspend_state + 2258 VC_SUSPEND_NUM_OFFSET]); 2259 /* fall through */ 2260 case VC_SUSPEND_REJECTED: 2261 case VC_SUSPEND_FAILED: 2262 /* Ensure any idle state actions have been run */ 2263 set_suspend_state(arm_state, VC_SUSPEND_IDLE); 2264 /* fall through */ 2265 case VC_SUSPEND_IDLE: 2266 vchiq_log_info(vchiq_susp_log_level, 2267 "%s: suspending", __func__); 2268 set_suspend_state(arm_state, VC_SUSPEND_REQUESTED); 2269 /* kick the slot handler thread to initiate suspend */ 2270 request_poll(state, NULL, 0); 2271 break; 2272 } 2273 2274 out: 2275 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status); 2276 return status; 2277 } 2278 2279 void 2280 vchiq_platform_check_suspend(VCHIQ_STATE_T *state) 2281 { 2282 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2283 int susp = 0; 2284 2285 if (!arm_state) 2286 goto out; 2287 2288 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2289 2290 write_lock_bh(&arm_state->susp_res_lock); 2291 if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED && 2292 arm_state->vc_resume_state == VC_RESUME_RESUMED) { 2293 set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS); 2294 susp = 1; 2295 } 2296 write_unlock_bh(&arm_state->susp_res_lock); 2297 2298 if (susp) 2299 vchiq_platform_suspend(state); 2300 2301 out: 2302 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__); 2303 return; 2304 } 2305 2306 2307 static void 2308 output_timeout_error(VCHIQ_STATE_T *state) 2309 { 2310 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2311 char service_err[50] = ""; 2312 int vc_use_count = arm_state->videocore_use_count; 2313 int active_services = state->unused_service; 2314 int i; 2315 2316 if (!arm_state->videocore_use_count) { 2317 snprintf(service_err, 50, " Videocore usecount is 0"); 2318 goto output_msg; 2319 } 2320 for (i = 0; i < active_services; i++) { 2321 VCHIQ_SERVICE_T *service_ptr = state->services[i]; 2322 if (service_ptr && service_ptr->service_use_count && 2323 (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) { 2324 snprintf(service_err, 50, " %c%c%c%c(%8x) service has " 2325 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS( 2326 service_ptr->base.fourcc), 2327 service_ptr->client_id, 2328 service_ptr->service_use_count, 2329 service_ptr->service_use_count == 2330 vc_use_count ? "" : " (+ more)"); 2331 break; 2332 } 2333 } 2334 2335 output_msg: 2336 vchiq_log_error(vchiq_susp_log_level, 2337 "timed out waiting for vc suspend (%d).%s", 2338 arm_state->autosuspend_override, service_err); 2339 2340 } 2341 2342 /* Try to get videocore into suspended state, regardless of autosuspend state. 2343 ** We don't actually force suspend, since videocore may get into a bad state 2344 ** if we force suspend at a bad time. Instead, we wait for autosuspend to 2345 ** determine a good point to suspend. If this doesn't happen within 100ms we 2346 ** report failure. 2347 ** 2348 ** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if 2349 ** videocore failed to suspend in time or VCHIQ_ERROR if interrupted. 2350 */ 2351 VCHIQ_STATUS_T 2352 vchiq_arm_force_suspend(VCHIQ_STATE_T *state) 2353 { 2354 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2355 VCHIQ_STATUS_T status = VCHIQ_ERROR; 2356 long rc = 0; 2357 int repeat = -1; 2358 2359 if (!arm_state) 2360 goto out; 2361 2362 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2363 2364 write_lock_bh(&arm_state->susp_res_lock); 2365 2366 status = block_resume(arm_state); 2367 if (status != VCHIQ_SUCCESS) 2368 goto unlock; 2369 if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) { 2370 /* Already suspended - just block resume and exit */ 2371 vchiq_log_info(vchiq_susp_log_level, "%s already suspended", 2372 __func__); 2373 status = VCHIQ_SUCCESS; 2374 goto unlock; 2375 } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) { 2376 /* initiate suspend immediately in the case that we're waiting 2377 * for the timeout */ 2378 stop_suspend_timer(arm_state); 2379 if (!vchiq_videocore_wanted(state)) { 2380 vchiq_log_info(vchiq_susp_log_level, "%s videocore " 2381 "idle, initiating suspend", __func__); 2382 status = vchiq_arm_vcsuspend(state); 2383 } else if (arm_state->autosuspend_override < 2384 FORCE_SUSPEND_FAIL_MAX) { 2385 vchiq_log_info(vchiq_susp_log_level, "%s letting " 2386 "videocore go idle", __func__); 2387 status = VCHIQ_SUCCESS; 2388 } else { 2389 vchiq_log_warning(vchiq_susp_log_level, "%s failed too " 2390 "many times - attempting suspend", __func__); 2391 status = vchiq_arm_vcsuspend(state); 2392 } 2393 } else { 2394 vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend " 2395 "in progress - wait for completion", __func__); 2396 status = VCHIQ_SUCCESS; 2397 } 2398 2399 /* Wait for suspend to happen due to system idle (not forced..) */ 2400 if (status != VCHIQ_SUCCESS) 2401 goto unblock_resume; 2402 2403 do { 2404 write_unlock_bh(&arm_state->susp_res_lock); 2405 2406 rc = wait_for_completion_interruptible_timeout( 2407 &arm_state->vc_suspend_complete, 2408 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS)); 2409 2410 write_lock_bh(&arm_state->susp_res_lock); 2411 if (rc < 0) { 2412 vchiq_log_warning(vchiq_susp_log_level, "%s " 2413 "interrupted waiting for suspend", __func__); 2414 status = VCHIQ_ERROR; 2415 goto unblock_resume; 2416 } else if (rc == 0) { 2417 if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) { 2418 /* Repeat timeout once if in progress */ 2419 if (repeat < 0) { 2420 repeat = 1; 2421 continue; 2422 } 2423 } 2424 arm_state->autosuspend_override++; 2425 output_timeout_error(state); 2426 2427 status = VCHIQ_RETRY; 2428 goto unblock_resume; 2429 } 2430 } while (0 < (repeat--)); 2431 2432 /* Check and report state in case we need to abort ARM suspend */ 2433 if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) { 2434 status = VCHIQ_RETRY; 2435 vchiq_log_error(vchiq_susp_log_level, 2436 "%s videocore suspend failed (state %s)", __func__, 2437 suspend_state_names[arm_state->vc_suspend_state + 2438 VC_SUSPEND_NUM_OFFSET]); 2439 /* Reset the state only if it's still in an error state. 2440 * Something could have already initiated another suspend. */ 2441 if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE) 2442 set_suspend_state(arm_state, VC_SUSPEND_IDLE); 2443 2444 goto unblock_resume; 2445 } 2446 2447 /* successfully suspended - unlock and exit */ 2448 goto unlock; 2449 2450 unblock_resume: 2451 /* all error states need to unblock resume before exit */ 2452 unblock_resume(arm_state); 2453 2454 unlock: 2455 write_unlock_bh(&arm_state->susp_res_lock); 2456 2457 out: 2458 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status); 2459 return status; 2460 } 2461 2462 void 2463 vchiq_check_suspend(VCHIQ_STATE_T *state) 2464 { 2465 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2466 2467 if (!arm_state) 2468 goto out; 2469 2470 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2471 2472 write_lock_bh(&arm_state->susp_res_lock); 2473 if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED && 2474 arm_state->first_connect && 2475 !vchiq_videocore_wanted(state)) { 2476 vchiq_arm_vcsuspend(state); 2477 } 2478 write_unlock_bh(&arm_state->susp_res_lock); 2479 2480 out: 2481 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__); 2482 return; 2483 } 2484 2485 2486 int 2487 vchiq_arm_allow_resume(VCHIQ_STATE_T *state) 2488 { 2489 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2490 int resume = 0; 2491 int ret = -1; 2492 2493 if (!arm_state) 2494 goto out; 2495 2496 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2497 2498 write_lock_bh(&arm_state->susp_res_lock); 2499 unblock_resume(arm_state); 2500 resume = vchiq_check_resume(state); 2501 write_unlock_bh(&arm_state->susp_res_lock); 2502 2503 if (resume) { 2504 if (wait_for_completion_interruptible( 2505 &arm_state->vc_resume_complete) < 0) { 2506 vchiq_log_error(vchiq_susp_log_level, 2507 "%s interrupted", __func__); 2508 /* failed, cannot accurately derive suspend 2509 * state, so exit early. */ 2510 goto out; 2511 } 2512 } 2513 2514 read_lock_bh(&arm_state->susp_res_lock); 2515 if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) { 2516 vchiq_log_info(vchiq_susp_log_level, 2517 "%s: Videocore remains suspended", __func__); 2518 } else { 2519 vchiq_log_info(vchiq_susp_log_level, 2520 "%s: Videocore resumed", __func__); 2521 ret = 0; 2522 } 2523 read_unlock_bh(&arm_state->susp_res_lock); 2524 out: 2525 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret); 2526 return ret; 2527 } 2528 2529 /* This function should be called with the write lock held */ 2530 int 2531 vchiq_check_resume(VCHIQ_STATE_T *state) 2532 { 2533 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2534 int resume = 0; 2535 2536 if (!arm_state) 2537 goto out; 2538 2539 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2540 2541 if (need_resume(state)) { 2542 set_resume_state(arm_state, VC_RESUME_REQUESTED); 2543 request_poll(state, NULL, 0); 2544 resume = 1; 2545 } 2546 2547 out: 2548 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__); 2549 return resume; 2550 } 2551 2552 #ifdef notyet 2553 void 2554 vchiq_platform_check_resume(VCHIQ_STATE_T *state) 2555 { 2556 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2557 int res = 0; 2558 2559 if (!arm_state) 2560 goto out; 2561 2562 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2563 2564 write_lock_bh(&arm_state->susp_res_lock); 2565 if (arm_state->wake_address == 0) { 2566 vchiq_log_info(vchiq_susp_log_level, 2567 "%s: already awake", __func__); 2568 goto unlock; 2569 } 2570 if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) { 2571 vchiq_log_info(vchiq_susp_log_level, 2572 "%s: already resuming", __func__); 2573 goto unlock; 2574 } 2575 2576 if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) { 2577 set_resume_state(arm_state, VC_RESUME_IN_PROGRESS); 2578 res = 1; 2579 } else 2580 vchiq_log_trace(vchiq_susp_log_level, 2581 "%s: not resuming (resume state %s)", __func__, 2582 resume_state_names[arm_state->vc_resume_state + 2583 VC_RESUME_NUM_OFFSET]); 2584 2585 unlock: 2586 write_unlock_bh(&arm_state->susp_res_lock); 2587 2588 if (res) 2589 vchiq_platform_resume(state); 2590 2591 out: 2592 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__); 2593 return; 2594 2595 } 2596 #endif 2597 2598 2599 2600 VCHIQ_STATUS_T 2601 vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, 2602 enum USE_TYPE_E use_type) 2603 { 2604 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2605 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS; 2606 char entity[16]; 2607 int *entity_uc; 2608 int local_uc, local_entity_uc; 2609 2610 if (!arm_state) 2611 goto out; 2612 2613 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2614 2615 if (use_type == USE_TYPE_VCHIQ) { 2616 snprintf(entity, sizeof(entity), "VCHIQ: "); 2617 entity_uc = &arm_state->peer_use_count; 2618 } else if (service) { 2619 snprintf(entity, sizeof(entity), "%c%c%c%c:%8x", 2620 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), 2621 service->client_id); 2622 entity_uc = &service->service_use_count; 2623 } else { 2624 vchiq_log_error(vchiq_susp_log_level, "%s null service " 2625 "ptr", __func__); 2626 ret = VCHIQ_ERROR; 2627 goto out; 2628 } 2629 2630 write_lock_bh(&arm_state->susp_res_lock); 2631 while (arm_state->resume_blocked) { 2632 /* If we call 'use' while force suspend is waiting for suspend, 2633 * then we're about to block the thread which the force is 2634 * waiting to complete, so we're bound to just time out. In this 2635 * case, set the suspend state such that the wait will be 2636 * canceled, so we can complete as quickly as possible. */ 2637 if (arm_state->resume_blocked && arm_state->vc_suspend_state == 2638 VC_SUSPEND_IDLE) { 2639 set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED); 2640 break; 2641 } 2642 /* If suspend is already in progress then we need to block */ 2643 if (!try_wait_for_completion(&arm_state->resume_blocker)) { 2644 /* Indicate that there are threads waiting on the resume 2645 * blocker. These need to be allowed to complete before 2646 * a _second_ call to force suspend can complete, 2647 * otherwise low priority threads might never actually 2648 * continue */ 2649 arm_state->blocked_count++; 2650 write_unlock_bh(&arm_state->susp_res_lock); 2651 vchiq_log_info(vchiq_susp_log_level, "%s %s resume " 2652 "blocked - waiting...", __func__, entity); 2653 if (wait_for_completion_killable( 2654 &arm_state->resume_blocker) != 0) { 2655 vchiq_log_error(vchiq_susp_log_level, "%s %s " 2656 "wait for resume blocker interrupted", 2657 __func__, entity); 2658 ret = VCHIQ_ERROR; 2659 write_lock_bh(&arm_state->susp_res_lock); 2660 arm_state->blocked_count--; 2661 write_unlock_bh(&arm_state->susp_res_lock); 2662 goto out; 2663 } 2664 vchiq_log_info(vchiq_susp_log_level, "%s %s resume " 2665 "unblocked", __func__, entity); 2666 write_lock_bh(&arm_state->susp_res_lock); 2667 if (--arm_state->blocked_count == 0) 2668 complete_all(&arm_state->blocked_blocker); 2669 } 2670 } 2671 2672 stop_suspend_timer(arm_state); 2673 2674 local_uc = ++arm_state->videocore_use_count; 2675 local_entity_uc = ++(*entity_uc); 2676 2677 /* If there's a pending request which hasn't yet been serviced then 2678 * just clear it. If we're past VC_SUSPEND_REQUESTED state then 2679 * vc_resume_complete will block until we either resume or fail to 2680 * suspend */ 2681 if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED) 2682 set_suspend_state(arm_state, VC_SUSPEND_IDLE); 2683 2684 if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) { 2685 set_resume_state(arm_state, VC_RESUME_REQUESTED); 2686 vchiq_log_info(vchiq_susp_log_level, 2687 "%s %s count %d, state count %d", 2688 __func__, entity, local_entity_uc, local_uc); 2689 request_poll(state, NULL, 0); 2690 } else 2691 vchiq_log_trace(vchiq_susp_log_level, 2692 "%s %s count %d, state count %d", 2693 __func__, entity, *entity_uc, local_uc); 2694 2695 2696 write_unlock_bh(&arm_state->susp_res_lock); 2697 2698 /* Completion is in a done state when we're not suspended, so this won't 2699 * block for the non-suspended case. */ 2700 if (!try_wait_for_completion(&arm_state->vc_resume_complete)) { 2701 vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume", 2702 __func__, entity); 2703 if (wait_for_completion_killable( 2704 &arm_state->vc_resume_complete) != 0) { 2705 vchiq_log_error(vchiq_susp_log_level, "%s %s wait for " 2706 "resume interrupted", __func__, entity); 2707 ret = VCHIQ_ERROR; 2708 goto out; 2709 } 2710 vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__, 2711 entity); 2712 } 2713 2714 if (ret == VCHIQ_SUCCESS) { 2715 VCHIQ_STATUS_T status = VCHIQ_SUCCESS; 2716 long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0); 2717 while (ack_cnt && (status == VCHIQ_SUCCESS)) { 2718 /* Send the use notify to videocore */ 2719 status = vchiq_send_remote_use_active(state); 2720 if (status == VCHIQ_SUCCESS) 2721 ack_cnt--; 2722 else 2723 atomic_add(ack_cnt, 2724 &arm_state->ka_use_ack_count); 2725 } 2726 } 2727 2728 out: 2729 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret); 2730 return ret; 2731 } 2732 2733 VCHIQ_STATUS_T 2734 vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service) 2735 { 2736 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2737 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS; 2738 char entity[16]; 2739 int *entity_uc; 2740 2741 if (!arm_state) 2742 goto out; 2743 2744 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2745 2746 if (service) { 2747 snprintf(entity, sizeof(entity), "%c%c%c%c:%8x", 2748 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), 2749 service->client_id); 2750 entity_uc = &service->service_use_count; 2751 } else { 2752 snprintf(entity, sizeof(entity), "PEER: "); 2753 entity_uc = &arm_state->peer_use_count; 2754 } 2755 2756 write_lock_bh(&arm_state->susp_res_lock); 2757 if (!arm_state->videocore_use_count || !(*entity_uc)) { 2758 /* Don't use BUG_ON - don't allow user thread to crash kernel */ 2759 WARN_ON(!arm_state->videocore_use_count); 2760 WARN_ON(!(*entity_uc)); 2761 ret = VCHIQ_ERROR; 2762 goto unlock; 2763 } 2764 --arm_state->videocore_use_count; 2765 --(*entity_uc); 2766 2767 if (!vchiq_videocore_wanted(state)) { 2768 if (vchiq_platform_use_suspend_timer() && 2769 !arm_state->resume_blocked) { 2770 /* Only use the timer if we're not trying to force 2771 * suspend (=> resume_blocked) */ 2772 start_suspend_timer(arm_state); 2773 } else { 2774 vchiq_log_info(vchiq_susp_log_level, 2775 "%s %s count %d, state count %d - suspending", 2776 __func__, entity, *entity_uc, 2777 arm_state->videocore_use_count); 2778 vchiq_arm_vcsuspend(state); 2779 } 2780 } else 2781 vchiq_log_trace(vchiq_susp_log_level, 2782 "%s %s count %d, state count %d", 2783 __func__, entity, *entity_uc, 2784 arm_state->videocore_use_count); 2785 2786 unlock: 2787 write_unlock_bh(&arm_state->susp_res_lock); 2788 2789 out: 2790 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret); 2791 return ret; 2792 } 2793 2794 void 2795 vchiq_on_remote_use(VCHIQ_STATE_T *state) 2796 { 2797 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2798 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2799 atomic_inc(&arm_state->ka_use_count); 2800 complete(&arm_state->ka_evt); 2801 } 2802 2803 void 2804 vchiq_on_remote_release(VCHIQ_STATE_T *state) 2805 { 2806 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2807 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2808 atomic_inc(&arm_state->ka_release_count); 2809 complete(&arm_state->ka_evt); 2810 } 2811 2812 VCHIQ_STATUS_T 2813 vchiq_use_service_internal(VCHIQ_SERVICE_T *service) 2814 { 2815 return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE); 2816 } 2817 2818 VCHIQ_STATUS_T 2819 vchiq_release_service_internal(VCHIQ_SERVICE_T *service) 2820 { 2821 return vchiq_release_internal(service->state, service); 2822 } 2823 2824 static void suspend_timer_callback(unsigned long context) 2825 { 2826 VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context; 2827 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2828 if (!arm_state) 2829 goto out; 2830 vchiq_log_info(vchiq_susp_log_level, 2831 "%s - suspend timer expired - check suspend", __func__); 2832 vchiq_check_suspend(state); 2833 out: 2834 return; 2835 } 2836 2837 VCHIQ_STATUS_T 2838 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle) 2839 { 2840 VCHIQ_STATUS_T ret = VCHIQ_ERROR; 2841 VCHIQ_SERVICE_T *service = find_service_by_handle(handle); 2842 if (service) { 2843 ret = vchiq_use_internal(service->state, service, 2844 USE_TYPE_SERVICE_NO_RESUME); 2845 unlock_service(service); 2846 } 2847 return ret; 2848 } 2849 2850 VCHIQ_STATUS_T 2851 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle) 2852 { 2853 VCHIQ_STATUS_T ret = VCHIQ_ERROR; 2854 VCHIQ_SERVICE_T *service = find_service_by_handle(handle); 2855 if (service) { 2856 ret = vchiq_use_internal(service->state, service, 2857 USE_TYPE_SERVICE); 2858 unlock_service(service); 2859 } 2860 return ret; 2861 } 2862 2863 VCHIQ_STATUS_T 2864 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle) 2865 { 2866 VCHIQ_STATUS_T ret = VCHIQ_ERROR; 2867 VCHIQ_SERVICE_T *service = find_service_by_handle(handle); 2868 if (service) { 2869 ret = vchiq_release_internal(service->state, service); 2870 unlock_service(service); 2871 } 2872 return ret; 2873 } 2874 2875 void 2876 vchiq_dump_service_use_state(VCHIQ_STATE_T *state) 2877 { 2878 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 2879 int i, j = 0; 2880 /* Only dump 64 services */ 2881 static const int local_max_services = 64; 2882 /* If there's more than 64 services, only dump ones with 2883 * non-zero counts */ 2884 int only_nonzero = 0; 2885 static const char *nz = "<-- preventing suspend"; 2886 2887 enum vc_suspend_status vc_suspend_state; 2888 enum vc_resume_status vc_resume_state; 2889 int peer_count; 2890 int vc_use_count; 2891 int active_services; 2892 struct service_data_struct { 2893 int fourcc; 2894 int clientid; 2895 int use_count; 2896 } service_data[local_max_services]; 2897 2898 if (!arm_state) 2899 return; 2900 2901 read_lock_bh(&arm_state->susp_res_lock); 2902 vc_suspend_state = arm_state->vc_suspend_state; 2903 vc_resume_state = arm_state->vc_resume_state; 2904 peer_count = arm_state->peer_use_count; 2905 vc_use_count = arm_state->videocore_use_count; 2906 active_services = state->unused_service; 2907 if (active_services > local_max_services) 2908 only_nonzero = 1; 2909 2910 for (i = 0; (i < active_services) && (j < local_max_services); i++) { 2911 VCHIQ_SERVICE_T *service_ptr = state->services[i]; 2912 if (!service_ptr) 2913 continue; 2914 2915 if (only_nonzero && !service_ptr->service_use_count) 2916 continue; 2917 2918 if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) { 2919 service_data[j].fourcc = service_ptr->base.fourcc; 2920 service_data[j].clientid = service_ptr->client_id; 2921 service_data[j++].use_count = service_ptr-> 2922 service_use_count; 2923 } 2924 } 2925 2926 read_unlock_bh(&arm_state->susp_res_lock); 2927 2928 vchiq_log_warning(vchiq_susp_log_level, 2929 "-- Videcore suspend state: %s --", 2930 suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]); 2931 vchiq_log_warning(vchiq_susp_log_level, 2932 "-- Videcore resume state: %s --", 2933 resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]); 2934 2935 if (only_nonzero) 2936 vchiq_log_warning(vchiq_susp_log_level, "Too many active " 2937 "services (%d). Only dumping up to first %d services " 2938 "with non-zero use-count", active_services, 2939 local_max_services); 2940 2941 for (i = 0; i < j; i++) { 2942 vchiq_log_warning(vchiq_susp_log_level, 2943 "----- %c%c%c%c:%d service count %d %s", 2944 VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc), 2945 service_data[i].clientid, 2946 service_data[i].use_count, 2947 service_data[i].use_count ? nz : ""); 2948 } 2949 vchiq_log_warning(vchiq_susp_log_level, 2950 "----- VCHIQ use count count %d", peer_count); 2951 vchiq_log_warning(vchiq_susp_log_level, 2952 "--- Overall vchiq instance use count %d", vc_use_count); 2953 2954 vchiq_dump_platform_use_state(state); 2955 } 2956 2957 VCHIQ_STATUS_T 2958 vchiq_check_service(VCHIQ_SERVICE_T *service) 2959 { 2960 VCHIQ_ARM_STATE_T *arm_state; 2961 VCHIQ_STATUS_T ret = VCHIQ_ERROR; 2962 2963 if (!service || !service->state) 2964 goto out; 2965 2966 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__); 2967 2968 arm_state = vchiq_platform_get_arm_state(service->state); 2969 2970 read_lock_bh(&arm_state->susp_res_lock); 2971 if (service->service_use_count) 2972 ret = VCHIQ_SUCCESS; 2973 read_unlock_bh(&arm_state->susp_res_lock); 2974 2975 if (ret == VCHIQ_ERROR) { 2976 vchiq_log_error(vchiq_susp_log_level, 2977 "%s ERROR - %c%c%c%c:%8x service count %d, " 2978 "state count %d, videocore suspend state %s", __func__, 2979 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), 2980 service->client_id, service->service_use_count, 2981 arm_state->videocore_use_count, 2982 suspend_state_names[arm_state->vc_suspend_state + 2983 VC_SUSPEND_NUM_OFFSET]); 2984 vchiq_dump_service_use_state(service->state); 2985 } 2986 out: 2987 return ret; 2988 } 2989 2990 /* stub functions */ 2991 void vchiq_on_remote_use_active(VCHIQ_STATE_T *state) 2992 { 2993 (void)state; 2994 } 2995 2996 void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state, 2997 VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate) 2998 { 2999 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state); 3000 vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id, 3001 get_conn_state_name(oldstate), get_conn_state_name(newstate)); 3002 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) { 3003 write_lock_bh(&arm_state->susp_res_lock); 3004 if (!arm_state->first_connect) { 3005 char threadname[10]; 3006 arm_state->first_connect = 1; 3007 write_unlock_bh(&arm_state->susp_res_lock); 3008 snprintf(threadname, sizeof(threadname), "VCHIQka-%d", 3009 state->id); 3010 arm_state->ka_thread = vchiq_thread_create( 3011 &vchiq_keepalive_thread_func, 3012 (void *)state, 3013 threadname); 3014 if (arm_state->ka_thread == NULL) { 3015 vchiq_log_error(vchiq_susp_log_level, 3016 "vchiq: FATAL: couldn't create thread %s", 3017 threadname); 3018 } else { 3019 wake_up_process(arm_state->ka_thread); 3020 } 3021 } else 3022 write_unlock_bh(&arm_state->susp_res_lock); 3023 } 3024 } 3025 3026 /**************************************************************************** 3027 * 3028 * vchiq_init - called when the module is loaded. 3029 * 3030 ***************************************************************************/ 3031 3032 int __init vchiq_init(void); 3033 int __init 3034 vchiq_init(void) 3035 { 3036 int err; 3037 3038 #ifdef notyet 3039 /* create proc entries */ 3040 err = vchiq_proc_init(); 3041 if (err != 0) 3042 goto failed_proc_init; 3043 #endif 3044 3045 vchiq_cdev = make_dev(&vchiq_cdevsw, 0, 3046 UID_ROOT, GID_WHEEL, 0600, "vchiq"); 3047 if (!vchiq_cdev) { 3048 printf("Failed to create /dev/vchiq"); 3049 return (-ENXIO); 3050 } 3051 3052 spin_lock_init(&msg_queue_spinlock); 3053 3054 err = vchiq_platform_init(&g_state); 3055 if (err != 0) 3056 goto failed_platform_init; 3057 3058 vchiq_log_info(vchiq_arm_log_level, 3059 "vchiq: initialised - version %d (min %d)", 3060 VCHIQ_VERSION, VCHIQ_VERSION_MIN); 3061 3062 return 0; 3063 3064 failed_platform_init: 3065 if (vchiq_cdev) { 3066 destroy_dev(vchiq_cdev); 3067 vchiq_cdev = NULL; 3068 } 3069 vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq"); 3070 return err; 3071 } 3072 3073 #ifdef notyet 3074 static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance) 3075 { 3076 VCHIQ_SERVICE_T *service; 3077 int use_count = 0, i; 3078 i = 0; 3079 while ((service = next_service_by_instance(instance->state, 3080 instance, &i)) != NULL) { 3081 use_count += service->service_use_count; 3082 unlock_service(service); 3083 } 3084 return use_count; 3085 } 3086 3087 /* read the per-process use-count */ 3088 static int proc_read_use_count(char *page, char **start, 3089 off_t off, int count, 3090 int *eof, void *data) 3091 { 3092 VCHIQ_INSTANCE_T instance = data; 3093 int len, use_count; 3094 3095 use_count = vchiq_instance_get_use_count(instance); 3096 len = snprintf(page+off, count, "%d\n", use_count); 3097 3098 return len; 3099 } 3100 3101 /* add an instance (process) to the proc entries */ 3102 static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance) 3103 { 3104 char pidstr[32]; 3105 struct proc_dir_entry *top, *use_count; 3106 struct proc_dir_entry *clients = vchiq_clients_top(); 3107 int pid = instance->pid; 3108 3109 snprintf(pidstr, sizeof(pidstr), "%d", pid); 3110 top = proc_mkdir(pidstr, clients); 3111 if (!top) 3112 goto fail_top; 3113 3114 use_count = create_proc_read_entry("use_count", 3115 0444, top, 3116 proc_read_use_count, 3117 instance); 3118 if (!use_count) 3119 goto fail_use_count; 3120 3121 instance->proc_entry = top; 3122 3123 return 0; 3124 3125 fail_use_count: 3126 remove_proc_entry(top->name, clients); 3127 fail_top: 3128 return -ENOMEM; 3129 } 3130 3131 static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance) 3132 { 3133 struct proc_dir_entry *clients = vchiq_clients_top(); 3134 remove_proc_entry("use_count", instance->proc_entry); 3135 remove_proc_entry(instance->proc_entry->name, clients); 3136 } 3137 3138 #endif 3139 3140 /**************************************************************************** 3141 * 3142 * vchiq_exit - called when the module is unloaded. 3143 * 3144 ***************************************************************************/ 3145 3146 void vchiq_exit(void); 3147 void 3148 vchiq_exit(void) 3149 { 3150 3151 vchiq_platform_exit(&g_state); 3152 if (vchiq_cdev) { 3153 destroy_dev(vchiq_cdev); 3154 vchiq_cdev = NULL; 3155 } 3156 } 3157