/**
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* ---- Include Files ---------------------------------------------------- */

#include "vchiq_core.h"
#include "vchiq_arm.h"

/* ---- Public Variables ------------------------------------------------- */

/* ---- Private Constants and Types -------------------------------------- */

struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;
	struct list_head list;
};

struct vchiq_instance_struct {
	VCHIQ_STATE_T *state;

	int connected;

	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;
};

static VCHIQ_STATUS_T
vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, VCHIQ_BULK_DIR_T dir);

/****************************************************************************
*
* vchiq_initialise
*
***************************************************************************/
#define VCHIQ_INIT_RETRIES 10
VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	VCHIQ_STATE_T *state;
	VCHIQ_INSTANCE_T instance = NULL;
	int i;

	vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);

	/* VideoCore may not be ready due to boot up timing.  It may never be
	** ready if kernel and firmware are mismatched, so don't block
	** forever.
	*/
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		state = vchiq_get_state();
		if (state)
			break;
		udelay(500);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		vchiq_log_error(vchiq_core_log_level,
			"%s: videocore not initialized\n", __func__);
		goto failed;
	} else if (i > 0) {
		vchiq_log_warning(vchiq_core_log_level,
			"%s: videocore initialized after %d retries\n",
			__func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		vchiq_log_error(vchiq_core_log_level,
			"%s: error allocating vchiq instance\n", __func__);
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	lmutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instanceOut = instance;

	status = VCHIQ_SUCCESS;

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_initialise);

/****************************************************************************
*
* vchiq_shutdown
*
***************************************************************************/

VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
{
	VCHIQ_STATUS_T status;
	VCHIQ_STATE_T *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	if (lmutex_lock_interruptible(&state->mutex) != 0)
		return VCHIQ_RETRY;

	/* Remove all services */
	status = vchiq_shutdown_internal(state, instance);

	lmutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	if (status == VCHIQ_SUCCESS) {
		struct list_head *pos, *next;

		list_for_each_safe(pos, next, &instance->bulk_waiter_list) {
			struct bulk_waiter_node *waiter;

			waiter = list_entry(pos, struct bulk_waiter_node,
				list);
			list_del(pos);
			vchiq_log_info(vchiq_arm_log_level,
				"bulk_waiter - cleaned up %x for pid %d",
				(unsigned int)waiter, waiter->pid);
			_sema_destroy(&waiter->bulk_waiter.event);
			kfree(waiter);
		}

		lmutex_destroy(&instance->bulk_waiter_list_mutex);

		kfree(instance);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_shutdown);

/****************************************************************************
*
* vchiq_is_connected
*
***************************************************************************/

static int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
{
	return instance->connected;
}

/****************************************************************************
*
* vchiq_connect
*
***************************************************************************/

VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
{
	VCHIQ_STATUS_T status;
	VCHIQ_STATE_T *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	if (lmutex_lock_interruptible(&state->mutex) != 0) {
		vchiq_log_trace(vchiq_core_log_level,
			"%s: call to lmutex_lock failed", __func__);
		status = VCHIQ_RETRY;
		goto failed;
	}
	status = vchiq_connect_internal(state, instance);

	if (status == VCHIQ_SUCCESS)
		instance->connected = 1;

	lmutex_unlock(&state->mutex);

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d",
		__func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_connect);

/****************************************************************************
*
* vchiq_add_service
*
***************************************************************************/

VCHIQ_STATUS_T vchiq_add_service(
	VCHIQ_INSTANCE_T instance,
	const VCHIQ_SERVICE_PARAMS_T *params,
	VCHIQ_SERVICE_HANDLE_T *phandle)
{
	VCHIQ_STATUS_T status;
	VCHIQ_STATE_T *state = instance->state;
	VCHIQ_SERVICE_T *service = NULL;
	int srvstate;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	srvstate = vchiq_is_connected(instance)
		? VCHIQ_SRVSTATE_LISTENING
		: VCHIQ_SRVSTATE_HIDDEN;

	service = vchiq_add_service_internal(
		state,
		params,
		srvstate,
		instance,
		NULL);

	if (service) {
		*phandle = service->handle;
		status = VCHIQ_SUCCESS;
	} else
		status = VCHIQ_ERROR;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_add_service);

/****************************************************************************
*
* vchiq_open_service
*
***************************************************************************/

VCHIQ_STATUS_T vchiq_open_service(
	VCHIQ_INSTANCE_T instance,
	const VCHIQ_SERVICE_PARAMS_T *params,
	VCHIQ_SERVICE_HANDLE_T *phandle)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	VCHIQ_STATE_T *state = instance->state;
	VCHIQ_SERVICE_T *service = NULL;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state,
		params,
		VCHIQ_SRVSTATE_OPENING,
		instance,
		NULL);

	if (service) {
		*phandle = service->handle;
		status = vchiq_open_service_internal(service,
			(uintptr_t)current);
		if (status != VCHIQ_SUCCESS) {
			vchiq_remove_service(service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_open_service);

VCHIQ_STATUS_T
vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
	void *data, unsigned int size, void *userdata)
{
	return vchiq_bulk_transfer(handle,
		VCHI_MEM_HANDLE_INVALID, data, size, userdata,
		VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
}
EXPORT_SYMBOL(vchiq_queue_bulk_transmit);

VCHIQ_STATUS_T
vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, void *userdata)
{
	return vchiq_bulk_transfer(handle,
		VCHI_MEM_HANDLE_INVALID, data, size, userdata,
		VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
}
EXPORT_SYMBOL(vchiq_queue_bulk_receive);

VCHIQ_STATUS_T
vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
{
	VCHIQ_STATUS_T status;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
		status = vchiq_bulk_transfer(handle,
			VCHI_MEM_HANDLE_INVALID, data, size, userdata,
			mode, VCHIQ_BULK_TRANSMIT);
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		status = vchiq_blocking_bulk_transfer(handle,
			data, size, VCHIQ_BULK_TRANSMIT);
		break;
	default:
		return VCHIQ_ERROR;
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);

VCHIQ_STATUS_T
vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
{
	VCHIQ_STATUS_T status;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
		status = vchiq_bulk_transfer(handle,
			VCHI_MEM_HANDLE_INVALID, data, size, userdata,
			mode, VCHIQ_BULK_RECEIVE);
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		status = vchiq_blocking_bulk_transfer(handle,
			data, size, VCHIQ_BULK_RECEIVE);
		break;
	default:
		return VCHIQ_ERROR;
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_receive);

static VCHIQ_STATUS_T
vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, VCHIQ_BULK_DIR_T dir)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_T *service;
	VCHIQ_STATUS_T status;
	struct bulk_waiter_node *waiter = NULL;
	struct list_head *pos;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	unlock_service(service);

	/* Look for a bulk_waiter left behind by an earlier VCHIQ_RETRY from
	** this thread, so a retried transfer can reuse it. */
	lmutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each(pos, &instance->bulk_waiter_list) {
		if (list_entry(pos, struct bulk_waiter_node,
			list)->pid == current->p_pid) {
			waiter = list_entry(pos,
				struct bulk_waiter_node,
				list);
			list_del(pos);
			break;
		}
	}
	lmutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			if ((bulk->data != data) ||
				(bulk->size != size)) {
				/* This is not a retry of the previous one.
				** Cancel the signal when the transfer
				** completes. */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	}

	if (!waiter) {
		waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level,
				"%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
		data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
		dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		!waiter->bulk_waiter.bulk) {
		VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer
			** completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		_sema_destroy(&waiter->bulk_waiter.event);
		kfree(waiter);
	} else {
		/* The transfer is still outstanding and the caller will be
		** asked to retry - park the waiter so the lookup above can
		** find it on the retried call. */
		waiter->pid = current->p_pid;
		lmutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		lmutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
			"saved bulk_waiter %x for pid %d",
			(unsigned int)waiter, current->p_pid);
	}

	return status;
}
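
/****************************************************************************
*
* Usage sketch (illustrative only)
*
***************************************************************************/

/* A minimal sketch of how a kernel-side client might drive the API exported
** above: initialise an instance, connect, open a service, run one blocking
** bulk transmit, then shut the instance down.  The "EXAM" fourcc, the
** callback and the VCHIQ_SERVICE_PARAMS_T field names are assumptions taken
** from vchiq_if.h rather than anything defined in this file, and the block
** is disabled so it never builds into the driver. */
#if 0
static VCHIQ_STATUS_T example_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *userdata)
{
	/* Release each incoming message once it has been consumed. */
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(handle, header);
	return VCHIQ_SUCCESS;
}

static VCHIQ_STATUS_T example_client(void *buf, unsigned int size)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_HANDLE_T handle;
	VCHIQ_SERVICE_PARAMS_T params;
	VCHIQ_STATUS_T status;

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS)
		return status;

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS)
		goto out;

	params.fourcc = VCHIQ_MAKE_FOURCC('E', 'X', 'A', 'M');
	params.callback = example_callback;
	params.userdata = NULL;
	params.version = 1;
	params.version_min = 1;

	/* Opening (rather than adding) a service requires the instance to be
	** connected, which vchiq_open_service checks itself. */
	status = vchiq_open_service(instance, &params, &handle);
	if (status != VCHIQ_SUCCESS)
		goto out;

	/* Blocking mode routes through vchiq_blocking_bulk_transfer above,
	** so the data has been sent (or the call has failed) on return. */
	status = vchiq_bulk_transmit(handle, buf, size, NULL,
		VCHIQ_BULK_MODE_BLOCKING);

	vchiq_remove_service(handle);
out:
	vchiq_shutdown(instance);
	return status;
}
#endif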