/*
 * cx18 mailbox functions
 *
 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 * 02111-1307 USA
 */

#include <stdarg.h>

#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-scb.h"
#include "cx18-irq.h"
#include "cx18-mailbox.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-alsa-pcm.h" /* FIXME make configurable */

static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" };

#define API_FAST (1 << 2) /* Short timeout */
#define API_SLOW (1 << 3) /* Additional 300ms timeout */

struct cx18_api_info {
	u32 cmd;
	u8 flags;		/* Flags, see above */
	u8 rpu;			/* Processing unit */
	const char *name;	/* The name of the command */
};

#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x }
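/*
 * e.g. API_ENTRY(CPU, CX18_CPU_CAPTURE_START, API_SLOW) expands to
 * { CX18_CPU_CAPTURE_START, API_SLOW, CPU, "CX18_CPU_CAPTURE_START" },
 * so each table row carries the command value, its timeout flags, the
 * processing unit that handles it, and a printable name for debug output.
 */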
static const struct cx18_api_info api_info[] = {
	/* MPEG encoder API */
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
	API_ENTRY(CPU, CX18_EPU_DEBUG, 0),
	API_ENTRY(CPU, CX18_CREATE_TASK, 0),
	API_ENTRY(CPU, CX18_DESTROY_TASK, 0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_START, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE, 0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME, 0),
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION, 0),
	API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING, 0),
	API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO, 0),
	API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION, 0),
	API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER, 0),
	API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VFC_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST),
	API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, API_SLOW),
	API_ENTRY(APU, CX18_APU_START, 0),
	API_ENTRY(APU, CX18_APU_STOP, 0),
	API_ENTRY(APU, CX18_APU_RESETAI, 0),
	API_ENTRY(CPU, CX18_CPU_DEBUG_PEEK32, 0),
	API_ENTRY(0, 0, 0),
};

static const struct cx18_api_info *find_api_info(u32 cmd)
{
	int i;

	for (i = 0; api_info[i].cmd; i++)
		if (api_info[i].cmd == cmd)
			return &api_info[i];
	return NULL;
}

/* Call with buf of n*11+1 bytes */
static char *u32arr2hex(u32 data[], int n, char *buf)
{
	char *p;
	int i;

	for (i = 0, p = buf; i < n; i++, p += 11) {
		/* kernel snprintf() appends '\0' always */
		snprintf(p, 12, " %#010x", data[i]);
	}
	*p = '\0';
	return buf;
}
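/*
 * For example, two values 1 and 1024 are rendered as " 0x00000001 0x00000400":
 * each value occupies exactly 11 characters (" %#010x"), which is why the
 * caller must supply a buffer of n*11+1 bytes as noted above.
 */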
static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
{
	char argstr[MAX_MB_ARGUMENTS*11+1];

	if (!(cx18_debug & CX18_DBGFLG_API))
		return;

	CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s"
		       "\n", name, mb->request, mb->ack, mb->cmd, mb->error,
		       u32arr2hex(mb->args, MAX_MB_ARGUMENTS, argstr));
}


/*
 * Functions that run in a work_queue work handling context
 */

static void cx18_mdl_send_to_dvb(struct cx18_stream *s, struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (s->dvb == NULL || !s->dvb->enabled || mdl->bytesused == 0)
		return;

	/* We ignore mdl and buf readpos accounting here - it doesn't matter */

	/* The likely case */
	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		if (buf->bytesused)
			dvb_dmx_swfilter(&s->dvb->demux,
					 buf->buf, buf->bytesused);
		return;
	}

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;
		dvb_dmx_swfilter(&s->dvb->demux, buf->buf, buf->bytesused);
	}
}

static void cx18_mdl_send_to_videobuf(struct cx18_stream *s,
				      struct cx18_mdl *mdl)
{
	struct cx18_videobuf_buffer *vb_buf;
	struct cx18_buffer *buf;
	u8 *p;
	u32 offset = 0;
	int dispatch = 0;

	if (mdl->bytesused == 0)
		return;

	/* Acquire a videobuf buffer, clone to it, and release it */
	spin_lock(&s->vb_lock);
	if (list_empty(&s->vb_capture))
		goto out;

	vb_buf = list_first_entry(&s->vb_capture, struct cx18_videobuf_buffer,
				  vb.queue);

	p = videobuf_to_vmalloc(&vb_buf->vb);
	if (!p)
		goto out;

	offset = vb_buf->bytes_used;
	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;

		if ((offset + buf->bytesused) <= vb_buf->vb.bsize) {
			memcpy(p + offset, buf->buf, buf->bytesused);
			offset += buf->bytesused;
			vb_buf->bytes_used += buf->bytesused;
		}
	}

	/* If we've filled the buffer as per the caller's resolution, dispatch it */
	if (vb_buf->bytes_used >= s->vb_bytes_per_frame) {
		dispatch = 1;
		vb_buf->bytes_used = 0;
	}

	if (dispatch) {
		vb_buf->vb.ts = ktime_to_timeval(ktime_get());
		list_del(&vb_buf->vb.queue);
		vb_buf->vb.state = VIDEOBUF_DONE;
		wake_up(&vb_buf->vb.done);
	}

	mod_timer(&s->vb_timeout, msecs_to_jiffies(2000) + jiffies);

out:
	spin_unlock(&s->vb_lock);
}

static void cx18_mdl_send_to_alsa(struct cx18 *cx, struct cx18_stream *s,
				  struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (mdl->bytesused == 0)
		return;

	/* We ignore mdl and buf readpos accounting here - it doesn't matter */

	/* The likely case */
	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		if (buf->bytesused)
			cx->pcm_announce_callback(cx->alsa, buf->buf,
						  buf->bytesused);
		return;
	}

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;
		cx->pcm_announce_callback(cx->alsa, buf->buf, buf->bytesused);
	}
}
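/*
 * Handle a CX18_EPU_DMA_DONE notification from the encoder firmware:
 * look up the stream for the reported task handle, pull back each
 * acknowledged MDL, hand its payload to the DVB, ALSA or videobuf layer
 * (or leave it on the stream's full queue for the read() path), and then
 * refill the firmware's MDL queue.
 */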
static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 handle, mdl_ack_count, id;
	struct cx18_mailbox *mb;
	struct cx18_mdl_ack *mdl_ack;
	struct cx18_stream *s;
	struct cx18_mdl *mdl;
	int i;

	mb = &order->mb;
	handle = mb->args[0];
	s = cx18_handle_to_stream(cx, handle);

	if (s == NULL) {
		CX18_WARN("Got DMA done notification for unknown/inactive"
			  " handle %d, %s mailbox seq no %d\n", handle,
			  (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
			  "stale" : "good", mb->request);
		return;
	}

	mdl_ack_count = mb->args[2];
	mdl_ack = order->mdl_ack;
	for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
		id = mdl_ack->id;
		/*
		 * Simple integrity check for processing a stale (and possibly
		 * inconsistent mailbox): make sure the MDL id is in the
		 * valid range for the stream.
		 *
		 * We go through the trouble of dealing with stale mailboxes
		 * because most of the time, the mailbox data is still valid and
		 * unchanged (and in practice the firmware ping-pongs the
		 * two mdl_ack buffers so mdl_acks are not stale).
		 *
		 * There are occasions when we get a half changed mailbox,
		 * which this check catches for a handle & id mismatch. If the
		 * handle and id do correspond, the worst case is that we
		 * completely lost the old MDL, but pick up the new MDL
		 * early (but the new mdl_ack is guaranteed to be good in this
		 * case as the firmware wouldn't point us to a new mdl_ack until
		 * it's filled in).
		 *
		 * cx18_queue_get_mdl() will detect the lost MDLs
		 * and send them back to q_free for fw rotation eventually.
		 */
		if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
		    !(id >= s->mdl_base_idx &&
		      id < (s->mdl_base_idx + s->buffers))) {
			CX18_WARN("Fell behind! Ignoring stale mailbox with "
				  " inconsistent data. Lost MDL for mailbox "
				  "seq no %d\n", mb->request);
			break;
		}
		mdl = cx18_queue_get_mdl(s, id, mdl_ack->data_used);

		CX18_DEBUG_HI_DMA("DMA DONE for %s (MDL %d)\n", s->name, id);
		if (mdl == NULL) {
			CX18_WARN("Could not find MDL %d for stream %s\n",
				  id, s->name);
			continue;
		}

		CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n",
				  s->name, mdl->bytesused);

		if (s->type == CX18_ENC_STREAM_TYPE_TS) {
			cx18_mdl_send_to_dvb(s, mdl);
			cx18_enqueue(s, mdl, &s->q_free);
		} else if (s->type == CX18_ENC_STREAM_TYPE_PCM) {
			/* Pass the data to cx18-alsa */
			if (cx->pcm_announce_callback != NULL) {
				cx18_mdl_send_to_alsa(cx, s, mdl);
				cx18_enqueue(s, mdl, &s->q_free);
			} else {
				cx18_enqueue(s, mdl, &s->q_full);
			}
		} else if (s->type == CX18_ENC_STREAM_TYPE_YUV) {
			cx18_mdl_send_to_videobuf(s, mdl);
			cx18_enqueue(s, mdl, &s->q_free);
		} else {
			cx18_enqueue(s, mdl, &s->q_full);
			if (s->type == CX18_ENC_STREAM_TYPE_IDX)
				cx18_stream_rotate_idx_mdls(cx);
		}
	}
	/* Put as many MDLs as possible back into fw use */
	cx18_stream_load_fw_queue(s);

	wake_up(&cx->dma_waitq);
	if (s->id != -1)
		wake_up(&s->waitq);
}

static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order)
{
	char *p;
	char *str = order->str;

	CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
	p = strchr(str, '.');
	if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
		CX18_INFO("FW version: %s\n", p - 1);
}

static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order)
{
	switch (order->rpu) {
	case CPU:
	{
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			epu_dma_done(cx, order);
			break;
		case CX18_EPU_DEBUG:
			epu_debug(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	}
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
}

static
void free_in_work_order(struct cx18 *cx, struct cx18_in_work_order *order)
{
	atomic_set(&order->pending, 0);
}

void cx18_in_work_handler(struct work_struct *work)
{
	struct cx18_in_work_order *order =
		container_of(work, struct cx18_in_work_order, work);
	struct cx18 *cx = order->cx;
	epu_cmd(cx, order);
	free_in_work_order(cx, order);
}


/*
 * Functions that run in an interrupt handling context
 */
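/*
 * Acknowledge an incoming XPU->EPU mailbox by echoing its request number
 * into the ack field and raising the corresponding ack interrupt, unless
 * the sending RPU has already moved on (overwrote the request or self-ack'ed
 * the mailbox), in which case the work order is flagged as stale instead.
 */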
static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	struct cx18_mailbox __iomem *ack_mb;
	u32 ack_irq, req;

	switch (order->rpu) {
	case APU:
		ack_irq = IRQ_EPU_TO_APU_ACK;
		ack_mb = &cx->scb->apu2epu_mb;
		break;
	case CPU:
		ack_irq = IRQ_EPU_TO_CPU_ACK;
		ack_mb = &cx->scb->cpu2epu_mb;
		break;
	default:
		CX18_WARN("Unhandled RPU (%d) for command %x ack\n",
			  order->rpu, order->mb.cmd);
		return;
	}

	req = order->mb.request;
	/* Don't ack if the RPU has gotten impatient and timed us out */
	if (req != cx18_readl(cx, &ack_mb->request) ||
	    req == cx18_readl(cx, &ack_mb->ack)) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
				"incoming %s to EPU mailbox (sequence no. %u) "
				"while processing\n",
				rpu_str[order->rpu], rpu_str[order->rpu], req);
		order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
		return;
	}
	cx18_writel(cx, req, &ack_mb->ack);
	cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
	return;
}

static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 handle, mdl_ack_offset, mdl_ack_count;
	struct cx18_mailbox *mb;
	int i;

	mb = &order->mb;
	handle = mb->args[0];
	mdl_ack_offset = mb->args[1];
	mdl_ack_count = mb->args[2];

	if (handle == CX18_INVALID_TASK_HANDLE ||
	    mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
		if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
			mb_ack_irq(cx, order);
		return -1;
	}

	for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count;
	     i += sizeof(u32))
		((u32 *)order->mdl_ack)[i / sizeof(u32)] =
			cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);
	return 1;
}

static
int epu_debug_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 str_offset;
	char *str = order->str;

	str[0] = '\0';
	str_offset = order->mb.args[1];
	if (str_offset) {
		cx18_setup_page(cx, str_offset);
		cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252);
		str[252] = '\0';
		cx18_setup_page(cx, SCB_OFFSET);
	}

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);

	return str_offset ? 1 : 0;
}

static inline
int epu_cmd_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	int ret = -1;

	switch (order->rpu) {
	case CPU:
	{
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			ret = epu_dma_done_irq(cx, order);
			break;
		case CX18_EPU_DEBUG:
			ret = epu_debug_irq(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	}
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
	return ret;
}

static inline
struct cx18_in_work_order *alloc_in_work_order_irq(struct cx18 *cx)
{
	int i;
	struct cx18_in_work_order *order = NULL;

	for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
		/*
		 * We only need "pending" atomic to inspect its contents,
		 * and need not do a check and set because:
		 * 1. Any work handler thread only clears "pending" and only
		 * on one, particular work order at a time, per handler thread.
		 * 2. "pending" is only set here, and we're serialized because
		 * we're called in an IRQ handler context.
		 */
		if (atomic_read(&cx->in_work_order[i].pending) == 0) {
			order = &cx->in_work_order[i];
			atomic_set(&order->pending, 1);
			break;
		}
	}
	return order;
}
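/*
 * Top-level handling of incoming XPU->EPU mailbox traffic in IRQ context:
 * grab a free work order, snapshot the mailbox contents into it (reading
 * the ack field last so a stale, self-ack'ed mailbox can be detected),
 * do the minimal per-command IRQ-context work, and defer the rest to the
 * work queue.
 */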
void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
	struct cx18_mailbox __iomem *mb;
	struct cx18_mailbox *order_mb;
	struct cx18_in_work_order *order;
	int submit;
	int i;

	switch (rpu) {
	case CPU:
		mb = &cx->scb->cpu2epu_mb;
		break;
	case APU:
		mb = &cx->scb->apu2epu_mb;
		break;
	default:
		return;
	}

	order = alloc_in_work_order_irq(cx);
	if (order == NULL) {
		CX18_WARN("Unable to find blank work order form to schedule "
			  "incoming mailbox command processing\n");
		return;
	}

	order->flags = 0;
	order->rpu = rpu;
	order_mb = &order->mb;

	/* mb->cmd and mb->args[0] through mb->args[2] */
	for (i = 0; i < 4; i++)
		(&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);

	/* mb->request and mb->ack. N.B. we want to read mb->ack last */
	for (i = 0; i < 2; i++)
		(&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);

	if (order_mb->request == order_mb->ack) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
				"incoming %s to EPU mailbox (sequence no. %u)"
				"\n",
				rpu_str[rpu], rpu_str[rpu], order_mb->request);
		if (cx18_debug & CX18_DBGFLG_WARN)
			dump_mb(cx, order_mb, "incoming");
		order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
	}

	/*
	 * Individual EPU command processing is responsible for ack-ing
	 * a non-stale mailbox as soon as possible
	 */
	submit = epu_cmd_irq(cx, order);
	if (submit > 0) {
		queue_work(cx->in_work_queue, &order->work);
	}
}


/*
 * Functions called from a non-interrupt, non work_queue context
 */
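/*
 * Send one command to the APU or CPU via its EPU->XPU mailbox and wait for
 * the acknowledgement: write cmd and args, bump the request sequence number,
 * raise the SW1 interrupt, then sleep until the XPU copies the request
 * number into the ack field (or the timeout expires). Commands flagged
 * API_SLOW get an extra 300 ms afterwards for firmware follow-up work.
 */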
static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	const struct cx18_api_info *info = find_api_info(cmd);
	u32 irq, req, ack, err;
	struct cx18_mailbox __iomem *mb;
	wait_queue_head_t *waitq;
	struct mutex *mb_lock;
	unsigned long int t0, timeout, ret;
	int i;
	char argstr[MAX_MB_ARGUMENTS*11+1];
	DEFINE_WAIT(w);

	if (info == NULL) {
		CX18_WARN("unknown cmd %x\n", cmd);
		return -EINVAL;
	}

	if (cx18_debug & CX18_DBGFLG_API) { /* only call u32arr2hex if needed */
		if (cmd == CX18_CPU_DE_SET_MDL) {
			if (cx18_debug & CX18_DBGFLG_HIGHVOL)
				CX18_DEBUG_HI_API("%s\tcmd %#010x args%s\n",
						  info->name, cmd,
						  u32arr2hex(data, args, argstr));
		} else
			CX18_DEBUG_API("%s\tcmd %#010x args%s\n",
				       info->name, cmd,
				       u32arr2hex(data, args, argstr));
	}

	switch (info->rpu) {
	case APU:
		waitq = &cx->mb_apu_waitq;
		mb_lock = &cx->epu2apu_mb_lock;
		irq = IRQ_EPU_TO_APU;
		mb = &cx->scb->epu2apu_mb;
		break;
	case CPU:
		waitq = &cx->mb_cpu_waitq;
		mb_lock = &cx->epu2cpu_mb_lock;
		irq = IRQ_EPU_TO_CPU;
		mb = &cx->scb->epu2cpu_mb;
		break;
	default:
		CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
		return -EINVAL;
	}

	mutex_lock(mb_lock);
	/*
	 * Wait for an in-use mailbox to complete
	 *
	 * If the XPU is responding with Ack's, the mailbox shouldn't be in
	 * a busy state, since we serialize access to it on our end.
	 *
	 * If the wait for ack after sending a previous command was interrupted
	 * by a signal, we may get here and find a busy mailbox. After waiting,
	 * mark it "not busy" from our end, if the XPU hasn't ack'ed it still.
	 */
	req = cx18_readl(cx, &mb->request);
	timeout = msecs_to_jiffies(10);
	ret = wait_event_timeout(*waitq,
				 (ack = cx18_readl(cx, &mb->ack)) == req,
				 timeout);
	if (req != ack) {
		/* waited long enough, make the mbox "not busy" from our end */
		cx18_writel(cx, req, &mb->ack);
		CX18_ERR("mbox was found stuck busy when setting up for %s; "
			 "clearing busy and trying to proceed\n", info->name);
	} else if (ret != timeout)
		CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
			       jiffies_to_msecs(timeout-ret));

	/* Build the outgoing mailbox */
	req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;

	cx18_writel(cx, cmd, &mb->cmd);
	for (i = 0; i < args; i++)
		cx18_writel(cx, data[i], &mb->args[i]);
	cx18_writel(cx, 0, &mb->error);
	cx18_writel(cx, req, &mb->request);
	cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */

	/*
	 * Notify the XPU and wait for it to send an Ack back
	 */
	timeout = msecs_to_jiffies((info->flags & API_FAST) ? 10 : 20);

	CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
			  irq, info->name);

	/* So we don't miss the wakeup, prepare to wait before notifying fw */
	prepare_to_wait(waitq, &w, TASK_UNINTERRUPTIBLE);
	cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);

	t0 = jiffies;
	ack = cx18_readl(cx, &mb->ack);
	if (ack != req) {
		schedule_timeout(timeout);
		ret = jiffies - t0;
		ack = cx18_readl(cx, &mb->ack);
	} else {
		ret = jiffies - t0;
	}

	finish_wait(waitq, &w);

	if (req != ack) {
		mutex_unlock(mb_lock);
		if (ret >= timeout) {
			/* Timed out */
			CX18_DEBUG_WARN("sending %s timed out waiting %d msecs "
					"for RPU acknowledgement\n",
					info->name, jiffies_to_msecs(ret));
		} else {
			CX18_DEBUG_WARN("woken up before mailbox ack was ready "
					"after submitting %s to RPU. only "
					"waited %d msecs on req %u but awakened"
					" with unmatched ack %u\n",
					info->name,
					jiffies_to_msecs(ret),
					req, ack);
		}
		return -EINVAL;
	}

	if (ret >= timeout)
		CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment "
				"sending %s; timed out waiting %d msecs\n",
				info->name, jiffies_to_msecs(ret));
	else
		CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
				  jiffies_to_msecs(ret), info->name);

	/* Collect data returned by the XPU */
	for (i = 0; i < MAX_MB_ARGUMENTS; i++)
		data[i] = cx18_readl(cx, &mb->args[i]);
	err = cx18_readl(cx, &mb->error);
	mutex_unlock(mb_lock);

	/*
	 * Wait for XPU to perform extra actions for the caller in some cases.
	 * e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all MDLs
	 * back in a burst shortly thereafter
	 */
	if (info->flags & API_SLOW)
		cx18_msleep_timeout(300, 0);

	if (err)
		CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
			       info->name);
	return err ? -EIO : 0;
}

int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	return cx18_api_call(cx, cmd, args, data);
}
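/*
 * Push the cached DNR settings to the encoder as three
 * CX18_CPU_SET_FILTER_PARAM calls: one for the spatial filter (second
 * argument 1), one for the temporal filter (0), and one with the remaining
 * filter mode bits (2, cx->filter_mode >> 2), stopping at the first call
 * that fails.
 */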
static int cx18_set_filter_param(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	u32 mode;
	int ret;

	mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0);
	ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 1, mode, cx->spatial_strength);
	mode = (cx->filter_mode & 2) ? 2 : (cx->temporal_strength ? 1 : 0);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
				    s->handle, 0, mode, cx->temporal_strength);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
				    s->handle, 2, cx->filter_mode >> 2, 0);
	return ret;
}

int cx18_api_func(void *priv, u32 cmd, int in, int out,
		  u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct cx18_stream *s = priv;
	struct cx18 *cx = s->cx;

	switch (cmd) {
	case CX2341X_ENC_SET_OUTPUT_PORT:
		return 0;
	case CX2341X_ENC_SET_FRAME_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6,
				 s->handle, 0, 0, 0, 0, data[0]);
	case CX2341X_ENC_SET_FRAME_SIZE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3,
				 s->handle, data[1], data[0]);
	case CX2341X_ENC_SET_STREAM_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_ASPECT_RATIO:
		return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2,
				 s->handle, data[0]);

	case CX2341X_ENC_SET_GOP_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3,
				 s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_GOP_CLOSURE:
		return 0;
	case CX2341X_ENC_SET_AUDIO_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_MUTE_AUDIO:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_BIT_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5,
				 s->handle, data[0], data[1], data[2], data[3]);
	case CX2341X_ENC_MUTE_VIDEO:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_FRAME_DROP_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_MISC:
		return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4,
				 s->handle, data[0], data[1], data[2]);
	case CX2341X_ENC_SET_DNR_FILTER_MODE:
		cx->filter_mode = (data[0] & 3) | (data[1] << 2);
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_DNR_FILTER_PROPS:
		cx->spatial_strength = data[0];
		cx->temporal_strength = data[1];
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3,
				 s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_CORING_LEVELS:
		return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5,
				 s->handle, data[0], data[1], data[2], data[3]);
	}
	CX18_WARN("Unknown cmd %x\n", cmd);
	return 0;
}

int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS],
		     u32 cmd, int args, ...)
{
	va_list ap;
	int i;

	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);
	return cx18_api(cx, cmd, args, data);
}

int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
{
	u32 data[MAX_MB_ARGUMENTS];
	va_list ap;
	int i;

	if (cx == NULL) {
		CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
		return 0;
	}
	if (args > MAX_MB_ARGUMENTS) {
		CX18_ERR("args too big (cmd=%x)\n", cmd);
		args = MAX_MB_ARGUMENTS;
	}
	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);
	return cx18_api(cx, cmd, args, data);
}