// SPDX-License-Identifier: GPL-2.0-or-later
/*
   cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices

   Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
	Based on cx88 driver

 */

#include "cx231xx.h"
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/i2c.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/drv-intf/msp3400.h>
#include <media/tuner.h>

#include "cx231xx-vbi.h"

static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
	char *errmsg = "Unknown";

	switch (status) {
	case -ENOENT:
		errmsg = "unlinked synchronously";
		break;
	case -ECONNRESET:
		errmsg = "unlinked asynchronously";
		break;
	case -ENOSR:
		errmsg = "Buffer error (overrun)";
		break;
	case -EPIPE:
		errmsg = "Stalled (device not responding)";
		break;
	case -EOVERFLOW:
		errmsg = "Babble (bad cable?)";
		break;
	case -EPROTO:
		errmsg = "Bit-stuff error (bad cable?)";
		break;
	case -EILSEQ:
		errmsg = "CRC/Timeout (could be anything)";
		break;
	case -ETIME:
		errmsg = "Device does not respond";
		break;
	}
	if (packet < 0) {
		dev_err(dev->dev,
			"URB status %d [%s].\n", status, errmsg);
	} else {
		dev_err(dev->dev,
			"URB packet %d, status %d [%s].\n",
			packet, status, errmsg);
	}
}

/*
 * Controls the isoc copy of each urb packet
 */
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	int rc = 1;
	unsigned char *p_buffer;
	u32 bytes_parsed = 0, buffer_size = 0;
	u8 sav_eav = 0;

	if (!dev)
		return 0;

	if (dev->state & DEV_DISCONNECTED)
		return 0;

	if (urb->status < 0) {
		print_err_status(dev, -1, urb->status);
		if (urb->status == -ENOENT)
			return 0;
	}

	/* get buffer pointer and length */
	p_buffer = urb->transfer_buffer;
	buffer_size = urb->actual_length;

	if (buffer_size > 0) {
		bytes_parsed = 0;

		if (dma_q->is_partial_line) {
			/* Handle the case where we were working on a partial
			   line */
			sav_eav = dma_q->last_sav;
		} else {
			/* Check for a SAV/EAV overlapping the
			   buffer boundary */
			sav_eav = cx231xx_find_boundary_SAV_EAV(p_buffer,
							dma_q->partial_buf,
							&bytes_parsed);
		}

		sav_eav &= 0xF0;
		/* Get the first line if we have some portion of an SAV/EAV
		   from the last buffer or a partial line */
		if (sav_eav) {
			bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
				sav_eav,		     /* SAV/EAV */
				p_buffer + bytes_parsed,     /* p_buffer */
				buffer_size - bytes_parsed); /* buffer size */
		}

		/* Now parse data that is completely in this buffer */
		dma_q->is_partial_line = 0;

		while (bytes_parsed < buffer_size) {
			u32 bytes_used = 0;

			sav_eav = cx231xx_find_next_SAV_EAV(
				p_buffer + bytes_parsed,     /* p_buffer */
				buffer_size - bytes_parsed,  /* buffer size */
				&bytes_used); /* bytes used to get SAV/EAV */

			bytes_parsed += bytes_used;

			sav_eav &= 0xF0;
			if (sav_eav && (bytes_parsed < buffer_size)) {
				bytes_parsed += cx231xx_get_vbi_line(dev,
					dma_q, sav_eav,		  /* SAV/EAV */
					p_buffer + bytes_parsed,  /* p_buffer */
					buffer_size - bytes_parsed); /* buf size */
			}
		}

		/* Save the last four bytes of the buffer so we can
		   check the buffer boundary condition next time */
		memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
		bytes_parsed = 0;
	}

	return rc;
}
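
/*
 * Parsing note (added for clarity, not from the original sources): the data
 * returned in each URB is delimited by BT.656-style SAV/EAV codes.  Only the
 * high nibble matters here, which is why every code is masked with 0xF0
 * before it is matched against SAV_VBI_FIELD1/SAV_VBI_FIELD2 in
 * cx231xx_get_vbi_line().  A VBI line -- or even the SAV/EAV code itself --
 * may straddle a URB boundary, so the last four bytes of each buffer are
 * kept in dma_q->partial_buf and the last code seen in dma_q->last_sav,
 * letting the next call to cx231xx_isoc_vbi_copy() resume mid-line.
 */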

/* ------------------------------------------------------------------
	Vbi buf operations
   ------------------------------------------------------------------*/

static int vbi_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);

	*nplanes = 1;
	sizes[0] = (dev->width * height * 2 * 2);
	return 0;
}

/* This is called *without* dev->slock held; please keep it that way */
static int vbi_buf_prepare(struct vb2_buffer *vb)
{
	struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	u32 height = 0;
	u32 size;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	size = ((dev->width << 1) * height * 2);

	if (vb2_plane_size(vb, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, size);
	return 0;
}

static void vbi_buf_queue(struct vb2_buffer *vb)
{
	struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct cx231xx_buffer *buf =
	    container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	list_add_tail(&buf->list, &vidq->active);
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}

static void return_all_buffers(struct cx231xx *dev,
			       enum vb2_buffer_state state)
{
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	struct cx231xx_buffer *buf, *node;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	dev->vbi_mode.bulk_ctl.buf = NULL;
	list_for_each_entry_safe(buf, node, &vidq->active, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}

static int vbi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	int ret;

	vidq->sequence = 0;
	ret = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
				    CX231XX_NUM_VBI_BUFS,
				    dev->vbi_mode.alt_max_pkt_size[0],
				    cx231xx_isoc_vbi_copy);
	if (ret)
		return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
	return ret;
}

static void vbi_stop_streaming(struct vb2_queue *vq)
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);

	return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}

struct vb2_ops cx231xx_vbi_qops = {
	.queue_setup = vbi_queue_setup,
	.buf_prepare = vbi_buf_prepare,
	.buf_queue = vbi_buf_queue,
	.start_streaming = vbi_start_streaming,
	.stop_streaming = vbi_stop_streaming,
};
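
/*
 * Usage note (illustrative only): this ops table is consumed by whichever
 * code initialises the VBI vb2_queue -- in this driver that normally happens
 * at probe time in cx231xx-video.c.  A minimal sketch of the expected
 * wiring, assuming the queue and lock members on struct cx231xx keep their
 * usual names, looks roughly like:
 *
 *	struct vb2_queue *q = &dev->vbiq;
 *
 *	q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
 *	q->drv_priv = dev;
 *	q->buf_struct_size = sizeof(struct cx231xx_buffer);
 *	q->ops = &cx231xx_vbi_qops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &dev->lock;
 *	ret = vb2_queue_init(q);
 *
 * vbi_queue_setup()/vbi_buf_prepare() size each plane for a full VBI frame:
 * two fields of PAL_VBI_LINES or NTSC_VBI_LINES lines, each line
 * dev->width * 2 bytes wide.
 */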

/* ------------------------------------------------------------------
	URB control
   ------------------------------------------------------------------*/

/*
 * IRQ callback, called by URB callback
 */
static void cx231xx_irq_vbi_callback(struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	unsigned long flags;

	switch (urb->status) {
	case 0:			/* success */
	case -ETIMEDOUT:	/* NAK */
		break;
	case -ECONNRESET:	/* kill */
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:		/* error */
		dev_err(dev->dev,
			"urb completion error %d.\n", urb->status);
		break;
	}

	/* Copy data from URB */
	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);

	/* Reset status */
	urb->status = 0;

	urb->status = usb_submit_urb(urb, GFP_ATOMIC);
	if (urb->status) {
		dev_err(dev->dev, "urb resubmit failed (error=%i)\n",
			urb->status);
	}
}

/*
 * Stop and Deallocate URBs
 */
void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
{
	struct urb *urb;
	int i;

	dev_dbg(dev->dev, "called cx231xx_uninit_vbi_isoc\n");

	dev->vbi_mode.bulk_ctl.nfields = -1;
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = dev->vbi_mode.bulk_ctl.urb[i];
		if (urb) {
			if (!irqs_disabled())
				usb_kill_urb(urb);
			else
				usb_unlink_urb(urb);

			if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
				kfree(dev->vbi_mode.bulk_ctl.
				      transfer_buffer[i]);
				dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
				    NULL;
			}
			usb_free_urb(urb);
			dev->vbi_mode.bulk_ctl.urb[i] = NULL;
		}
		dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
	}

	kfree(dev->vbi_mode.bulk_ctl.urb);
	kfree(dev->vbi_mode.bulk_ctl.transfer_buffer);

	dev->vbi_mode.bulk_ctl.urb = NULL;
	dev->vbi_mode.bulk_ctl.transfer_buffer = NULL;
	dev->vbi_mode.bulk_ctl.num_bufs = 0;

	cx231xx_capture_start(dev, 0, Vbi);
}
EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);
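
/*
 * Lifecycle note (added for clarity): cx231xx_init_vbi_isoc() below begins
 * by calling cx231xx_uninit_vbi_isoc() above, so every error path in its
 * allocation loop can simply call the uninit helper and return; partially
 * allocated URB and transfer-buffer arrays are released there.  Teardown
 * also has to cope with being reached from atomic context, which is why it
 * only uses usb_kill_urb() when interrupts are enabled and falls back to
 * usb_unlink_urb() otherwise.
 */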

/*
 * Allocate URBs and start IRQ
 */
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
			  int num_bufs, int max_pkt_size,
			  int (*bulk_copy) (struct cx231xx *dev,
					    struct urb *urb))
{
	struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
	int i;
	int sb_size, pipe;
	struct urb *urb;
	int rc;

	dev_dbg(dev->dev, "called cx231xx_init_vbi_isoc\n");

	/* De-allocate all pending stuff */
	cx231xx_uninit_vbi_isoc(dev);

	/* clear if any halt */
	usb_clear_halt(dev->udev,
		       usb_rcvbulkpipe(dev->udev,
				       dev->vbi_mode.end_point_addr));

	dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
	dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;
	dma_q->pos = 0;
	dma_q->is_partial_line = 0;
	dma_q->last_sav = 0;
	dma_q->current_field = -1;
	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ?
				  PAL_VBI_LINES : NTSC_VBI_LINES);
	dma_q->lines_completed = 0;
	for (i = 0; i < 8; i++)
		dma_q->partial_buf[i] = 0;

	dev->vbi_mode.bulk_ctl.urb = kcalloc(num_bufs, sizeof(void *),
					     GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.urb) {
		dev_err(dev->dev,
			"cannot alloc memory for usb buffers\n");
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.transfer_buffer =
	    kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
		dev_err(dev->dev,
			"cannot allocate memory for usb transfer buffers\n");
		kfree(dev->vbi_mode.bulk_ctl.urb);
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
	dev->vbi_mode.bulk_ctl.buf = NULL;

	sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;

	/* allocate urbs and transfer buffers */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}
		dev->vbi_mode.bulk_ctl.urb[i] = urb;
		urb->transfer_flags = 0;

		dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
		    kzalloc(sb_size, GFP_KERNEL);
		if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
			dev_err(dev->dev,
				"unable to allocate %i bytes for transfer buffer %i\n",
				sb_size, i);
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}

		pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
		usb_fill_bulk_urb(urb, dev->udev, pipe,
				  dev->vbi_mode.bulk_ctl.transfer_buffer[i],
				  sb_size, cx231xx_irq_vbi_callback, dma_q);
	}

	init_waitqueue_head(&dma_q->wq);

	/* submit URBs and enable IRQ */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
		if (rc) {
			dev_err(dev->dev,
				"submit of urb %i failed (error=%i)\n", i, rc);
			cx231xx_uninit_vbi_isoc(dev);
			return rc;
		}
	}

	cx231xx_capture_start(dev, 1, Vbi);

	return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc);

u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			 u8 sav_eav, u8 *p_buffer, u32 buffer_size)
{
	u32 bytes_copied = 0;
	int current_field = -1;

	switch (sav_eav) {
	case SAV_VBI_FIELD1:
		current_field = 1;
		break;
	case SAV_VBI_FIELD2:
		current_field = 2;
		break;
	default:
		break;
	}

	if (current_field < 0)
		return bytes_copied;

	dma_q->last_sav = sav_eav;

	bytes_copied =
	    cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size,
				  current_field);

	return bytes_copied;
}

/*
 * Announce that a buffer was filled and request the next one
 */
static inline void vbi_buffer_filled(struct cx231xx *dev,
				     struct cx231xx_dmaqueue *dma_q,
				     struct cx231xx_buffer *buf)
{
	/* Advise that buffer was filled */
	/* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.index); */

	buf->vb.sequence = dma_q->sequence++;
	buf->vb.vb2_buf.timestamp = ktime_get_ns();

	dev->vbi_mode.bulk_ctl.buf = NULL;

	list_del(&buf->list);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
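
/*
 * Copy-path note (added for clarity): cx231xx_copy_vbi_line() below is a
 * small state machine driven by the URB parser above.  dma_q tracks the
 * current field, how many bytes of the current VBI line are still
 * outstanding (bytes_left_in_line), and how many full lines have already
 * landed in the destination buffer (lines_completed).  Because a single
 * line can be split across URB packets, the function may run several times
 * per line; is_partial_line stays set until bytes_left_in_line reaches
 * zero, at which point the line counter advances and, once both fields are
 * complete, vbi_buffer_filled() hands the vb2 buffer back to userspace.
 */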

u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			  u8 *p_line, u32 length, int field_number)
{
	u32 bytes_to_copy;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width * 2;

	if (dma_q->current_field == -1) {
		/* Just starting up */
		cx231xx_reset_vbi_buffer(dev, dma_q);
	}

	if (dma_q->current_field != field_number)
		dma_q->lines_completed = 0;

	/* get the buffer pointer */
	buf = dev->vbi_mode.bulk_ctl.buf;

	/* Remember the field number for next time */
	dma_q->current_field = field_number;

	bytes_to_copy = dma_q->bytes_left_in_line;
	if (bytes_to_copy > length)
		bytes_to_copy = length;

	if (dma_q->lines_completed >= dma_q->lines_per_field) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return 0;
	}

	dma_q->is_partial_line = 1;

	/* If we don't have a buffer, just return the number of bytes we would
	   have copied if we had a buffer. */
	if (!buf) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return bytes_to_copy;
	}

	/* copy the data to video buffer */
	cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);

	dma_q->pos += bytes_to_copy;
	dma_q->bytes_left_in_line -= bytes_to_copy;

	if (dma_q->bytes_left_in_line == 0) {
		dma_q->bytes_left_in_line = _line_size;
		dma_q->lines_completed++;
		dma_q->is_partial_line = 0;

		if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) {
			vbi_buffer_filled(dev, dma_q, buf);

			dma_q->pos = 0;
			dma_q->lines_completed = 0;
			cx231xx_reset_vbi_buffer(dev, dma_q);
		}
	}

	return bytes_to_copy;
}

/*
 * generic routine to get the next available buffer
 */
static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
				    struct cx231xx_buffer **buf)
{
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	char *outp;

	if (list_empty(&dma_q->active)) {
		dev_err(dev->dev, "No active queue to serve\n");
		dev->vbi_mode.bulk_ctl.buf = NULL;
		*buf = NULL;
		return;
	}

	/* Get the next buffer */
	*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);

	/* Cleans up buffer - Useful for testing for frame/URB loss */
	outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
	memset(outp, 0, vb2_plane_size(&(*buf)->vb.vb2_buf, 0));

	dev->vbi_mode.bulk_ctl.buf = *buf;
}

void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	struct cx231xx_buffer *buf;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL) {
		/* first try to get the buffer */
		get_next_vbi_buf(dma_q, &buf);

		dma_q->pos = 0;
		dma_q->current_field = -1;
	}

	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_completed = 0;
}
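
/*
 * Layout note (added for clarity): cx231xx_do_vbi_copy() below writes into
 * the vb2 plane as one contiguous frame, field 1 followed by field 2.  The
 * destination offset for an incoming chunk is
 *
 *	offset = lines_completed * line_size
 *		 + (line_size - bytes_left_in_line)	  // already copied
 *		 + (field 2 ? line_size * lines_per_field : 0);
 *
 * where line_size is dev->width * 2 bytes.  For example, with a purely
 * hypothetical 720-pixel-wide VBI line (1440 bytes), the third line of
 * field 1 with 100 bytes already copied would land at
 * 2 * 1440 + 100 = 2980 bytes into the plane.
 */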

int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			u8 *p_buffer, u32 bytes_to_copy)
{
	u8 *p_out_buffer = NULL;
	u32 current_line_bytes_copied = 0;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width << 1;
	void *startwrite;
	int offset, lencopy;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL)
		return -EINVAL;

	p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);

	if (dma_q->bytes_left_in_line != _line_size) {
		current_line_bytes_copied =
		    _line_size - dma_q->bytes_left_in_line;
	}

	offset = (dma_q->lines_completed * _line_size) +
		 current_line_bytes_copied;

	if (dma_q->current_field == 2) {
		/* Populate the second half of the frame */
		offset += (dev->width * 2 * dma_q->lines_per_field);
	}

	/* prepare destination address */
	startwrite = p_out_buffer + offset;

	lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
		  bytes_to_copy : dma_q->bytes_left_in_line;

	memcpy(startwrite, p_buffer, lencopy);

	return 0;
}

u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	if (dma_q->lines_completed == height && dma_q->current_field == 2)
		return 1;
	else
		return 0;
}