// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-vid-cap.c - video capture support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-rect.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-kthread-cap.h"
#include "vivid-vid-cap.h"

/* Sizes must be in increasing order */
static const struct v4l2_frmsize_discrete webcam_sizes[] = {
	{  320,  180 },
	{  640,  360 },
	{  640,  480 },
	{ 1280,  720 },
	{ 1920, 1080 },
	{ 3840, 2160 },
};

/*
 * Intervals must be listed in order of increasing frame rate. How many of
 * them are available for a given frame size is determined by
 * webcam_ival_count() below.
 */
static const struct v4l2_fract webcam_intervals[] = {
	{  1,   1 },
	{  1,   2 },
	{  1,   4 },
	{  1,   5 },
	{  1,  10 },
	{  2,  25 },
	{  1,  15 }, /* 7 - maximum for 2160p */
	{  1,  25 },
	{  1,  30 }, /* 9 - maximum for 1080p */
	{  1,  40 },
	{  1,  50 },
	{  1,  60 }, /* 12 - maximum for 720p */
	{  1, 120 },
};

/* Limit maximum FPS rates for high resolutions */
#define IVAL_COUNT_720P 12 /* 720p and up is limited to 60 fps */
#define IVAL_COUNT_1080P 9 /* 1080p and up is limited to 30 fps */
#define IVAL_COUNT_2160P 7 /* 2160p and up is limited to 15 fps */

static inline unsigned int webcam_ival_count(const struct vivid_dev *dev,
					     unsigned int frmsize_idx)
{
	if (webcam_sizes[frmsize_idx].height >= 2160)
		return IVAL_COUNT_2160P;

	if (webcam_sizes[frmsize_idx].height >= 1080)
		return IVAL_COUNT_1080P;

	if (webcam_sizes[frmsize_idx].height >= 720)
		return IVAL_COUNT_720P;

	/* For low resolutions, allow all FPS rates */
	return ARRAY_SIZE(webcam_intervals);
}

static int vid_cap_queue_setup(struct vb2_queue *vq,
			       unsigned *nbuffers, unsigned *nplanes,
			       unsigned sizes[], struct device *alloc_devs[])
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned h = dev->fmt_cap_rect.height;
	unsigned p;

	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * You cannot use read() with FIELD_ALTERNATE since the field
		 * information (TOP/BOTTOM) cannot be passed back to the user.
		 */
		if (vb2_fileio_is_active(vq))
			return -EINVAL;
	}

	if (dev->queue_setup_error) {
		/*
		 * Error injection: test what happens if queue_setup() returns
		 * an error.
		 */
		dev->queue_setup_error = false;
		return -EINVAL;
	}
	if (*nplanes) {
		/*
		 * Check if the number of requested planes matches
		 * the number of buffers in the current format. You can't mix that.
		 */
		if (*nplanes != buffers)
			return -EINVAL;
		for (p = 0; p < buffers; p++) {
			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h /
					dev->fmt_cap->vdownsampling[p] +
					dev->fmt_cap->data_offset[p])
				return -EINVAL;
		}
	} else {
		for (p = 0; p < buffers; p++)
			sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
					dev->fmt_cap->vdownsampling[p] +
					dev->fmt_cap->data_offset[p];
	}

	*nplanes = buffers;

	dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
	for (p = 0; p < buffers; p++)
		dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);

	return 0;
}

static int vid_cap_buf_prepare(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size;
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned p;

	dprintk(dev, 1, "%s\n", __func__);

	if (WARN_ON(NULL == dev->fmt_cap))
		return -EINVAL;

	if (dev->buf_prepare_error) {
		/*
		 * Error injection: test what happens if buf_prepare() returns
		 * an error.
		 */
		dev->buf_prepare_error = false;
		return -EINVAL;
	}
	for (p = 0; p < buffers; p++) {
		size = (tpg_g_line_width(&dev->tpg, p) *
			dev->fmt_cap_rect.height) /
			dev->fmt_cap->vdownsampling[p] +
			dev->fmt_cap->data_offset[p];

		if (vb2_plane_size(vb, p) < size) {
			dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
				__func__, p, vb2_plane_size(vb, p), size);
			return -EINVAL;
		}

		vb2_set_plane_payload(vb, p, size);
		vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
	}

	return 0;
}

static void vid_cap_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_timecode *tc = &vbuf->timecode;
	unsigned fps = 25;
	unsigned seq = vbuf->sequence;

	if (!vivid_is_sdtv_cap(dev))
		return;

	/*
	 * Set the timecode. Rarely used, so it is interesting to
	 * test this.
	 */
	vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
	if (dev->std_cap[dev->input] & V4L2_STD_525_60)
		fps = 30;
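	/*
	 * The timecode is derived from the buffer sequence number: 'seq'
	 * counts captured frames, so frames = seq % fps, seconds =
	 * (seq / fps) % 60, and so on for minutes and hours.
	 */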
	tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
	tc->flags = 0;
	tc->frames = seq % fps;
	tc->seconds = (seq / fps) % 60;
	tc->minutes = (seq / (60 * fps)) % 60;
	tc->hours = (seq / (60 * 60 * fps)) % 24;
}

static void vid_cap_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);

	dprintk(dev, 1, "%s\n", __func__);

	spin_lock(&dev->slock);
	list_add_tail(&buf->list, &dev->vid_cap_active);
	spin_unlock(&dev->slock);
}

static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned i;
	int err;

	dev->vid_cap_seq_count = 0;
	dprintk(dev, 1, "%s\n", __func__);
	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
	if (dev->start_streaming_error) {
		dev->start_streaming_error = false;
		err = -EINVAL;
	} else {
		err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
	}
	if (err) {
		struct vivid_buffer *buf, *tmp;

		list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf,
					VB2_BUF_STATE_QUEUED);
		}
	}
	return err;
}

/* abort streaming and wait for last buffer */
static void vid_cap_stop_streaming(struct vb2_queue *vq)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);

	dprintk(dev, 1, "%s\n", __func__);
	vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
}

static void vid_cap_buf_request_complete(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
}

const struct vb2_ops vivid_vid_cap_qops = {
	.queue_setup = vid_cap_queue_setup,
	.buf_prepare = vid_cap_buf_prepare,
	.buf_finish = vid_cap_buf_finish,
	.buf_queue = vid_cap_buf_queue,
	.start_streaming = vid_cap_start_streaming,
	.stop_streaming = vid_cap_stop_streaming,
	.buf_request_complete = vid_cap_buf_request_complete,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

/*
 * Determine the 'picture' quality based on the current TV frequency: either
 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
 * signal or NOISE for no signal.
 */
void vivid_update_quality(struct vivid_dev *dev)
{
	unsigned freq_modulus;

	if (dev->input_is_connected_to_output[dev->input]) {
		/*
		 * The 'noise' will only be replaced by the actual video
		 * if the output video matches the input video settings.
		 */
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_hdmi_cap(dev) &&
	    VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_sdtv_cap(dev) &&
	    VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (!vivid_is_tv_cap(dev)) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
		return;
	}

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just noise.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (freq_modulus > 2 * 16) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
			next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
		return;
	}
	if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
		tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
	else
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
}

/*
 * Get the current picture quality and the associated afc value.
 */
static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
{
	unsigned freq_modulus;

	if (afc)
		*afc = 0;
	if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
	    tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
		return tpg_g_quality(&dev->tpg);

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just gray.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (afc)
		*afc = freq_modulus - 1 * 16;
	return TPG_QUAL_GRAY;
}

enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return dev->std_aspect_ratio[dev->input];

	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_aspect_ratio[dev->input];

	return TPG_VIDEO_ASPECT_IMAGE;
}

static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return (dev->std_cap[dev->input] & V4L2_STD_525_60) ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	if (vivid_is_hdmi_cap(dev) &&
	    dev->src_rect.width == 720 && dev->src_rect.height <= 576)
		return dev->src_rect.height == 480 ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	return TPG_PIXEL_ASPECT_SQUARE;
}

/*
 * Called whenever the format has to be reset which can occur when
 * changing inputs, standard, timings, etc.
 */
void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
{
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
	u32 dims[V4L2_CTRL_MAX_DIMS] = {};
	unsigned size;
	u64 pixelclock;

	switch (dev->input_type[dev->input]) {
	case WEBCAM:
	default:
		dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
		dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
		dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
		dev->field_cap = V4L2_FIELD_NONE;
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case TV:
	case SVID:
		dev->field_cap = dev->tv_field_cap;
		dev->src_rect.width = 720;
		if (dev->std_cap[dev->input] & V4L2_STD_525_60) {
			dev->src_rect.height = 480;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
			dev->service_set_cap = V4L2_SLICED_CAPTION_525;
		} else {
			dev->src_rect.height = 576;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
			dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
		}
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case HDMI:
		dev->src_rect.width = bt->width;
		dev->src_rect.height = bt->height;
		size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
		if (dev->reduced_fps && can_reduce_fps(bt)) {
			pixelclock = div_u64(bt->pixelclock * 1000, 1001);
			bt->flags |= V4L2_DV_FL_REDUCED_FPS;
		} else {
			pixelclock = bt->pixelclock;
			bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
		}
		dev->timeperframe_vid_cap = (struct v4l2_fract) {
			size / 100, (u32)pixelclock / 100
		};
		if (bt->interlaced)
			dev->field_cap = V4L2_FIELD_ALTERNATE;
		else
			dev->field_cap = V4L2_FIELD_NONE;

		/*
		 * We can be called from within s_ctrl, in that case we can't
		 * set/get controls. Luckily we don't need to in that case.
		 */
		if (keep_controls || !dev->colorspace)
			break;
		if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
			if (bt->width == 720 && bt->height <= 576)
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			else
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
		} else {
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
		}
		tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
		break;
	}
	vivid_update_quality(dev);
	tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
	dev->crop_cap = dev->src_rect;
	dev->crop_bounds_cap = dev->src_rect;
	dev->compose_cap = dev->crop_cap;
	if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
		dev->compose_cap.height /= 2;
	dev->fmt_cap_rect = dev->compose_cap;
	tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
	tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
	tpg_update_mv_step(&dev->tpg);

	/*
	 * We can be called from within s_ctrl, in that case we can't
	 * modify controls. Luckily we don't need to in that case.
	 */
	if (keep_controls)
		return;

	dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV);
	dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV);
	v4l2_ctrl_modify_dimensions(dev->pixel_array, dims);
}

/* Map the field to something that is valid for the current input */
static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
{
	if (vivid_is_sdtv_cap(dev)) {
		switch (field) {
		case V4L2_FIELD_INTERLACED_TB:
		case V4L2_FIELD_INTERLACED_BT:
		case V4L2_FIELD_SEQ_TB:
		case V4L2_FIELD_SEQ_BT:
		case V4L2_FIELD_TOP:
		case V4L2_FIELD_BOTTOM:
		case V4L2_FIELD_ALTERNATE:
			return field;
		case V4L2_FIELD_INTERLACED:
		default:
			return V4L2_FIELD_INTERLACED;
		}
	}
	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_cap[dev->input].bt.interlaced ?
			V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE;
	return V4L2_FIELD_NONE;
}

static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_colorspace(&dev->tpg);
	return dev->colorspace_out;
}

static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_xfer_func(&dev->tpg);
	return dev->xfer_func_out;
}

static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_ycbcr_enc(&dev->tpg);
	return dev->ycbcr_enc_out;
}

static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_hsv_enc(&dev->tpg);
	return dev->hsv_enc_out;
}

static unsigned vivid_quantization_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_quantization(&dev->tpg);
	return dev->quantization_out;
}

int vivid_g_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	unsigned p;

	mp->width = dev->fmt_cap_rect.width;
	mp->height = dev->fmt_cap_rect.height;
	mp->field = dev->field_cap;
	mp->pixelformat = dev->fmt_cap->fourcc;
	mp->colorspace = vivid_colorspace_cap(dev);
	mp->xfer_func = vivid_xfer_func_cap(dev);
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
		mp->hsv_enc = vivid_hsv_enc_cap(dev);
	else
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	mp->quantization = vivid_quantization_cap(dev);
	mp->num_planes = dev->fmt_cap->buffers;
	for (p = 0; p < mp->num_planes; p++) {
		mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
		mp->plane_fmt[p].sizeimage =
			(tpg_g_line_width(&dev->tpg, p) * mp->height) /
			dev->fmt_cap->vdownsampling[p] +
			dev->fmt_cap->data_offset[p];
	}
	return 0;
}

int vivid_try_fmt_vid_cap(struct file *file, void *priv,
			  struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	unsigned bytesperline, max_bpl;
	unsigned factor = 1;
	unsigned w, h;
	unsigned p;
	bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC);

	fmt = vivid_get_format(dev, mp->pixelformat);
	if (!fmt) {
		dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
			mp->pixelformat);
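		/* Unsupported pixelformat requested: fall back to YUYV. */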
		mp->pixelformat = V4L2_PIX_FMT_YUYV;
		fmt = vivid_get_format(dev, mp->pixelformat);
	}

	mp->field = vivid_field_cap(dev, mp->field);
	if (vivid_is_webcam(dev)) {
		const struct v4l2_frmsize_discrete *sz =
			v4l2_find_nearest_size(webcam_sizes,
					       ARRAY_SIZE(webcam_sizes), width,
					       height, mp->width, mp->height);

		w = sz->width;
		h = sz->height;
	} else if (vivid_is_sdtv_cap(dev)) {
		w = 720;
		h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576;
	} else {
		w = dev->src_rect.width;
		h = dev->src_rect.height;
	}
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;
	if (vivid_is_webcam(dev) ||
	    (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
		mp->width = w;
		mp->height = h / factor;
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };

		v4l2_rect_set_min_size(&r, &vivid_min_rect);
		v4l2_rect_set_max_size(&r, &vivid_max_rect);
		if (dev->has_scaler_cap && !dev->has_compose_cap) {
			struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };

			v4l2_rect_set_max_size(&r, &max_r);
		} else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
			v4l2_rect_set_max_size(&r, &dev->src_rect);
		} else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
			v4l2_rect_set_min_size(&r, &dev->src_rect);
		}
		mp->width = r.width;
		mp->height = r.height / factor;
	}

	/* This driver supports custom bytesperline values */

	mp->num_planes = fmt->buffers;
	for (p = 0; p < fmt->buffers; p++) {
		/* Calculate the minimum supported bytesperline value */
		bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
		/* Calculate the maximum supported bytesperline value */
		max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;

		if (pfmt[p].bytesperline > max_bpl)
			pfmt[p].bytesperline = max_bpl;
		if (pfmt[p].bytesperline < bytesperline)
			pfmt[p].bytesperline = bytesperline;

		pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
				fmt->vdownsampling[p] + fmt->data_offset[p];

		memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
	}
	for (p = fmt->buffers; p < fmt->planes; p++)
		pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
			(fmt->bit_depth[p] / fmt->vdownsampling[p])) /
			(fmt->bit_depth[0] / fmt->vdownsampling[0]);

	if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace))
		mp->colorspace = vivid_colorspace_cap(dev);

	if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func))
		mp->xfer_func = vivid_xfer_func_cap(dev);

	if (fmt->color_enc == TGP_COLOR_ENC_HSV) {
		if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc))
			mp->hsv_enc = vivid_hsv_enc_cap(dev);
	} else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) {
		if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc))
			mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	} else {
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	}

	if (fmt->color_enc == TGP_COLOR_ENC_YCBCR ||
	    fmt->color_enc == TGP_COLOR_ENC_RGB) {
		if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization))
			mp->quantization = vivid_quantization_cap(dev);
	} else {
		mp->quantization = vivid_quantization_cap(dev);
	}

	memset(mp->reserved, 0, sizeof(mp->reserved));
	return 0;
}
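/*
 * The S_FMT path: the request is first normalized by vivid_try_fmt_vid_cap(),
 * changes are refused while the vb2 queue has buffers, and the resulting
 * format is then used to update the crop/compose rectangles and reprogram
 * the test pattern generator.
 */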
int vivid_s_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	struct vb2_queue *q = &dev->vb_vid_cap_q;
	int ret = vivid_try_fmt_vid_cap(file, priv, f);
	unsigned factor = 1;
	unsigned p;
	unsigned i;

	if (ret < 0)
		return ret;

	if (vb2_is_busy(q)) {
		dprintk(dev, 1, "%s device busy\n", __func__);
		return -EBUSY;
	}

	dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;

	/* Note: the webcam input doesn't support scaling, cropping or composing */

	if (!vivid_is_webcam(dev) &&
	    (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		if (dev->has_scaler_cap) {
			if (dev->has_compose_cap)
				v4l2_rect_map_inside(compose, &r);
			else
				*compose = r;
			if (dev->has_crop_cap && !dev->has_compose_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					r.width / MAX_ZOOM,
					factor * r.height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					r.width * MAX_ZOOM,
					factor * r.height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			} else if (dev->has_crop_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					compose->width / MAX_ZOOM,
					factor * compose->height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					compose->width * MAX_ZOOM,
					factor * compose->height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap && !dev->has_compose_cap) {
			r.height *= factor;
			v4l2_rect_set_size_to(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			r = *crop;
			r.height /= factor;
			v4l2_rect_set_size_to(compose, &r);
		} else if (!dev->has_crop_cap) {
			v4l2_rect_map_inside(compose, &r);
		} else {
			r.height *= factor;
			v4l2_rect_set_max_size(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			compose->top *= factor;
			compose->height *= factor;
			v4l2_rect_set_size_to(compose, crop);
			v4l2_rect_map_inside(compose, &r);
			compose->top /= factor;
			compose->height /= factor;
		}
	} else if (vivid_is_webcam(dev)) {
		unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx);

		/* Guaranteed to be a match */
		for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
			if (webcam_sizes[i].width == mp->width &&
			    webcam_sizes[i].height == mp->height)
				break;
		dev->webcam_size_idx = i;
		if (dev->webcam_ival_idx >= ival_sz)
			dev->webcam_ival_idx = ival_sz - 1;
		vivid_update_format_cap(dev, false);
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		v4l2_rect_set_size_to(compose, &r);
		r.height *= factor;
		v4l2_rect_set_size_to(crop, &r);
	}

	dev->fmt_cap_rect.width = mp->width;
	dev->fmt_cap_rect.height = mp->height;
	tpg_s_buf_height(&dev->tpg, mp->height);
	tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
	for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
		tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
	dev->field_cap = mp->field;
	if (dev->field_cap == V4L2_FIELD_ALTERNATE)
		tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
	else
		tpg_s_field(&dev->tpg, dev->field_cap, false);
	tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
	if (vivid_is_sdtv_cap(dev))
		dev->tv_field_cap = mp->field;
	tpg_update_mv_step(&dev->tpg);
	dev->tpg.colorspace = mp->colorspace;
	dev->tpg.xfer_func = mp->xfer_func;
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR)
		dev->tpg.ycbcr_enc = mp->ycbcr_enc;
	else
		dev->tpg.hsv_enc = mp->hsv_enc;
	dev->tpg.quantization = mp->quantization;

	return 0;
}
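/*
 * The single-planar and multiplanar ioctl handlers below differ only in the
 * buffer type they accept: a vivid video node is either single- or
 * multiplanar (dev->multiplanar), and the single-planar handlers convert to
 * and from the multiplanar representation via fmt_sp2mp_func().
 */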
int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_g_fmt_vid_cap(file, priv, f);
}

int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_try_fmt_vid_cap(file, priv, f);
}

int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_s_fmt_vid_cap(file, priv, f);
}

int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
			 struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
}

int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
			   struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
}

int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
			 struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
}

int vivid_vid_cap_g_selection(struct file *file, void *priv,
			      struct v4l2_selection *sel)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -ENODATA;

	sel->r.left = sel->r.top = 0;
	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->crop_cap;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->src_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = vivid_max_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->compose_cap;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->fmt_cap_rect;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
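/*
 * Selection handling: the crop rectangle selects the part of the source that
 * is captured, the compose rectangle places it inside the capture buffer.
 * Without a scaler both must keep the same size; with a scaler they may
 * differ by at most a factor of MAX_ZOOM in either direction.
 */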
int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
	int ret;

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -ENODATA;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
		v4l2_rect_set_max_size(&s->r, &dev->src_rect);
		v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
		s->r.top /= factor;
		s->r.height /= factor;
		if (dev->has_scaler_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;
			struct v4l2_rect max_rect = {
				0, 0,
				s->r.width * MAX_ZOOM,
				s->r.height * MAX_ZOOM
			};
			struct v4l2_rect min_rect = {
				0, 0,
				s->r.width / MAX_ZOOM,
				s->r.height / MAX_ZOOM
			};

			v4l2_rect_set_min_size(&fmt, &min_rect);
			if (!dev->has_compose_cap)
				v4l2_rect_set_max_size(&fmt, &max_rect);
			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			if (dev->has_compose_cap) {
				v4l2_rect_set_min_size(compose, &min_rect);
				v4l2_rect_set_max_size(compose, &max_rect);
				v4l2_rect_map_inside(compose, &fmt);
			}
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
		} else if (dev->has_compose_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;

			v4l2_rect_set_min_size(&fmt, &s->r);
			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
			v4l2_rect_set_size_to(compose, &s->r);
			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
		} else {
			if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
			v4l2_rect_set_size_to(compose, &s->r);
			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
			tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
		}
		s->r.top *= factor;
		s->r.height *= factor;
		*crop = s->r;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
		v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
		if (dev->has_scaler_cap) {
			struct v4l2_rect max_rect = {
				0, 0,
				dev->src_rect.width * MAX_ZOOM,
				(dev->src_rect.height / factor) * MAX_ZOOM
			};

			v4l2_rect_set_max_size(&s->r, &max_rect);
			if (dev->has_crop_cap) {
				struct v4l2_rect min_rect = {
					0, 0,
					s->r.width / MAX_ZOOM,
					(s->r.height * factor) / MAX_ZOOM
				};
				struct v4l2_rect max_rect = {
					0, 0,
					s->r.width * MAX_ZOOM,
					(s->r.height * factor) * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_rect);
				v4l2_rect_set_max_size(crop, &max_rect);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap) {
			s->r.top *= factor;
			s->r.height *= factor;
			v4l2_rect_set_max_size(&s->r, &dev->src_rect);
			v4l2_rect_set_size_to(crop, &s->r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			s->r.top /= factor;
			s->r.height /= factor;
		} else {
			v4l2_rect_set_size_to(&s->r, &dev->src_rect);
			s->r.height /= factor;
		}
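		/* The compose rectangle must always fit inside the capture format. */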
		v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
		*compose = s->r;
		break;
	default:
		return -EINVAL;
	}

	tpg_s_crop_compose(&dev->tpg, crop, compose);
	return 0;
}

int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv,
				int type, struct v4l2_fract *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (vivid_get_pixel_aspect(dev)) {
	case TPG_PIXEL_ASPECT_NTSC:
		f->numerator = 11;
		f->denominator = 10;
		break;
	case TPG_PIXEL_ASPECT_PAL:
		f->numerator = 54;
		f->denominator = 59;
		break;
	default:
		break;
	}
	return 0;
}

static const struct v4l2_audio vivid_audio_inputs[] = {
	{ 0, "TV", V4L2_AUDCAP_STEREO },
	{ 1, "Line-In", V4L2_AUDCAP_STEREO },
};

int vidioc_enum_input(struct file *file, void *priv,
		      struct v4l2_input *inp)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (inp->index >= dev->num_inputs)
		return -EINVAL;

	inp->type = V4L2_INPUT_TYPE_CAMERA;
	switch (dev->input_type[inp->index]) {
	case WEBCAM:
		snprintf(inp->name, sizeof(inp->name), "Webcam %03u-%u",
			 dev->inst, dev->input_name_counter[inp->index]);
		inp->capabilities = 0;
		break;
	case TV:
		snprintf(inp->name, sizeof(inp->name), "TV %03u-%u",
			 dev->inst, dev->input_name_counter[inp->index]);
		inp->type = V4L2_INPUT_TYPE_TUNER;
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case SVID:
		snprintf(inp->name, sizeof(inp->name), "S-Video %03u-%u",
			 dev->inst, dev->input_name_counter[inp->index]);
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case HDMI:
		snprintf(inp->name, sizeof(inp->name), "HDMI %03u-%u",
			 dev->inst, dev->input_name_counter[inp->index]);
		inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
		if (dev->edid_blocks == 0 ||
		    dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL)
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK ||
			 dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE)
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		break;
	}
	if (dev->sensor_hflip)
		inp->status |= V4L2_IN_ST_HFLIP;
	if (dev->sensor_vflip)
		inp->status |= V4L2_IN_ST_VFLIP;
	if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
		if (dev->std_signal_mode[dev->input] == NO_SIGNAL) {
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		} else if (dev->std_signal_mode[dev->input] == NO_LOCK) {
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		} else if (vivid_is_tv_cap(dev)) {
			switch (tpg_g_quality(&dev->tpg)) {
			case TPG_QUAL_GRAY:
				inp->status |= V4L2_IN_ST_COLOR_KILL;
				break;
			case TPG_QUAL_NOISE:
				inp->status |= V4L2_IN_ST_NO_H_LOCK;
				break;
			default:
				break;
			}
		}
	}
	return 0;
}

int vidioc_g_input(struct file *file, void *priv, unsigned *i)
{
	struct vivid_dev *dev = video_drvdata(file);

	*i = dev->input;
	return 0;
}

int vidioc_s_input(struct file *file, void *priv, unsigned i)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
	unsigned brightness;

	if (i >= dev->num_inputs)
		return -EINVAL;

	if (i == dev->input)
		return 0;

	if (vb2_is_busy(&dev->vb_vid_cap_q) ||
	    vb2_is_busy(&dev->vb_vbi_cap_q) ||
	    vb2_is_busy(&dev->vb_meta_cap_q))
		return -EBUSY;

	dev->input = i;
	dev->vid_cap_dev.tvnorms = 0;
	if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
		dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
		dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
	}
	dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	vivid_update_format_cap(dev, false);

	if (dev->colorspace) {
		switch (dev->input_type[i]) {
		case WEBCAM:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			break;
		case TV:
		case SVID:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			break;
		case HDMI:
			if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
				if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
				else
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			} else {
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			}
			break;
		}
	}

	/*
	 * Modify the brightness range depending on the input.
	 * This makes it easy to use vivid to test if applications can
	 * handle control range modifications and is also how this is
	 * typically used in practice as different inputs may be hooked
	 * up to different receivers with different control ranges.
	 */
	brightness = 128 * i + dev->input_brightness[i];
	v4l2_ctrl_modify_range(dev->brightness,
			       128 * i, 255 + 128 * i, 1, 128 + 128 * i);
	v4l2_ctrl_s_ctrl(dev->brightness, brightness);

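	/*
	 * With this mapping input 0 keeps the usual 0..255 brightness range,
	 * while input i is shifted to 128 * i .. 255 + 128 * i with a default
	 * of 128 + 128 * i, so switching inputs exercises applications that
	 * must follow control range changes.
	 */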
	/* Restore per-input states. */
	v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode,
			   vivid_is_hdmi_cap(dev));
	v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) &&
			   dev->dv_timings_signal_mode[dev->input] ==
			   SELECTED_DV_TIMINGS);
	v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev));
	v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) &&
			   dev->std_signal_mode[dev->input]);

	if (vivid_is_hdmi_cap(dev)) {
		v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode,
				 dev->dv_timings_signal_mode[dev->input]);
		v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings,
				 dev->query_dv_timings[dev->input]);
	} else if (vivid_is_sdtv_cap(dev)) {
		v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode,
				 dev->std_signal_mode[dev->input]);
		v4l2_ctrl_s_ctrl(dev->ctrl_standard,
				 dev->std_signal_mode[dev->input]);
	}

	return 0;
}

int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	*vin = vivid_audio_inputs[vin->index];
	return 0;
}

int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	*vin = vivid_audio_inputs[dev->tv_audio_input];
	return 0;
}

int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	dev->tv_audio_input = vin->index;
	return 0;
}

int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	vf->frequency = dev->tv_freq;
	return 0;
}

int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
	if (vivid_is_tv_cap(dev))
		vivid_update_quality(dev);
	return 0;
}

int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vt->index != 0)
		return -EINVAL;
	if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
		return -EINVAL;
	dev->tv_audmode = vt->audmode;
	return 0;
}
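/*
 * Fake tuner status: the reported signal strength and AFC follow the test
 * pattern quality, and the available audio subchannels are derived from the
 * channel number (TV frequencies are in units of 1/16 MHz, with a fake
 * channel every 6 MHz).
 */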
int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);
	enum tpg_quality qual;

	if (vt->index != 0)
		return -EINVAL;

	vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
			 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
	vt->audmode = dev->tv_audmode;
	vt->rangelow = MIN_TV_FREQ;
	vt->rangehigh = MAX_TV_FREQ;
	qual = vivid_get_quality(dev, &vt->afc);
	if (qual == TPG_QUAL_COLOR)
		vt->signal = 0xffff;
	else if (qual == TPG_QUAL_GRAY)
		vt->signal = 0x8000;
	else
		vt->signal = 0;
	if (qual == TPG_QUAL_NOISE) {
		vt->rxsubchans = 0;
	} else if (qual == TPG_QUAL_GRAY) {
		vt->rxsubchans = V4L2_TUNER_SUB_MONO;
	} else {
		unsigned int channel_nr = dev->tv_freq / (6 * 16);
		unsigned int options =
			(dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3;

		switch (channel_nr % options) {
		case 0:
			vt->rxsubchans = V4L2_TUNER_SUB_MONO;
			break;
		case 1:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
			break;
		case 2:
			if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M)
				vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
			else
				vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
			break;
		case 3:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
			break;
		}
	}
	strscpy(vt->name, "TV Tuner", sizeof(vt->name));
	return 0;
}

/* Must remain in sync with the vivid_ctrl_standard_strings array */
const v4l2_std_id vivid_standard[] = {
	V4L2_STD_NTSC_M,
	V4L2_STD_NTSC_M_JP,
	V4L2_STD_NTSC_M_KR,
	V4L2_STD_NTSC_443,
	V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
	V4L2_STD_PAL_I,
	V4L2_STD_PAL_DK,
	V4L2_STD_PAL_M,
	V4L2_STD_PAL_N,
	V4L2_STD_PAL_Nc,
	V4L2_STD_PAL_60,
	V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
	V4L2_STD_SECAM_DK,
	V4L2_STD_SECAM_L,
	V4L2_STD_SECAM_LC,
	V4L2_STD_UNKNOWN
};

/* Must remain in sync with the vivid_standard array */
const char * const vivid_ctrl_standard_strings[] = {
	"NTSC-M",
	"NTSC-M-JP",
	"NTSC-M-KR",
	"NTSC-443",
	"PAL-BGH",
	"PAL-I",
	"PAL-DK",
	"PAL-M",
	"PAL-N",
	"PAL-Nc",
	"PAL-60",
	"SECAM-BGH",
	"SECAM-DK",
	"SECAM-L",
	"SECAM-Lc",
	NULL,
};

int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int last = dev->query_std_last[dev->input];

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_signal_mode[dev->input] == NO_SIGNAL ||
	    dev->std_signal_mode[dev->input] == NO_LOCK) {
		*id = V4L2_STD_UNKNOWN;
		return 0;
	}
	if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
		*id = V4L2_STD_UNKNOWN;
	} else if (dev->std_signal_mode[dev->input] == CURRENT_STD) {
		*id = dev->std_cap[dev->input];
	} else if (dev->std_signal_mode[dev->input] == SELECTED_STD) {
		*id = dev->query_std[dev->input];
	} else {
		*id = vivid_standard[last];
		dev->query_std_last[dev->input] =
			(last + 1) % ARRAY_SIZE(vivid_standard);
	}

	return 0;
}

int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_cap[dev->input] == id)
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
		return -EBUSY;
	dev->std_cap[dev->input] = id;
	vivid_update_format_cap(dev, false);
	return 0;
}

static void find_aspect_ratio(u32 width, u32 height,
			      u32 *num, u32 *denom)
{
	if (!(height % 3) && ((height * 4 / 3) == width)) {
		*num = 4;
		*denom = 3;
	} else if (!(height % 9) && ((height * 16 / 9) == width)) {
		*num = 16;
		*denom = 9;
	} else if (!(height % 10) && ((height * 16 / 10) == width)) {
		*num = 16;
		*denom = 10;
	} else if (!(height % 4) && ((height * 5 / 4) == width)) {
		*num = 5;
		*denom = 4;
	} else if (!(height % 9) && ((height * 15 / 9) == width)) {
		*num = 15;
		*denom = 9;
	} else { /* default to 16:9 */
		*num = 16;
		*denom = 9;
	}
}

static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
{
	struct v4l2_bt_timings *bt = &timings->bt;
	u32 total_h_pixel;
	u32 total_v_lines;
	u32 h_freq;

	if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
				   NULL, NULL))
		return false;

	total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
	total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);

	h_freq = (u32)bt->pixelclock / total_h_pixel;

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
		if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
				    bt->polarities, bt->interlaced, timings))
			return true;
	}

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
		struct v4l2_fract aspect_ratio;

		find_aspect_ratio(bt->width, bt->height,
				  &aspect_ratio.numerator,
				  &aspect_ratio.denominator);
		if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
				    bt->polarities, bt->interlaced,
				    aspect_ratio, timings))
			return true;
	}
	return false;
}

int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
			       struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
				      0, NULL, NULL) &&
	    !valid_cvt_gtf_timings(timings))
		return -EINVAL;

	if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input],
				  0, false))
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->dv_timings_cap[dev->input] = *timings;
	vivid_update_format_cap(dev, false);
	return 0;
}

int vidioc_query_dv_timings(struct file *file, void *_fh,
			    struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int input = dev->input;
	unsigned int last = dev->query_dv_timings_last[input];

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (dev->dv_timings_signal_mode[input] == NO_SIGNAL ||
	    dev->edid_blocks == 0)
		return -ENOLINK;
	if (dev->dv_timings_signal_mode[input] == NO_LOCK)
		return -ENOLCK;
	if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) {
		timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
		return -ERANGE;
	}
	if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) {
		*timings = dev->dv_timings_cap[input];
	} else if (dev->dv_timings_signal_mode[input] ==
		   SELECTED_DV_TIMINGS) {
		*timings =
			v4l2_dv_timings_presets[dev->query_dv_timings[input]];
	} else {
		*timings =
			v4l2_dv_timings_presets[last];
		dev->query_dv_timings_last[input] =
			(last + 1) % dev->query_dv_timings_size;
	}
	return 0;
}

void vivid_update_outputs(struct vivid_dev *dev)
{
	u32 edid_present = 0;

	if (!dev || !dev->num_outputs)
		return;
	for (unsigned int i = 0, j = 0; i < dev->num_outputs; i++) {
		if (dev->output_type[i] != HDMI)
			continue;

		struct vivid_dev *dev_rx = dev->output_to_input_instance[i];

		if (dev_rx && dev_rx->edid_blocks)
			edid_present |= 1 << j;
		j++;
	}
	v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, edid_present);
	v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, edid_present);
	v4l2_ctrl_s_ctrl(dev->ctrl_tx_rxsense, edid_present);
}
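/*
 * An HDMI input's EDID state is visible to the output instances connected to
 * it: setting or clearing the EDID updates the transmitters' EDID-present,
 * hotplug and rxsense controls, and the CEC physical address derived from
 * that EDID.
 */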
void vivid_update_connected_outputs(struct vivid_dev *dev)
{
	u16 phys_addr = cec_get_edid_phys_addr(dev->edid, dev->edid_blocks * 128, NULL);

	for (unsigned int i = 0, j = 0; i < dev->num_inputs; i++) {
		unsigned int menu_idx =
			dev->input_is_connected_to_output[i];

		if (dev->input_type[i] != HDMI)
			continue;
		j++;
		if (menu_idx < FIXED_MENU_ITEMS)
			continue;

		struct vivid_dev *dev_tx = vivid_ctrl_hdmi_to_output_instance[menu_idx];
		unsigned int output = vivid_ctrl_hdmi_to_output_index[menu_idx];

		if (!dev_tx)
			continue;

		unsigned int hdmi_output = dev_tx->output_to_iface_index[output];

		vivid_update_outputs(dev_tx);
		if (dev->edid_blocks) {
			cec_s_phys_addr(dev_tx->cec_tx_adap[hdmi_output],
					v4l2_phys_addr_for_input(phys_addr, j),
					false);
		} else {
			cec_phys_addr_invalidate(dev_tx->cec_tx_adap[hdmi_output]);
		}
	}
}

int vidioc_s_edid(struct file *file, void *_fh,
		  struct v4l2_edid *edid)
{
	struct vivid_dev *dev = video_drvdata(file);
	u16 phys_addr;
	int ret;

	memset(edid->reserved, 0, sizeof(edid->reserved));
	if (edid->pad >= dev->num_inputs)
		return -EINVAL;
	if (dev->input_type[edid->pad] != HDMI || edid->start_block)
		return -EINVAL;
	if (edid->blocks == 0) {
		if (vb2_is_busy(&dev->vb_vid_cap_q))
			return -EBUSY;
		dev->edid_blocks = 0;
		vivid_update_connected_outputs(dev);
		return 0;
	}
	if (edid->blocks > dev->edid_max_blocks) {
		edid->blocks = dev->edid_max_blocks;
		return -E2BIG;
	}
	phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
	ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
	if (ret)
		return ret;

	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->edid_blocks = edid->blocks;
	memcpy(dev->edid, edid->edid, edid->blocks * 128);

	vivid_update_connected_outputs(dev);
	return 0;
}

int vidioc_enum_framesizes(struct file *file, void *fh,
			   struct v4l2_frmsizeenum *fsize)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
		return -EINVAL;
	if (vivid_get_format(dev, fsize->pixel_format) == NULL)
		return -EINVAL;
	if (vivid_is_webcam(dev)) {
		if (fsize->index >= ARRAY_SIZE(webcam_sizes))
			return -EINVAL;
		fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
		fsize->discrete = webcam_sizes[fsize->index];
		return 0;
	}
	if (fsize->index)
		return -EINVAL;
	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = MIN_WIDTH;
	fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
	fsize->stepwise.step_width = 2;
	fsize->stepwise.min_height = MIN_HEIGHT;
	fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
	fsize->stepwise.step_height = 2;
	return 0;
}
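/*
 * Frame interval enumeration: the webcam inputs expose a discrete list whose
 * length depends on the frame size (see webcam_ival_count() above); all
 * other inputs simply report the current timeperframe.
 */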
/* timeperframe is arbitrary and continuous */
int vidioc_enum_frameintervals(struct file *file, void *priv,
			       struct v4l2_frmivalenum *fival)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	int i;

	fmt = vivid_get_format(dev, fival->pixel_format);
	if (!fmt)
		return -EINVAL;

	if (!vivid_is_webcam(dev)) {
		if (fival->index)
			return -EINVAL;
		if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
			return -EINVAL;
		if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
			return -EINVAL;
		fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
		fival->discrete = dev->timeperframe_vid_cap;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
		if (fival->width == webcam_sizes[i].width &&
		    fival->height == webcam_sizes[i].height)
			break;
	if (i == ARRAY_SIZE(webcam_sizes))
		return -EINVAL;
	if (fival->index >= webcam_ival_count(dev, i))
		return -EINVAL;
	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = webcam_intervals[fival->index];
	return 0;
}

int vivid_vid_cap_g_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;

	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
	parm->parm.capture.readbuffers = 1;
	return 0;
}

int vivid_vid_cap_s_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx);
	struct v4l2_fract tpf;
	unsigned i;

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;
	if (!vivid_is_webcam(dev))
		return vivid_vid_cap_g_parm(file, priv, parm);

	tpf = parm->parm.capture.timeperframe;

	if (tpf.denominator == 0)
		tpf = webcam_intervals[ival_sz - 1];
	for (i = 0; i < ival_sz; i++)
		if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i]))
			break;
	if (i == ival_sz)
		i = ival_sz - 1;
	dev->webcam_ival_idx = i;
	tpf = webcam_intervals[dev->webcam_ival_idx];

	/* resync the thread's timings */
	dev->cap_seq_resync = true;
	dev->timeperframe_vid_cap = tpf;
	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = tpf;
	parm->parm.capture.readbuffers = 1;
	return 0;
}