// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-vid-cap.c - video capture support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <linux/prandom.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-rect.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-kthread-cap.h"
#include "vivid-vid-cap.h"

/* Sizes must be in increasing order */
static const struct v4l2_frmsize_discrete webcam_sizes[] = {
	{ 320, 180 },
	{ 640, 360 },
	{ 640, 480 },
	{ 1280, 720 },
	{ 1920, 1080 },
	{ 3840, 2160 },
};

/*
 * Intervals must be in increasing order; webcam_ival_count() below limits
 * how many of them are available for a given frame size.
 */
static const struct v4l2_fract webcam_intervals[] = {
	{ 1, 1 },
	{ 1, 2 },
	{ 1, 4 },
	{ 1, 5 },
	{ 1, 10 },
	{ 2, 25 },
	{ 1, 15 }, /* 7 - maximum for 2160p */
	{ 1, 25 },
	{ 1, 30 }, /* 9 - maximum for 1080p */
	{ 1, 40 },
	{ 1, 50 },
	{ 1, 60 }, /* 12 - maximum for 720p */
	{ 1, 120 },
};

/* Limit maximum FPS rates for high resolutions */
#define IVAL_COUNT_720P 12 /* 720p and up is limited to 60 fps */
#define IVAL_COUNT_1080P 9 /* 1080p and up is limited to 30 fps */
#define IVAL_COUNT_2160P 7 /* 2160p and up is limited to 15 fps */

static inline unsigned int webcam_ival_count(const struct vivid_dev *dev,
					     unsigned int frmsize_idx)
{
	if (webcam_sizes[frmsize_idx].height >= 2160)
		return IVAL_COUNT_2160P;

	if (webcam_sizes[frmsize_idx].height >= 1080)
		return IVAL_COUNT_1080P;

	if (webcam_sizes[frmsize_idx].height >= 720)
		return IVAL_COUNT_720P;

	/* For low resolutions, allow all FPS rates */
	return ARRAY_SIZE(webcam_intervals);
}

static int vid_cap_queue_setup(struct vb2_queue *vq,
		       unsigned *nbuffers, unsigned *nplanes,
		       unsigned sizes[], struct device *alloc_devs[])
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned h = dev->fmt_cap_rect.height;
	unsigned p;

	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * You cannot use read() with FIELD_ALTERNATE since the field
		 * information (TOP/BOTTOM) cannot be passed back to the user.
		 */
		if (vb2_fileio_is_active(vq))
			return -EINVAL;
	}

	if (dev->queue_setup_error) {
		/*
		 * Error injection: test what happens if queue_setup() returns
		 * an error.
		 */
		dev->queue_setup_error = false;
		return -EINVAL;
	}
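	/*
	 * Note: a non-zero *nplanes means this was (in current vb2 behaviour)
	 * invoked on behalf of VIDIOC_CREATE_BUFS with caller-supplied plane
	 * sizes, so those are only validated; otherwise the plane sizes for
	 * the current capture format are filled in below.
	 */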
	if (*nplanes) {
		/*
		 * Check if the number of requested planes match
		 * the number of buffers in the current format. You can't mix that.
		 */
		if (*nplanes != buffers)
			return -EINVAL;
		for (p = 0; p < buffers; p++) {
			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h /
					dev->fmt_cap->vdownsampling[p] +
					dev->fmt_cap->data_offset[p])
				return -EINVAL;
		}
	} else {
		for (p = 0; p < buffers; p++)
			sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
					dev->fmt_cap->vdownsampling[p] +
					dev->fmt_cap->data_offset[p];
	}

	*nplanes = buffers;

	dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
	for (p = 0; p < buffers; p++)
		dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);

	return 0;
}

static int vid_cap_buf_prepare(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size;
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned p;

	dprintk(dev, 1, "%s\n", __func__);

	if (WARN_ON(NULL == dev->fmt_cap))
		return -EINVAL;

	if (dev->buf_prepare_error) {
		/*
		 * Error injection: test what happens if buf_prepare() returns
		 * an error.
		 */
		dev->buf_prepare_error = false;
		return -EINVAL;
	}
	for (p = 0; p < buffers; p++) {
		size = (tpg_g_line_width(&dev->tpg, p) *
			dev->fmt_cap_rect.height) /
			dev->fmt_cap->vdownsampling[p] +
			dev->fmt_cap->data_offset[p];

		if (vb2_plane_size(vb, p) < size) {
			dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
					__func__, p, vb2_plane_size(vb, p), size);
			return -EINVAL;
		}

		vb2_set_plane_payload(vb, p, size);
		vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
	}

	return 0;
}

static void vid_cap_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_timecode *tc = &vbuf->timecode;
	unsigned fps = 25;
	unsigned seq = vbuf->sequence;

	if (!vivid_is_sdtv_cap(dev))
		return;

	/*
	 * Set the timecode. Rarely used, so it is interesting to
	 * test this.
	 */
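	/*
	 * For example, with a 25 fps standard, sequence number 1800 becomes
	 * frames = 1800 % 25 = 0, seconds = (1800 / 25) % 60 = 12 and
	 * minutes = (1800 / 1500) % 60 = 1, i.e. timecode 00:01:12:00.
	 */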
	vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
	if (dev->std_cap[dev->input] & V4L2_STD_525_60)
		fps = 30;
	tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
	tc->flags = 0;
	tc->frames = seq % fps;
	tc->seconds = (seq / fps) % 60;
	tc->minutes = (seq / (60 * fps)) % 60;
	tc->hours = (seq / (60 * 60 * fps)) % 24;
}

static void vid_cap_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);

	dprintk(dev, 1, "%s\n", __func__);

	spin_lock(&dev->slock);
	list_add_tail(&buf->list, &dev->vid_cap_active);
	spin_unlock(&dev->slock);
}

static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned i;
	int err;

	dev->vid_cap_seq_count = 0;
	dprintk(dev, 1, "%s\n", __func__);
	for (i = 0; i < MAX_VID_CAP_BUFFERS; i++)
		dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
	if (dev->start_streaming_error) {
		dev->start_streaming_error = false;
		err = -EINVAL;
	} else {
		err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
	}
	if (err) {
		struct vivid_buffer *buf, *tmp;

		list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf,
					VB2_BUF_STATE_QUEUED);
		}
	}
	return err;
}

/* abort streaming and wait for last buffer */
static void vid_cap_stop_streaming(struct vb2_queue *vq)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);

	dprintk(dev, 1, "%s\n", __func__);
	vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
}

static void vid_cap_buf_request_complete(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
}

const struct vb2_ops vivid_vid_cap_qops = {
	.queue_setup = vid_cap_queue_setup,
	.buf_prepare = vid_cap_buf_prepare,
	.buf_finish = vid_cap_buf_finish,
	.buf_queue = vid_cap_buf_queue,
	.start_streaming = vid_cap_start_streaming,
	.stop_streaming = vid_cap_stop_streaming,
	.buf_request_complete = vid_cap_buf_request_complete,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

/*
 * Determine the 'picture' quality based on the current TV frequency: either
 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
 * signal or NOISE for no signal.
 */
void vivid_update_quality(struct vivid_dev *dev)
{
	unsigned freq_modulus;

	if (dev->input_is_connected_to_output[dev->input]) {
		/*
		 * The 'noise' will only be replaced by the actual video
		 * if the output video matches the input video settings.
		 */
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_hdmi_cap(dev) &&
	    VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_sdtv_cap(dev) &&
	    VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (!vivid_is_tv_cap(dev)) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
		return;
	}

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just noise.
	 */
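	/*
	 * Worked example: tv_freq is in units of 1/16 MHz, so 55.25 MHz is
	 * 884. Then 884 - 676 = 208 and 208 % 96 = 16, which lies inside the
	 * 12..20 window below, i.e. a color picture.
	 */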
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (freq_modulus > 2 * 16) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
			next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
		return;
	}
	if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
		tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
	else
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
}

/*
 * Get the current picture quality and the associated afc value.
 */
static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
{
	unsigned freq_modulus;

	if (afc)
		*afc = 0;
	if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
	    tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
		return tpg_g_quality(&dev->tpg);

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just gray.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (afc)
		*afc = freq_modulus - 1 * 16;
	return TPG_QUAL_GRAY;
}

enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return dev->std_aspect_ratio[dev->input];

	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_aspect_ratio[dev->input];

	return TPG_VIDEO_ASPECT_IMAGE;
}

static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return (dev->std_cap[dev->input] & V4L2_STD_525_60) ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	if (vivid_is_hdmi_cap(dev) &&
	    dev->src_rect.width == 720 && dev->src_rect.height <= 576)
		return dev->src_rect.height == 480 ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	return TPG_PIXEL_ASPECT_SQUARE;
}

/*
 * Called whenever the format has to be reset which can occur when
 * changing inputs, standard, timings, etc.
 */
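/*
 * Note that for HDMI inputs the nominal frame period below is derived from
 * the DV timings as total frame size divided by the pixel clock, with both
 * values divided by 100 so they fit in the 32-bit v4l2_fract fields.
 */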
void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
{
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
	u32 dims[V4L2_CTRL_MAX_DIMS] = {};
	unsigned size;
	u64 pixelclock;

	switch (dev->input_type[dev->input]) {
	case WEBCAM:
	default:
		dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
		dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
		dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
		dev->field_cap = V4L2_FIELD_NONE;
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case TV:
	case SVID:
		dev->field_cap = dev->tv_field_cap;
		dev->src_rect.width = 720;
		if (dev->std_cap[dev->input] & V4L2_STD_525_60) {
			dev->src_rect.height = 480;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
			dev->service_set_cap = V4L2_SLICED_CAPTION_525;
		} else {
			dev->src_rect.height = 576;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
			dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
		}
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case HDMI:
		dev->src_rect.width = bt->width;
		dev->src_rect.height = bt->height;
		size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
		if (dev->reduced_fps && can_reduce_fps(bt)) {
			pixelclock = div_u64(bt->pixelclock * 1000, 1001);
			bt->flags |= V4L2_DV_FL_REDUCED_FPS;
		} else {
			pixelclock = bt->pixelclock;
			bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
		}
		dev->timeperframe_vid_cap = (struct v4l2_fract) {
			size / 100, (u32)pixelclock / 100
		};
		if (bt->interlaced)
			dev->field_cap = V4L2_FIELD_ALTERNATE;
		else
			dev->field_cap = V4L2_FIELD_NONE;

		/*
		 * We can be called from within s_ctrl, in that case we can't
		 * set/get controls. Luckily we don't need to in that case.
		 */
		if (keep_controls || !dev->colorspace)
			break;
		if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
			if (bt->width == 720 && bt->height <= 576)
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			else
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
		} else {
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
		}
		tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
		break;
	}
	vivid_update_quality(dev);
	tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
	dev->crop_cap = dev->src_rect;
	dev->crop_bounds_cap = dev->src_rect;
	dev->compose_cap = dev->crop_cap;
	if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
		dev->compose_cap.height /= 2;
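	/*
	 * With V4L2_FIELD_ALTERNATE each buffer holds a single field, so the
	 * compose (and hence format) rectangle uses half the frame height.
	 */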
	dev->fmt_cap_rect = dev->compose_cap;
	tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
	tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
	tpg_update_mv_step(&dev->tpg);

	/*
	 * We can be called from within s_ctrl, in that case we can't
	 * modify controls. Luckily we don't need to in that case.
	 */
	if (keep_controls)
		return;

	dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV);
	dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV);
	v4l2_ctrl_modify_dimensions(dev->pixel_array, dims);
}

/* Map the field to something that is valid for the current input */
static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
{
	if (vivid_is_sdtv_cap(dev)) {
		switch (field) {
		case V4L2_FIELD_INTERLACED_TB:
		case V4L2_FIELD_INTERLACED_BT:
		case V4L2_FIELD_SEQ_TB:
		case V4L2_FIELD_SEQ_BT:
		case V4L2_FIELD_TOP:
		case V4L2_FIELD_BOTTOM:
		case V4L2_FIELD_ALTERNATE:
			return field;
		case V4L2_FIELD_INTERLACED:
		default:
			return V4L2_FIELD_INTERLACED;
		}
	}
	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_cap[dev->input].bt.interlaced ?
			V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE;
	return V4L2_FIELD_NONE;
}

static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_colorspace(&dev->tpg);
	return dev->colorspace_out;
}

static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_xfer_func(&dev->tpg);
	return dev->xfer_func_out;
}

static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_ycbcr_enc(&dev->tpg);
	return dev->ycbcr_enc_out;
}

static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_hsv_enc(&dev->tpg);
	return dev->hsv_enc_out;
}

static unsigned vivid_quantization_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_quantization(&dev->tpg);
	return dev->quantization_out;
}

int vivid_g_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	unsigned p;

	mp->width = dev->fmt_cap_rect.width;
	mp->height = dev->fmt_cap_rect.height;
	mp->field = dev->field_cap;
	mp->pixelformat = dev->fmt_cap->fourcc;
	mp->colorspace = vivid_colorspace_cap(dev);
	mp->xfer_func = vivid_xfer_func_cap(dev);
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
		mp->hsv_enc = vivid_hsv_enc_cap(dev);
	else
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	mp->quantization = vivid_quantization_cap(dev);
	mp->num_planes = dev->fmt_cap->buffers;
	for (p = 0; p < mp->num_planes; p++) {
		mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
		mp->plane_fmt[p].sizeimage =
			(tpg_g_line_width(&dev->tpg, p) * mp->height) /
			dev->fmt_cap->vdownsampling[p] +
			dev->fmt_cap->data_offset[p];
	}
	return 0;
}

int vivid_try_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	unsigned bytesperline, max_bpl;
	unsigned factor = 1;
	unsigned w, h;
	unsigned p;
	bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC);

	fmt = vivid_get_format(dev, mp->pixelformat);
	if (!fmt) {
		dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
			mp->pixelformat);
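		/*
		 * TRY_FMT/S_FMT are not supposed to fail just because the
		 * requested pixelformat is unsupported; fall back to a format
		 * this driver always handles instead.
		 */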
		mp->pixelformat = V4L2_PIX_FMT_YUYV;
		fmt = vivid_get_format(dev, mp->pixelformat);
	}

	mp->field = vivid_field_cap(dev, mp->field);
	if (vivid_is_webcam(dev)) {
		const struct v4l2_frmsize_discrete *sz =
			v4l2_find_nearest_size(webcam_sizes,
					       ARRAY_SIZE(webcam_sizes), width,
					       height, mp->width, mp->height);

		w = sz->width;
		h = sz->height;
	} else if (vivid_is_sdtv_cap(dev)) {
		w = 720;
		h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576;
	} else {
		w = dev->src_rect.width;
		h = dev->src_rect.height;
	}
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;
	if (vivid_is_webcam(dev) ||
	    (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
		mp->width = w;
		mp->height = h / factor;
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };

		v4l2_rect_set_min_size(&r, &vivid_min_rect);
		v4l2_rect_set_max_size(&r, &vivid_max_rect);
		if (dev->has_scaler_cap && !dev->has_compose_cap) {
			struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };

			v4l2_rect_set_max_size(&r, &max_r);
		} else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
			v4l2_rect_set_max_size(&r, &dev->src_rect);
		} else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
			v4l2_rect_set_min_size(&r, &dev->src_rect);
		}
		mp->width = r.width;
		mp->height = r.height / factor;
	}

	/* This driver supports custom bytesperline values */

	mp->num_planes = fmt->buffers;
	for (p = 0; p < fmt->buffers; p++) {
		/* Calculate the minimum supported bytesperline value */
		bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
		/* Calculate the maximum supported bytesperline value */
		max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;

		if (pfmt[p].bytesperline > max_bpl)
			pfmt[p].bytesperline = max_bpl;
		if (pfmt[p].bytesperline < bytesperline)
			pfmt[p].bytesperline = bytesperline;

		pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
				fmt->vdownsampling[p] + fmt->data_offset[p];

		memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
	}
	for (p = fmt->buffers; p < fmt->planes; p++)
		pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
			(fmt->bit_depth[p] / fmt->vdownsampling[p])) /
			(fmt->bit_depth[0] / fmt->vdownsampling[0]);

	if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace))
		mp->colorspace = vivid_colorspace_cap(dev);

	if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func))
		mp->xfer_func = vivid_xfer_func_cap(dev);

	if (fmt->color_enc == TGP_COLOR_ENC_HSV) {
		if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc))
			mp->hsv_enc = vivid_hsv_enc_cap(dev);
	} else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) {
		if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc))
			mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	} else {
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	}

	if (fmt->color_enc == TGP_COLOR_ENC_YCBCR ||
	    fmt->color_enc == TGP_COLOR_ENC_RGB) {
		if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization))
			mp->quantization = vivid_quantization_cap(dev);
	} else {
		mp->quantization = vivid_quantization_cap(dev);
	}

	memset(mp->reserved, 0, sizeof(mp->reserved));
	return 0;
}
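/*
 * The S_FMT handler: validate the requested format with
 * vivid_try_fmt_vid_cap() first, then update the crop/compose rectangles
 * and reprogram the test pattern generator accordingly.
 */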
int vivid_s_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	struct vb2_queue *q = &dev->vb_vid_cap_q;
	int ret = vivid_try_fmt_vid_cap(file, priv, f);
	unsigned factor = 1;
	unsigned p;
	unsigned i;

	if (ret < 0)
		return ret;

	if (vb2_is_busy(q)) {
		dprintk(dev, 1, "%s device busy\n", __func__);
		return -EBUSY;
	}

	dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;

	/* Note: the webcam input doesn't support scaling, cropping or composing */

	if (!vivid_is_webcam(dev) &&
	    (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		if (dev->has_scaler_cap) {
			if (dev->has_compose_cap)
				v4l2_rect_map_inside(compose, &r);
			else
				*compose = r;
			if (dev->has_crop_cap && !dev->has_compose_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					r.width / MAX_ZOOM,
					factor * r.height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					r.width * MAX_ZOOM,
					factor * r.height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			} else if (dev->has_crop_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					compose->width / MAX_ZOOM,
					factor * compose->height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					compose->width * MAX_ZOOM,
					factor * compose->height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap && !dev->has_compose_cap) {
			r.height *= factor;
			v4l2_rect_set_size_to(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			r = *crop;
			r.height /= factor;
			v4l2_rect_set_size_to(compose, &r);
		} else if (!dev->has_crop_cap) {
			v4l2_rect_map_inside(compose, &r);
		} else {
			r.height *= factor;
			v4l2_rect_set_max_size(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			compose->top *= factor;
			compose->height *= factor;
			v4l2_rect_set_size_to(compose, crop);
			v4l2_rect_map_inside(compose, &r);
			compose->top /= factor;
			compose->height /= factor;
		}
	} else if (vivid_is_webcam(dev)) {
		unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx);

		/* Guaranteed to be a match */
		for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
			if (webcam_sizes[i].width == mp->width &&
			    webcam_sizes[i].height == mp->height)
				break;
		dev->webcam_size_idx = i;
		if (dev->webcam_ival_idx >= ival_sz)
			dev->webcam_ival_idx = ival_sz - 1;
		vivid_update_format_cap(dev, false);
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		v4l2_rect_set_size_to(compose, &r);
		r.height *= factor;
		v4l2_rect_set_size_to(crop, &r);
	}

	dev->fmt_cap_rect.width = mp->width;
	dev->fmt_cap_rect.height = mp->height;
	tpg_s_buf_height(&dev->tpg, mp->height);
	tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
	for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
		tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
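	/*
	 * For V4L2_FIELD_ALTERNATE the TPG is primed with the top field here;
	 * the capture thread is then expected to alternate between top and
	 * bottom fields on successive buffers.
	 */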
	dev->field_cap = mp->field;
	if (dev->field_cap == V4L2_FIELD_ALTERNATE)
		tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
	else
		tpg_s_field(&dev->tpg, dev->field_cap, false);
	tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
	if (vivid_is_sdtv_cap(dev))
		dev->tv_field_cap = mp->field;
	tpg_update_mv_step(&dev->tpg);
	dev->tpg.colorspace = mp->colorspace;
	dev->tpg.xfer_func = mp->xfer_func;
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR)
		dev->tpg.ycbcr_enc = mp->ycbcr_enc;
	else
		dev->tpg.hsv_enc = mp->hsv_enc;
	dev->tpg.quantization = mp->quantization;

	return 0;
}

int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_g_fmt_vid_cap(file, priv, f);
}

int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_try_fmt_vid_cap(file, priv, f);
}

int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_s_fmt_vid_cap(file, priv, f);
}

int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
}

int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
}

int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
}

int vivid_vid_cap_g_selection(struct file *file, void *priv,
			      struct v4l2_selection *sel)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -ENODATA;

	sel->r.left = sel->r.top = 0;
	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->crop_cap;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->src_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = vivid_max_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->compose_cap;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->fmt_cap_rect;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
	int ret;

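	/*
	 * Note: the crop rectangle is expressed in source (frame) lines,
	 * while the compose rectangle and the format are in buffer (field)
	 * lines, hence the multiplications/divisions by 'factor' below when
	 * a single-field setting is active.
	 */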
	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -ENODATA;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
		v4l2_rect_set_max_size(&s->r, &dev->src_rect);
		v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
		s->r.top /= factor;
		s->r.height /= factor;
		if (dev->has_scaler_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;
			struct v4l2_rect max_rect = {
				0, 0,
				s->r.width * MAX_ZOOM,
				s->r.height * MAX_ZOOM
			};
			struct v4l2_rect min_rect = {
				0, 0,
				s->r.width / MAX_ZOOM,
				s->r.height / MAX_ZOOM
			};

			v4l2_rect_set_min_size(&fmt, &min_rect);
			if (!dev->has_compose_cap)
				v4l2_rect_set_max_size(&fmt, &max_rect);
			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			if (dev->has_compose_cap) {
				v4l2_rect_set_min_size(compose, &min_rect);
				v4l2_rect_set_max_size(compose, &max_rect);
				v4l2_rect_map_inside(compose, &fmt);
			}
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
		} else if (dev->has_compose_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;

			v4l2_rect_set_min_size(&fmt, &s->r);
			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
			v4l2_rect_set_size_to(compose, &s->r);
			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
		} else {
			if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
			v4l2_rect_set_size_to(compose, &s->r);
			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
			tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
		}
		s->r.top *= factor;
		s->r.height *= factor;
		*crop = s->r;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
		v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
		if (dev->has_scaler_cap) {
			struct v4l2_rect max_rect = {
				0, 0,
				dev->src_rect.width * MAX_ZOOM,
				(dev->src_rect.height / factor) * MAX_ZOOM
			};

			v4l2_rect_set_max_size(&s->r, &max_rect);
			if (dev->has_crop_cap) {
				struct v4l2_rect min_rect = {
					0, 0,
					s->r.width / MAX_ZOOM,
					(s->r.height * factor) / MAX_ZOOM
				};
				struct v4l2_rect max_rect = {
					0, 0,
					s->r.width * MAX_ZOOM,
					(s->r.height * factor) * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_rect);
				v4l2_rect_set_max_size(crop, &max_rect);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap) {
			s->r.top *= factor;
			s->r.height *= factor;
			v4l2_rect_set_max_size(&s->r, &dev->src_rect);
			v4l2_rect_set_size_to(crop, &s->r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			s->r.top /= factor;
			s->r.height /= factor;
		} else {
			v4l2_rect_set_size_to(&s->r, &dev->src_rect);
			s->r.height /= factor;
		}
		v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
		*compose = s->r;
		break;
	default:
		return -EINVAL;
	}

	tpg_s_crop_compose(&dev->tpg, crop, compose);
	return 0;
}

int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv,
				int type, struct v4l2_fract *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (vivid_get_pixel_aspect(dev)) {
	case TPG_PIXEL_ASPECT_NTSC:
		f->numerator = 11;
		f->denominator = 10;
		break;
	case TPG_PIXEL_ASPECT_PAL:
		f->numerator = 54;
		f->denominator = 59;
		break;
	default:
		break;
	}
	return 0;
}

static const struct v4l2_audio vivid_audio_inputs[] = {
	{ 0, "TV", V4L2_AUDCAP_STEREO },
	{ 1, "Line-In", V4L2_AUDCAP_STEREO },
};

int vidioc_enum_input(struct file *file, void *priv,
			struct v4l2_input *inp)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (inp->index >= dev->num_inputs)
		return -EINVAL;

	inp->type = V4L2_INPUT_TYPE_CAMERA;
	switch (dev->input_type[inp->index]) {
	case WEBCAM:
		snprintf(inp->name, sizeof(inp->name), "Webcam %03u-%u",
			 dev->inst, dev->input_name_counter[inp->index]);
		inp->capabilities = 0;
		break;
	case TV:
		snprintf(inp->name, sizeof(inp->name), "TV %03u-%u",
			 dev->inst, dev->input_name_counter[inp->index]);
		inp->type = V4L2_INPUT_TYPE_TUNER;
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case SVID:
		snprintf(inp->name, sizeof(inp->name), "S-Video %03u-%u",
			 dev->inst, dev->input_name_counter[inp->index]);
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case HDMI:
		snprintf(inp->name, sizeof(inp->name), "HDMI %03u-%u",
			 dev->inst, dev->input_name_counter[inp->index]);
		inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
		if (dev->edid_blocks == 0 ||
		    dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL)
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK ||
			 dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE)
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		break;
	}
	if (dev->sensor_hflip)
		inp->status |= V4L2_IN_ST_HFLIP;
	if (dev->sensor_vflip)
		inp->status |= V4L2_IN_ST_VFLIP;
	if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
		if (dev->std_signal_mode[dev->input] == NO_SIGNAL) {
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		} else if (dev->std_signal_mode[dev->input] == NO_LOCK) {
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		} else if (vivid_is_tv_cap(dev)) {
			switch (tpg_g_quality(&dev->tpg)) {
			case TPG_QUAL_GRAY:
				inp->status |= V4L2_IN_ST_COLOR_KILL;
				break;
			case TPG_QUAL_NOISE:
				inp->status |= V4L2_IN_ST_NO_H_LOCK;
				break;
			default:
				break;
			}
		}
	}
	return 0;
}

int vidioc_g_input(struct file *file, void *priv, unsigned *i)
{
	struct vivid_dev *dev = video_drvdata(file);

	*i = dev->input;
	return 0;
}
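/*
 * Switching inputs is only allowed while no capture queue is busy; it
 * re-derives the capture format and updates the colorspace and the
 * per-input controls for the newly selected input.
 */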
int vidioc_s_input(struct file *file, void *priv, unsigned i)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
	unsigned brightness;

	if (i >= dev->num_inputs)
		return -EINVAL;

	if (i == dev->input)
		return 0;

	if (vb2_is_busy(&dev->vb_vid_cap_q) ||
	    vb2_is_busy(&dev->vb_vbi_cap_q) ||
	    vb2_is_busy(&dev->vb_meta_cap_q))
		return -EBUSY;

	dev->input = i;
	dev->vid_cap_dev.tvnorms = 0;
	if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
		dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
		dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
	}
	dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	vivid_update_format_cap(dev, false);

	if (dev->colorspace) {
		switch (dev->input_type[i]) {
		case WEBCAM:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			break;
		case TV:
		case SVID:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			break;
		case HDMI:
			if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
				if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
				else
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			} else {
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			}
			break;
		}
	}

	/*
	 * Modify the brightness range depending on the input.
	 * This makes it easy to use vivid to test if applications can
	 * handle control range modifications and is also how this is
	 * typically used in practice as different inputs may be hooked
	 * up to different receivers with different control ranges.
	 */
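	/*
	 * Example: for input 2 the brightness range becomes [256, 511] with
	 * a default of 384, and the control is restored to 256 plus the
	 * brightness value last set on that input.
	 */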
	brightness = 128 * i + dev->input_brightness[i];
	v4l2_ctrl_modify_range(dev->brightness,
			       128 * i, 255 + 128 * i, 1, 128 + 128 * i);
	v4l2_ctrl_s_ctrl(dev->brightness, brightness);

	/* Restore per-input states. */
	v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode,
			   vivid_is_hdmi_cap(dev));
	v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) &&
			   dev->dv_timings_signal_mode[dev->input] ==
			   SELECTED_DV_TIMINGS);
	v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev));
	v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) &&
			   dev->std_signal_mode[dev->input]);

	if (vivid_is_hdmi_cap(dev)) {
		v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode,
				 dev->dv_timings_signal_mode[dev->input]);
		v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings,
				 dev->query_dv_timings[dev->input]);
	} else if (vivid_is_sdtv_cap(dev)) {
		v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode,
				 dev->std_signal_mode[dev->input]);
		v4l2_ctrl_s_ctrl(dev->ctrl_standard,
				 dev->std_signal_mode[dev->input]);
	}

	return 0;
}

int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	*vin = vivid_audio_inputs[vin->index];
	return 0;
}

int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	*vin = vivid_audio_inputs[dev->tv_audio_input];
	return 0;
}

int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	dev->tv_audio_input = vin->index;
	return 0;
}

int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	vf->frequency = dev->tv_freq;
	return 0;
}

int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
	if (vivid_is_tv_cap(dev))
		vivid_update_quality(dev);
	return 0;
}

int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vt->index != 0)
		return -EINVAL;
	if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
		return -EINVAL;
	dev->tv_audmode = vt->audmode;
	return 0;
}

int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);
	enum tpg_quality qual;

	if (vt->index != 0)
		return -EINVAL;

	vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
			 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
	vt->audmode = dev->tv_audmode;
	vt->rangelow = MIN_TV_FREQ;
	vt->rangehigh = MAX_TV_FREQ;
	qual = vivid_get_quality(dev, &vt->afc);
	if (qual == TPG_QUAL_COLOR)
		vt->signal = 0xffff;
	else if (qual == TPG_QUAL_GRAY)
		vt->signal = 0x8000;
	else
		vt->signal = 0;
	if (qual == TPG_QUAL_NOISE) {
		vt->rxsubchans = 0;
	} else if (qual == TPG_QUAL_GRAY) {
		vt->rxsubchans = V4L2_TUNER_SUB_MONO;
	} else {
		unsigned int channel_nr = dev->tv_freq / (6 * 16);
		unsigned int options =
			(dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3;

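		/*
		 * Cycle through the possible audio modes from channel to
		 * channel so that all rxsubchans combinations can be tested;
		 * NTSC-M channels get a fourth, SAP-based variant.
		 */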
		switch (channel_nr % options) {
		case 0:
			vt->rxsubchans = V4L2_TUNER_SUB_MONO;
			break;
		case 1:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
			break;
		case 2:
			if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M)
				vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
			else
				vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
			break;
		case 3:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
			break;
		}
	}
	strscpy(vt->name, "TV Tuner", sizeof(vt->name));
	return 0;
}

/* Must remain in sync with the vivid_ctrl_standard_strings array */
const v4l2_std_id vivid_standard[] = {
	V4L2_STD_NTSC_M,
	V4L2_STD_NTSC_M_JP,
	V4L2_STD_NTSC_M_KR,
	V4L2_STD_NTSC_443,
	V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
	V4L2_STD_PAL_I,
	V4L2_STD_PAL_DK,
	V4L2_STD_PAL_M,
	V4L2_STD_PAL_N,
	V4L2_STD_PAL_Nc,
	V4L2_STD_PAL_60,
	V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
	V4L2_STD_SECAM_DK,
	V4L2_STD_SECAM_L,
	V4L2_STD_SECAM_LC,
	V4L2_STD_UNKNOWN
};

/* Must remain in sync with the vivid_standard array */
const char * const vivid_ctrl_standard_strings[] = {
	"NTSC-M",
	"NTSC-M-JP",
	"NTSC-M-KR",
	"NTSC-443",
	"PAL-BGH",
	"PAL-I",
	"PAL-DK",
	"PAL-M",
	"PAL-N",
	"PAL-Nc",
	"PAL-60",
	"SECAM-BGH",
	"SECAM-DK",
	"SECAM-L",
	"SECAM-Lc",
	NULL,
};

int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int last = dev->query_std_last[dev->input];

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_signal_mode[dev->input] == NO_SIGNAL ||
	    dev->std_signal_mode[dev->input] == NO_LOCK) {
		*id = V4L2_STD_UNKNOWN;
		return 0;
	}
	if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
		*id = V4L2_STD_UNKNOWN;
	} else if (dev->std_signal_mode[dev->input] == CURRENT_STD) {
		*id = dev->std_cap[dev->input];
	} else if (dev->std_signal_mode[dev->input] == SELECTED_STD) {
		*id = dev->query_std[dev->input];
	} else {
		*id = vivid_standard[last];
		dev->query_std_last[dev->input] =
			(last + 1) % ARRAY_SIZE(vivid_standard);
	}

	return 0;
}

int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_cap[dev->input] == id)
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
		return -EBUSY;
	dev->std_cap[dev->input] = id;
	vivid_update_format_cap(dev, false);
	return 0;
}

static void find_aspect_ratio(u32 width, u32 height,
			      u32 *num, u32 *denom)
{
	if (!(height % 3) && ((height * 4 / 3) == width)) {
		*num = 4;
		*denom = 3;
	} else if (!(height % 9) && ((height * 16 / 9) == width)) {
		*num = 16;
		*denom = 9;
	} else if (!(height % 10) && ((height * 16 / 10) == width)) {
		*num = 16;
		*denom = 10;
	} else if (!(height % 4) && ((height * 5 / 4) == width)) {
		*num = 5;
		*denom = 4;
	} else if (!(height % 9) && ((height * 15 / 9) == width)) {
		*num = 15;
		*denom = 9;
	} else { /* default to 16:9 */
		*num = 16;
		*denom = 9;
	}
}

static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
{
	struct v4l2_bt_timings *bt = &timings->bt;
	u32 total_h_pixel;
	u32 total_v_lines;
	u32 h_freq;

	if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
				   NULL, NULL))
		return false;

	total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
	total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);

	h_freq = (u32)bt->pixelclock / total_h_pixel;

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
		if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
				    bt->polarities, bt->interlaced, timings))
			return true;
	}

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
		struct v4l2_fract aspect_ratio;

		find_aspect_ratio(bt->width, bt->height,
				  &aspect_ratio.numerator,
				  &aspect_ratio.denominator);
		if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
				    bt->polarities, bt->interlaced,
				    aspect_ratio, timings))
			return true;
	}
	return false;
}

int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
			       struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
				      0, NULL, NULL) &&
	    !valid_cvt_gtf_timings(timings))
		return -EINVAL;

	if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input],
				  0, false))
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->dv_timings_cap[dev->input] = *timings;
	vivid_update_format_cap(dev, false);
	return 0;
}

int vidioc_query_dv_timings(struct file *file, void *_fh,
			    struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int input = dev->input;
	unsigned int last = dev->query_dv_timings_last[input];

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (dev->dv_timings_signal_mode[input] == NO_SIGNAL ||
	    dev->edid_blocks == 0)
		return -ENOLINK;
	if (dev->dv_timings_signal_mode[input] == NO_LOCK)
		return -ENOLCK;
	if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) {
		timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
		return -ERANGE;
	}
	if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) {
		*timings = dev->dv_timings_cap[input];
	} else if (dev->dv_timings_signal_mode[input] ==
		   SELECTED_DV_TIMINGS) {
		*timings =
			v4l2_dv_timings_presets[dev->query_dv_timings[input]];
	} else {
		*timings =
			v4l2_dv_timings_presets[last];
		dev->query_dv_timings_last[input] =
			(last + 1) % dev->query_dv_timings_size;
	}
	return 0;
}

void vivid_update_outputs(struct vivid_dev *dev)
{
	u32 edid_present = 0;

	if (!dev || !dev->num_outputs)
		return;
	for (unsigned int i = 0, j = 0; i < dev->num_outputs; i++) {
		if (dev->output_type[i] != HDMI)
			continue;

		struct vivid_dev *dev_rx = dev->output_to_input_instance[i];

		if (dev_rx && dev_rx->edid_blocks)
			edid_present |= 1 << j;
		j++;
	}
	v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, edid_present);
	v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, edid_present);
	v4l2_ctrl_s_ctrl(dev->ctrl_tx_rxsense, edid_present);
}

void vivid_update_connected_outputs(struct vivid_dev *dev)
{
	u16 phys_addr = cec_get_edid_phys_addr(dev->edid, dev->edid_blocks * 128, NULL);

	for (unsigned int i = 0, j = 0; i < dev->num_inputs; i++) {
		unsigned int menu_idx =
			dev->input_is_connected_to_output[i];

		if (dev->input_type[i] != HDMI)
			continue;
		j++;
		if (menu_idx < FIXED_MENU_ITEMS)
			continue;

		struct vivid_dev *dev_tx = vivid_ctrl_hdmi_to_output_instance[menu_idx];
		unsigned int output = vivid_ctrl_hdmi_to_output_index[menu_idx];

		if (!dev_tx)
			continue;

		unsigned int hdmi_output = dev_tx->output_to_iface_index[output];

		vivid_update_outputs(dev_tx);
		if (dev->edid_blocks) {
			cec_s_phys_addr(dev_tx->cec_tx_adap[hdmi_output],
					v4l2_phys_addr_for_input(phys_addr, j),
					false);
		} else {
			cec_phys_addr_invalidate(dev_tx->cec_tx_adap[hdmi_output]);
		}
	}
}

int vidioc_s_edid(struct file *file, void *_fh,
		  struct v4l2_edid *edid)
{
	struct vivid_dev *dev = video_drvdata(file);
	u16 phys_addr;
	int ret;

	memset(edid->reserved, 0, sizeof(edid->reserved));
	if (edid->pad >= dev->num_inputs)
		return -EINVAL;
	if (dev->input_type[edid->pad] != HDMI || edid->start_block)
		return -EINVAL;
	if (edid->blocks == 0) {
		if (vb2_is_busy(&dev->vb_vid_cap_q))
			return -EBUSY;
		dev->edid_blocks = 0;
		vivid_update_connected_outputs(dev);
		return 0;
	}
	if (edid->blocks > dev->edid_max_blocks) {
		edid->blocks = dev->edid_max_blocks;
		return -E2BIG;
	}
	phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
	ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
	if (ret)
		return ret;

	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->edid_blocks = edid->blocks;
	memcpy(dev->edid, edid->edid, edid->blocks * 128);

	vivid_update_connected_outputs(dev);
	return 0;
}

int vidioc_enum_framesizes(struct file *file, void *fh,
			   struct v4l2_frmsizeenum *fsize)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
		return -EINVAL;
	if (vivid_get_format(dev, fsize->pixel_format) == NULL)
		return -EINVAL;
	if (vivid_is_webcam(dev)) {
		if (fsize->index >= ARRAY_SIZE(webcam_sizes))
			return -EINVAL;
		fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
		fsize->discrete = webcam_sizes[fsize->index];
		return 0;
	}
	if (fsize->index)
		return -EINVAL;
	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = MIN_WIDTH;
	fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
	fsize->stepwise.step_width = 2;
	fsize->stepwise.min_height = MIN_HEIGHT;
	fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
	fsize->stepwise.step_height = 2;
	return 0;
}

/* timeperframe is arbitrary and continuous */
int vidioc_enum_frameintervals(struct file *file, void *priv,
			       struct v4l2_frmivalenum *fival)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	int i;

	fmt = vivid_get_format(dev, fival->pixel_format);
	if (!fmt)
		return -EINVAL;

	if (!vivid_is_webcam(dev)) {
		if (fival->index)
			return -EINVAL;
		if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
			return -EINVAL;
		if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
			return -EINVAL;
		fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
		fival->discrete = dev->timeperframe_vid_cap;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
		if (fival->width == webcam_sizes[i].width &&
		    fival->height == webcam_sizes[i].height)
			break;
	if (i == ARRAY_SIZE(webcam_sizes))
		return -EINVAL;
	if (fival->index >= webcam_ival_count(dev, i))
		return -EINVAL;
	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = webcam_intervals[fival->index];
	return 0;
}

int vivid_vid_cap_g_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;

	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
	parm->parm.capture.readbuffers = 1;
	return 0;
}

int vivid_vid_cap_s_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx);
	struct v4l2_fract tpf;
	unsigned i;

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;
	if (!vivid_is_webcam(dev))
		return vivid_vid_cap_g_parm(file, priv, parm);

	tpf = parm->parm.capture.timeperframe;

	if (tpf.denominator == 0)
		tpf = webcam_intervals[ival_sz - 1];
	for (i = 0; i < ival_sz; i++)
		if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i]))
			break;
	if (i == ival_sz)
		i = ival_sz - 1;
	dev->webcam_ival_idx = i;
	tpf = webcam_intervals[dev->webcam_ival_idx];

	/* resync the thread's timings */
	dev->cap_seq_resync = true;
	dev->timeperframe_vid_cap = tpf;
	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = tpf;
	parm->parm.capture.readbuffers = 1;
	return 0;
}