// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-vid-cap.c - video capture support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-rect.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-kthread-cap.h"
#include "vivid-vid-cap.h"

/* Sizes must be in increasing order */
static const struct v4l2_frmsize_discrete webcam_sizes[] = {
        {  320,  180 },
        {  640,  360 },
        {  640,  480 },
        { 1280,  720 },
        { 1920, 1080 },
        { 3840, 2160 },
};

/*
 * Intervals must be in increasing order. The maximum frame rate allowed
 * for each resolution is determined by webcam_ival_count() below.
 */
static const struct v4l2_fract webcam_intervals[] = {
        { 1, 1 },
        { 1, 2 },
        { 1, 4 },
        { 1, 5 },
        { 1, 10 },
        { 2, 25 },
        { 1, 15 }, /* 7 - maximum for 2160p */
        { 1, 25 },
        { 1, 30 }, /* 9 - maximum for 1080p */
        { 1, 40 },
        { 1, 50 },
        { 1, 60 }, /* 12 - maximum for 720p */
        { 1, 120 },
};

/* Limit maximum FPS rates for high resolutions */
#define IVAL_COUNT_720P 12 /* 720p and up is limited to 60 fps */
#define IVAL_COUNT_1080P 9 /* 1080p and up is limited to 30 fps */
#define IVAL_COUNT_2160P 7 /* 2160p and up is limited to 15 fps */

static inline unsigned int webcam_ival_count(const struct vivid_dev *dev,
                                             unsigned int frmsize_idx)
{
        if (webcam_sizes[frmsize_idx].height >= 2160)
                return IVAL_COUNT_2160P;

        if (webcam_sizes[frmsize_idx].height >= 1080)
                return IVAL_COUNT_1080P;

        if (webcam_sizes[frmsize_idx].height >= 720)
                return IVAL_COUNT_720P;

        /* For low resolutions, allow all FPS rates */
        return ARRAY_SIZE(webcam_intervals);
}

static int vid_cap_queue_setup(struct vb2_queue *vq,
                       unsigned *nbuffers, unsigned *nplanes,
                       unsigned sizes[], struct device *alloc_devs[])
{
        struct vivid_dev *dev = vb2_get_drv_priv(vq);
        unsigned buffers = tpg_g_buffers(&dev->tpg);
        unsigned h = dev->fmt_cap_rect.height;
        unsigned p;

        if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
                /*
                 * You cannot use read() with FIELD_ALTERNATE since the field
                 * information (TOP/BOTTOM) cannot be passed back to the user.
                 */
                if (vb2_fileio_is_active(vq))
                        return -EINVAL;
        }

        if (dev->queue_setup_error) {
                /*
                 * Error injection: test what happens if queue_setup() returns
                 * an error.
                 */
                dev->queue_setup_error = false;
                return -EINVAL;
        }
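        /*
         * Plane size negotiation: if the caller already specified the number
         * of planes (as VIDIOC_CREATE_BUFS does) then only verify that the
         * given sizes are large enough for the current format, otherwise
         * report the default per-plane size for the current test pattern
         * generator configuration.
         */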
        if (*nplanes) {
                /*
                 * Check if the number of requested planes matches
                 * the number of buffers in the current format. You can't mix that.
                 */
                if (*nplanes != buffers)
                        return -EINVAL;
                for (p = 0; p < buffers; p++) {
                        if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h /
                                        dev->fmt_cap->vdownsampling[p] +
                                        dev->fmt_cap->data_offset[p])
                                return -EINVAL;
                }
        } else {
                for (p = 0; p < buffers; p++)
                        sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
                                        dev->fmt_cap->vdownsampling[p] +
                                        dev->fmt_cap->data_offset[p];
        }

        *nplanes = buffers;

        dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
        for (p = 0; p < buffers; p++)
                dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);

        return 0;
}

static int vid_cap_buf_prepare(struct vb2_buffer *vb)
{
        struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
        unsigned long size;
        unsigned buffers = tpg_g_buffers(&dev->tpg);
        unsigned p;

        dprintk(dev, 1, "%s\n", __func__);

        if (WARN_ON(NULL == dev->fmt_cap))
                return -EINVAL;

        if (dev->buf_prepare_error) {
                /*
                 * Error injection: test what happens if buf_prepare() returns
                 * an error.
                 */
                dev->buf_prepare_error = false;
                return -EINVAL;
        }
        for (p = 0; p < buffers; p++) {
                size = (tpg_g_line_width(&dev->tpg, p) *
                        dev->fmt_cap_rect.height) /
                        dev->fmt_cap->vdownsampling[p] +
                        dev->fmt_cap->data_offset[p];

                if (vb2_plane_size(vb, p) < size) {
                        dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
                                        __func__, p, vb2_plane_size(vb, p), size);
                        return -EINVAL;
                }

                vb2_set_plane_payload(vb, p, size);
                vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
        }

        return 0;
}

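/*
 * buf_finish is called just before a buffer is returned to userspace: for
 * SDTV inputs a fake SMPTE timecode is filled in, derived from the buffer
 * sequence number and the nominal 25 or 30 fps rate of the current standard.
 */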
static void vid_cap_buf_finish(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
        struct v4l2_timecode *tc = &vbuf->timecode;
        unsigned fps = 25;
        unsigned seq = vbuf->sequence;

        if (!vivid_is_sdtv_cap(dev))
                return;

        /*
         * Set the timecode. Rarely used, so it is interesting to
         * test this.
         */
        vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
        if (dev->std_cap[dev->input] & V4L2_STD_525_60)
                fps = 30;
        tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
        tc->flags = 0;
        tc->frames = seq % fps;
        tc->seconds = (seq / fps) % 60;
        tc->minutes = (seq / (60 * fps)) % 60;
        tc->hours = (seq / (60 * 60 * fps)) % 24;
}

static void vid_cap_buf_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
        struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);

        dprintk(dev, 1, "%s\n", __func__);

        spin_lock(&dev->slock);
        list_add_tail(&buf->list, &dev->vid_cap_active);
        spin_unlock(&dev->slock);
}

static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
{
        struct vivid_dev *dev = vb2_get_drv_priv(vq);
        unsigned i;
        int err;

        dev->vid_cap_seq_count = 0;
        dprintk(dev, 1, "%s\n", __func__);
        for (i = 0; i < VIDEO_MAX_FRAME; i++)
                dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
        if (dev->start_streaming_error) {
                dev->start_streaming_error = false;
                err = -EINVAL;
        } else {
                err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
        }
        if (err) {
                struct vivid_buffer *buf, *tmp;

                list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
                        list_del(&buf->list);
                        vb2_buffer_done(&buf->vb.vb2_buf,
                                        VB2_BUF_STATE_QUEUED);
                }
        }
        return err;
}

/* abort streaming and wait for last buffer */
static void vid_cap_stop_streaming(struct vb2_queue *vq)
{
        struct vivid_dev *dev = vb2_get_drv_priv(vq);

        dprintk(dev, 1, "%s\n", __func__);
        vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
}

static void vid_cap_buf_request_complete(struct vb2_buffer *vb)
{
        struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

        v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
}

const struct vb2_ops vivid_vid_cap_qops = {
        .queue_setup            = vid_cap_queue_setup,
        .buf_prepare            = vid_cap_buf_prepare,
        .buf_finish             = vid_cap_buf_finish,
        .buf_queue              = vid_cap_buf_queue,
        .start_streaming        = vid_cap_start_streaming,
        .stop_streaming         = vid_cap_stop_streaming,
        .buf_request_complete   = vid_cap_buf_request_complete,
};

/*
 * Determine the 'picture' quality based on the current TV frequency: either
 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
 * signal or NOISE for no signal.
 */
void vivid_update_quality(struct vivid_dev *dev)
{
        unsigned freq_modulus;

        if (dev->input_is_connected_to_output[dev->input]) {
                /*
                 * The 'noise' will only be replaced by the actual video
                 * if the output video matches the input video settings.
                 */
                tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
                return;
        }
        if (vivid_is_hdmi_cap(dev) &&
            VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) {
                tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
                return;
        }
        if (vivid_is_sdtv_cap(dev) &&
            VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) {
                tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
                return;
        }
        if (!vivid_is_tv_cap(dev)) {
                tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
                return;
        }

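        /*
         * Note: tv_freq is expressed in units of 62.5 kHz (1/16th MHz), so
         * 676 below corresponds to 42.25 MHz and 6 * 16 to one 6 MHz channel.
         */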
        /*
         * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
         * From +/- 0.25 MHz around the channel there is color, and from
         * +/- 1 MHz there is grayscale (chroma is lost).
         * Everywhere else it is just noise.
         */
        freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
        if (freq_modulus > 2 * 16) {
                tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
                        next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
                return;
        }
        if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
                tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
        else
                tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
}

/*
 * Get the current picture quality and the associated afc value.
 */
static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
{
        unsigned freq_modulus;

        if (afc)
                *afc = 0;
        if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
            tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
                return tpg_g_quality(&dev->tpg);

        /*
         * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
         * From +/- 0.25 MHz around the channel there is color, and from
         * +/- 1 MHz there is grayscale (chroma is lost).
         * Everywhere else it is just gray.
         */
        freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
        if (afc)
                *afc = freq_modulus - 1 * 16;
        return TPG_QUAL_GRAY;
}

enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
{
        if (vivid_is_sdtv_cap(dev))
                return dev->std_aspect_ratio[dev->input];

        if (vivid_is_hdmi_cap(dev))
                return dev->dv_timings_aspect_ratio[dev->input];

        return TPG_VIDEO_ASPECT_IMAGE;
}

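/*
 * 720-wide SDTV-style video does not use square pixels: the pixel aspect
 * reported for it is 11:10 for 60 Hz (480-line) formats and 54:59 for 50 Hz
 * (576-line) formats, see vivid_vid_cap_g_pixelaspect() below.
 */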
static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
{
        if (vivid_is_sdtv_cap(dev))
                return (dev->std_cap[dev->input] & V4L2_STD_525_60) ?
                        TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

        if (vivid_is_hdmi_cap(dev) &&
            dev->src_rect.width == 720 && dev->src_rect.height <= 576)
                return dev->src_rect.height == 480 ?
                        TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

        return TPG_PIXEL_ASPECT_SQUARE;
}

/*
 * Called whenever the format has to be reset which can occur when
 * changing inputs, standard, timings, etc.
 */
void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
{
        struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
        u32 dims[V4L2_CTRL_MAX_DIMS] = {};
        unsigned size;
        u64 pixelclock;

        switch (dev->input_type[dev->input]) {
        case WEBCAM:
        default:
                dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
                dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
                dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
                dev->field_cap = V4L2_FIELD_NONE;
                tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
                break;
        case TV:
        case SVID:
                dev->field_cap = dev->tv_field_cap;
                dev->src_rect.width = 720;
                if (dev->std_cap[dev->input] & V4L2_STD_525_60) {
                        dev->src_rect.height = 480;
                        dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
                        dev->service_set_cap = V4L2_SLICED_CAPTION_525;
                } else {
                        dev->src_rect.height = 576;
                        dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
                        dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
                }
                tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
                break;
        case HDMI:
                dev->src_rect.width = bt->width;
                dev->src_rect.height = bt->height;
                size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
                if (dev->reduced_fps && can_reduce_fps(bt)) {
                        pixelclock = div_u64(bt->pixelclock * 1000, 1001);
                        bt->flags |= V4L2_DV_FL_REDUCED_FPS;
                } else {
                        pixelclock = bt->pixelclock;
                        bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
                }
                dev->timeperframe_vid_cap = (struct v4l2_fract) {
                        size / 100, (u32)pixelclock / 100
                };
                if (bt->interlaced)
                        dev->field_cap = V4L2_FIELD_ALTERNATE;
                else
                        dev->field_cap = V4L2_FIELD_NONE;

                /*
                 * We can be called from within s_ctrl, in that case we can't
                 * set/get controls. Luckily we don't need to in that case.
                 */
                if (keep_controls || !dev->colorspace)
                        break;
                if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
                        if (bt->width == 720 && bt->height <= 576)
                                v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
                        else
                                v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
                        v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
                } else {
                        v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
                        v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
                }
                tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
                break;
        }
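        /*
         * The source resolution changed, so reset the derived state: the
         * picture quality, the TPG source configuration, the crop/compose
         * and capture format rectangles and the aspect ratios. In ALTERNATE
         * field mode each buffer holds a single field, so the compose height
         * is half the frame height.
         */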
        vivid_update_quality(dev);
        tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
        dev->crop_cap = dev->src_rect;
        dev->crop_bounds_cap = dev->src_rect;
        dev->compose_cap = dev->crop_cap;
        if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
                dev->compose_cap.height /= 2;
        dev->fmt_cap_rect = dev->compose_cap;
        tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
        tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
        tpg_update_mv_step(&dev->tpg);

        /*
         * We can be called from within s_ctrl, in that case we can't
         * modify controls. Luckily we don't need to in that case.
         */
        if (keep_controls)
                return;

        dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV);
        dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV);
        v4l2_ctrl_modify_dimensions(dev->pixel_array, dims);
}

/* Map the field to something that is valid for the current input */
static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
{
        if (vivid_is_sdtv_cap(dev)) {
                switch (field) {
                case V4L2_FIELD_INTERLACED_TB:
                case V4L2_FIELD_INTERLACED_BT:
                case V4L2_FIELD_SEQ_TB:
                case V4L2_FIELD_SEQ_BT:
                case V4L2_FIELD_TOP:
                case V4L2_FIELD_BOTTOM:
                case V4L2_FIELD_ALTERNATE:
                        return field;
                case V4L2_FIELD_INTERLACED:
                default:
                        return V4L2_FIELD_INTERLACED;
                }
        }
        if (vivid_is_hdmi_cap(dev))
                return dev->dv_timings_cap[dev->input].bt.interlaced ?
                        V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE;
        return V4L2_FIELD_NONE;
}

static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
{
        if (!vivid_input_is_connected_to(dev))
                return tpg_g_colorspace(&dev->tpg);
        return dev->colorspace_out;
}

static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
{
        if (!vivid_input_is_connected_to(dev))
                return tpg_g_xfer_func(&dev->tpg);
        return dev->xfer_func_out;
}

static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
{
        if (!vivid_input_is_connected_to(dev))
                return tpg_g_ycbcr_enc(&dev->tpg);
        return dev->ycbcr_enc_out;
}

static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
{
        if (!vivid_input_is_connected_to(dev))
                return tpg_g_hsv_enc(&dev->tpg);
        return dev->hsv_enc_out;
}

static unsigned vivid_quantization_cap(struct vivid_dev *dev)
{
        if (!vivid_input_is_connected_to(dev))
                return tpg_g_quantization(&dev->tpg);
        return dev->quantization_out;
}

int vivid_g_fmt_vid_cap(struct file *file, void *priv,
                        struct v4l2_format *f)
{
        struct vivid_dev *dev = video_drvdata(file);
        struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
        unsigned p;

        mp->width = dev->fmt_cap_rect.width;
        mp->height = dev->fmt_cap_rect.height;
        mp->field = dev->field_cap;
        mp->pixelformat = dev->fmt_cap->fourcc;
        mp->colorspace = vivid_colorspace_cap(dev);
        mp->xfer_func = vivid_xfer_func_cap(dev);
        if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
                mp->hsv_enc = vivid_hsv_enc_cap(dev);
        else
                mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
        mp->quantization = vivid_quantization_cap(dev);
        mp->num_planes = dev->fmt_cap->buffers;
        for (p = 0; p < mp->num_planes; p++) {
                mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
                mp->plane_fmt[p].sizeimage =
                        (tpg_g_line_width(&dev->tpg, p) * mp->height) /
                        dev->fmt_cap->vdownsampling[p] +
                        dev->fmt_cap->data_offset[p];
        }
        return 0;
}

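/*
 * TRY_FMT helper: clamp the requested resolution to what the current input
 * can deliver (the nearest discrete webcam size, the SDTV standard, or the
 * source rectangle possibly adjusted by the scaler/crop/compose limits),
 * then derive bytesperline, sizeimage and the colorimetry fields.
 */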
int vivid_try_fmt_vid_cap(struct file *file, void *priv,
                        struct v4l2_format *f)
{
        struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
        struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
        struct vivid_dev *dev = video_drvdata(file);
        const struct vivid_fmt *fmt;
        unsigned bytesperline, max_bpl;
        unsigned factor = 1;
        unsigned w, h;
        unsigned p;
        bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC);

        fmt = vivid_get_format(dev, mp->pixelformat);
        if (!fmt) {
                dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
                        mp->pixelformat);
                mp->pixelformat = V4L2_PIX_FMT_YUYV;
                fmt = vivid_get_format(dev, mp->pixelformat);
        }

        mp->field = vivid_field_cap(dev, mp->field);
        if (vivid_is_webcam(dev)) {
                const struct v4l2_frmsize_discrete *sz =
                        v4l2_find_nearest_size(webcam_sizes,
                                               ARRAY_SIZE(webcam_sizes), width,
                                               height, mp->width, mp->height);

                w = sz->width;
                h = sz->height;
        } else if (vivid_is_sdtv_cap(dev)) {
                w = 720;
                h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576;
        } else {
                w = dev->src_rect.width;
                h = dev->src_rect.height;
        }
        if (V4L2_FIELD_HAS_T_OR_B(mp->field))
                factor = 2;
        if (vivid_is_webcam(dev) ||
            (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
                mp->width = w;
                mp->height = h / factor;
        } else {
                struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };

                v4l2_rect_set_min_size(&r, &vivid_min_rect);
                v4l2_rect_set_max_size(&r, &vivid_max_rect);
                if (dev->has_scaler_cap && !dev->has_compose_cap) {
                        struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };

                        v4l2_rect_set_max_size(&r, &max_r);
                } else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
                        v4l2_rect_set_max_size(&r, &dev->src_rect);
                } else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
                        v4l2_rect_set_min_size(&r, &dev->src_rect);
                }
                mp->width = r.width;
                mp->height = r.height / factor;
        }

        /* This driver supports custom bytesperline values */

        mp->num_planes = fmt->buffers;
        for (p = 0; p < fmt->buffers; p++) {
                /* Calculate the minimum supported bytesperline value */
                bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
                /* Calculate the maximum supported bytesperline value */
                max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;

                if (pfmt[p].bytesperline > max_bpl)
                        pfmt[p].bytesperline = max_bpl;
                if (pfmt[p].bytesperline < bytesperline)
                        pfmt[p].bytesperline = bytesperline;

                pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
                                fmt->vdownsampling[p] + fmt->data_offset[p];

                memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
        }
        for (p = fmt->buffers; p < fmt->planes; p++)
                pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
                        (fmt->bit_depth[p] / fmt->vdownsampling[p])) /
                        (fmt->bit_depth[0] / fmt->vdownsampling[0]);

        if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace))
                mp->colorspace = vivid_colorspace_cap(dev);

        if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func))
                mp->xfer_func = vivid_xfer_func_cap(dev);

        if (fmt->color_enc == TGP_COLOR_ENC_HSV) {
                if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc))
                        mp->hsv_enc = vivid_hsv_enc_cap(dev);
        } else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) {
                if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc))
                        mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
        } else {
                mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
        }

        if (fmt->color_enc == TGP_COLOR_ENC_YCBCR ||
            fmt->color_enc == TGP_COLOR_ENC_RGB) {
                if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization))
                        mp->quantization = vivid_quantization_cap(dev);
        } else {
                mp->quantization = vivid_quantization_cap(dev);
        }

        memset(mp->reserved, 0, sizeof(mp->reserved));
        return 0;
}

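/*
 * S_FMT applies the format adjusted by vivid_try_fmt_vid_cap(): it fails
 * with -EBUSY while buffers are allocated on the queue, adjusts the crop
 * and compose rectangles to the new size and reprograms the test pattern
 * generator.
 */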
int vivid_s_fmt_vid_cap(struct file *file, void *priv,
                        struct v4l2_format *f)
{
        struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
        struct vivid_dev *dev = video_drvdata(file);
        struct v4l2_rect *crop = &dev->crop_cap;
        struct v4l2_rect *compose = &dev->compose_cap;
        struct vb2_queue *q = &dev->vb_vid_cap_q;
        int ret = vivid_try_fmt_vid_cap(file, priv, f);
        unsigned factor = 1;
        unsigned p;
        unsigned i;

        if (ret < 0)
                return ret;

        if (vb2_is_busy(q)) {
                dprintk(dev, 1, "%s device busy\n", __func__);
                return -EBUSY;
        }

        dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
        if (V4L2_FIELD_HAS_T_OR_B(mp->field))
                factor = 2;

        /* Note: the webcam input doesn't support scaling, cropping or composing */

        if (!vivid_is_webcam(dev) &&
            (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
                struct v4l2_rect r = { 0, 0, mp->width, mp->height };

                if (dev->has_scaler_cap) {
                        if (dev->has_compose_cap)
                                v4l2_rect_map_inside(compose, &r);
                        else
                                *compose = r;
                        if (dev->has_crop_cap && !dev->has_compose_cap) {
                                struct v4l2_rect min_r = {
                                        0, 0,
                                        r.width / MAX_ZOOM,
                                        factor * r.height / MAX_ZOOM
                                };
                                struct v4l2_rect max_r = {
                                        0, 0,
                                        r.width * MAX_ZOOM,
                                        factor * r.height * MAX_ZOOM
                                };

                                v4l2_rect_set_min_size(crop, &min_r);
                                v4l2_rect_set_max_size(crop, &max_r);
                                v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
                        } else if (dev->has_crop_cap) {
                                struct v4l2_rect min_r = {
                                        0, 0,
                                        compose->width / MAX_ZOOM,
                                        factor * compose->height / MAX_ZOOM
                                };
                                struct v4l2_rect max_r = {
                                        0, 0,
                                        compose->width * MAX_ZOOM,
                                        factor * compose->height * MAX_ZOOM
                                };

                                v4l2_rect_set_min_size(crop, &min_r);
                                v4l2_rect_set_max_size(crop, &max_r);
                                v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
                        }
                } else if (dev->has_crop_cap && !dev->has_compose_cap) {
                        r.height *= factor;
                        v4l2_rect_set_size_to(crop, &r);
                        v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
                        r = *crop;
                        r.height /= factor;
                        v4l2_rect_set_size_to(compose, &r);
                } else if (!dev->has_crop_cap) {
                        v4l2_rect_map_inside(compose, &r);
                } else {
                        r.height *= factor;
                        v4l2_rect_set_max_size(crop, &r);
                        v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
                        compose->top *= factor;
                        compose->height *= factor;
                        v4l2_rect_set_size_to(compose, crop);
                        v4l2_rect_map_inside(compose, &r);
                        compose->top /= factor;
                        compose->height /= factor;
                }
        } else if (vivid_is_webcam(dev)) {
                unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx);

                /* Guaranteed to be a match */
                for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
                        if (webcam_sizes[i].width == mp->width &&
                            webcam_sizes[i].height == mp->height)
                                break;
                dev->webcam_size_idx = i;
                if (dev->webcam_ival_idx >= ival_sz)
                        dev->webcam_ival_idx = ival_sz - 1;
                vivid_update_format_cap(dev, false);
        } else {
                struct v4l2_rect r = { 0, 0, mp->width, mp->height };

                v4l2_rect_set_size_to(compose, &r);
                r.height *= factor;
                v4l2_rect_set_size_to(crop, &r);
        }

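        /*
         * The new format is now final: store the format rectangle and program
         * the TPG with the fourcc, buffer height, per-plane bytesperline and
         * field setting that were just negotiated.
         */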
        dev->fmt_cap_rect.width = mp->width;
        dev->fmt_cap_rect.height = mp->height;
        tpg_s_buf_height(&dev->tpg, mp->height);
        tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
        for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
                tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
        dev->field_cap = mp->field;
        if (dev->field_cap == V4L2_FIELD_ALTERNATE)
                tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
        else
                tpg_s_field(&dev->tpg, dev->field_cap, false);
        tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
        if (vivid_is_sdtv_cap(dev))
                dev->tv_field_cap = mp->field;
        tpg_update_mv_step(&dev->tpg);
        dev->tpg.colorspace = mp->colorspace;
        dev->tpg.xfer_func = mp->xfer_func;
        if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR)
                dev->tpg.ycbcr_enc = mp->ycbcr_enc;
        else
                dev->tpg.hsv_enc = mp->hsv_enc;
        dev->tpg.quantization = mp->quantization;

        return 0;
}

int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
                        struct v4l2_format *f)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (!dev->multiplanar)
                return -ENOTTY;
        return vivid_g_fmt_vid_cap(file, priv, f);
}

int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
                        struct v4l2_format *f)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (!dev->multiplanar)
                return -ENOTTY;
        return vivid_try_fmt_vid_cap(file, priv, f);
}

int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
                        struct v4l2_format *f)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (!dev->multiplanar)
                return -ENOTTY;
        return vivid_s_fmt_vid_cap(file, priv, f);
}

int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
                        struct v4l2_format *f)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (dev->multiplanar)
                return -ENOTTY;
        return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
}

int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
                        struct v4l2_format *f)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (dev->multiplanar)
                return -ENOTTY;
        return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
}

int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
                        struct v4l2_format *f)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (dev->multiplanar)
                return -ENOTTY;
        return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
}

int vivid_vid_cap_g_selection(struct file *file, void *priv,
                              struct v4l2_selection *sel)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (!dev->has_crop_cap && !dev->has_compose_cap)
                return -ENOTTY;
        if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
                return -EINVAL;
        if (vivid_is_webcam(dev))
                return -ENODATA;

        sel->r.left = sel->r.top = 0;
        switch (sel->target) {
        case V4L2_SEL_TGT_CROP:
                if (!dev->has_crop_cap)
                        return -EINVAL;
                sel->r = dev->crop_cap;
                break;
        case V4L2_SEL_TGT_CROP_DEFAULT:
        case V4L2_SEL_TGT_CROP_BOUNDS:
                if (!dev->has_crop_cap)
                        return -EINVAL;
                sel->r = dev->src_rect;
                break;
        case V4L2_SEL_TGT_COMPOSE_BOUNDS:
                if (!dev->has_compose_cap)
                        return -EINVAL;
                sel->r = vivid_max_rect;
                break;
        case V4L2_SEL_TGT_COMPOSE:
                if (!dev->has_compose_cap)
                        return -EINVAL;
                sel->r = dev->compose_cap;
                break;
        case V4L2_SEL_TGT_COMPOSE_DEFAULT:
                if (!dev->has_compose_cap)
                        return -EINVAL;
                sel->r = dev->fmt_cap_rect;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

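/*
 * The crop rectangle is expressed in frame lines, while the compose and
 * format rectangles are expressed in field lines whenever field_cap is a
 * TOP/BOTTOM/ALTERNATE field setting. The 'factor' below converts between
 * the two when comparing or copying rectangles.
 */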
int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
        struct vivid_dev *dev = video_drvdata(file);
        struct v4l2_rect *crop = &dev->crop_cap;
        struct v4l2_rect *compose = &dev->compose_cap;
        unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
        int ret;

        if (!dev->has_crop_cap && !dev->has_compose_cap)
                return -ENOTTY;
        if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
                return -EINVAL;
        if (vivid_is_webcam(dev))
                return -ENODATA;

        switch (s->target) {
        case V4L2_SEL_TGT_CROP:
                if (!dev->has_crop_cap)
                        return -EINVAL;
                ret = vivid_vid_adjust_sel(s->flags, &s->r);
                if (ret)
                        return ret;
                v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
                v4l2_rect_set_max_size(&s->r, &dev->src_rect);
                v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
                s->r.top /= factor;
                s->r.height /= factor;
                if (dev->has_scaler_cap) {
                        struct v4l2_rect fmt = dev->fmt_cap_rect;
                        struct v4l2_rect max_rect = {
                                0, 0,
                                s->r.width * MAX_ZOOM,
                                s->r.height * MAX_ZOOM
                        };
                        struct v4l2_rect min_rect = {
                                0, 0,
                                s->r.width / MAX_ZOOM,
                                s->r.height / MAX_ZOOM
                        };

                        v4l2_rect_set_min_size(&fmt, &min_rect);
                        if (!dev->has_compose_cap)
                                v4l2_rect_set_max_size(&fmt, &max_rect);
                        if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
                            vb2_is_busy(&dev->vb_vid_cap_q))
                                return -EBUSY;
                        if (dev->has_compose_cap) {
                                v4l2_rect_set_min_size(compose, &min_rect);
                                v4l2_rect_set_max_size(compose, &max_rect);
                                v4l2_rect_map_inside(compose, &fmt);
                        }
                        dev->fmt_cap_rect = fmt;
                        tpg_s_buf_height(&dev->tpg, fmt.height);
                } else if (dev->has_compose_cap) {
                        struct v4l2_rect fmt = dev->fmt_cap_rect;

                        v4l2_rect_set_min_size(&fmt, &s->r);
                        if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
                            vb2_is_busy(&dev->vb_vid_cap_q))
                                return -EBUSY;
                        dev->fmt_cap_rect = fmt;
                        tpg_s_buf_height(&dev->tpg, fmt.height);
                        v4l2_rect_set_size_to(compose, &s->r);
                        v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
                } else {
                        if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
                            vb2_is_busy(&dev->vb_vid_cap_q))
                                return -EBUSY;
                        v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
                        v4l2_rect_set_size_to(compose, &s->r);
                        v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
                        tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
                }
                s->r.top *= factor;
                s->r.height *= factor;
                *crop = s->r;
                break;
        case V4L2_SEL_TGT_COMPOSE:
                if (!dev->has_compose_cap)
                        return -EINVAL;
                ret = vivid_vid_adjust_sel(s->flags, &s->r);
                if (ret)
                        return ret;
                v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
                v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
                if (dev->has_scaler_cap) {
                        struct v4l2_rect max_rect = {
                                0, 0,
                                dev->src_rect.width * MAX_ZOOM,
                                (dev->src_rect.height / factor) * MAX_ZOOM
                        };

                        v4l2_rect_set_max_size(&s->r, &max_rect);
                        if (dev->has_crop_cap) {
                                struct v4l2_rect min_rect = {
                                        0, 0,
                                        s->r.width / MAX_ZOOM,
                                        (s->r.height * factor) / MAX_ZOOM
                                };
                                struct v4l2_rect max_rect = {
                                        0, 0,
                                        s->r.width * MAX_ZOOM,
                                        (s->r.height * factor) * MAX_ZOOM
                                };

                                v4l2_rect_set_min_size(crop, &min_rect);
                                v4l2_rect_set_max_size(crop, &max_rect);
                                v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
                        }
                } else if (dev->has_crop_cap) {
                        s->r.top *= factor;
                        s->r.height *= factor;
                        v4l2_rect_set_max_size(&s->r, &dev->src_rect);
                        v4l2_rect_set_size_to(crop, &s->r);
                        v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
                        s->r.top /= factor;
                        s->r.height /= factor;
                } else {
                        v4l2_rect_set_size_to(&s->r, &dev->src_rect);
                        s->r.height /= factor;
                }
                v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
                *compose = s->r;
                break;
        default:
                return -EINVAL;
        }

        tpg_s_crop_compose(&dev->tpg, crop, compose);
        return 0;
}

int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv,
                                int type, struct v4l2_fract *f)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
                return -EINVAL;

        switch (vivid_get_pixel_aspect(dev)) {
        case TPG_PIXEL_ASPECT_NTSC:
                f->numerator = 11;
                f->denominator = 10;
                break;
        case TPG_PIXEL_ASPECT_PAL:
                f->numerator = 54;
                f->denominator = 59;
                break;
        default:
                break;
        }
        return 0;
}

static const struct v4l2_audio vivid_audio_inputs[] = {
        { 0, "TV", V4L2_AUDCAP_STEREO },
        { 1, "Line-In", V4L2_AUDCAP_STEREO },
};

int vidioc_enum_input(struct file *file, void *priv,
                        struct v4l2_input *inp)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (inp->index >= dev->num_inputs)
                return -EINVAL;

        inp->type = V4L2_INPUT_TYPE_CAMERA;
        switch (dev->input_type[inp->index]) {
        case WEBCAM:
                snprintf(inp->name, sizeof(inp->name), "Webcam %03u-%u",
                         dev->inst, dev->input_name_counter[inp->index]);
                inp->capabilities = 0;
                break;
        case TV:
                snprintf(inp->name, sizeof(inp->name), "TV %03u-%u",
                         dev->inst, dev->input_name_counter[inp->index]);
                inp->type = V4L2_INPUT_TYPE_TUNER;
                inp->std = V4L2_STD_ALL;
                if (dev->has_audio_inputs)
                        inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
                inp->capabilities = V4L2_IN_CAP_STD;
                break;
        case SVID:
                snprintf(inp->name, sizeof(inp->name), "S-Video %03u-%u",
                         dev->inst, dev->input_name_counter[inp->index]);
                inp->std = V4L2_STD_ALL;
                if (dev->has_audio_inputs)
                        inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
                inp->capabilities = V4L2_IN_CAP_STD;
                break;
        case HDMI:
                snprintf(inp->name, sizeof(inp->name), "HDMI %03u-%u",
                         dev->inst, dev->input_name_counter[inp->index]);
                inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
                if (dev->edid_blocks == 0 ||
                    dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL)
                        inp->status |= V4L2_IN_ST_NO_SIGNAL;
                else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK ||
                         dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE)
                        inp->status |= V4L2_IN_ST_NO_H_LOCK;
                break;
        }
        if (dev->sensor_hflip)
                inp->status |= V4L2_IN_ST_HFLIP;
        if (dev->sensor_vflip)
                inp->status |= V4L2_IN_ST_VFLIP;
        if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
                if (dev->std_signal_mode[dev->input] == NO_SIGNAL) {
                        inp->status |= V4L2_IN_ST_NO_SIGNAL;
                } else if (dev->std_signal_mode[dev->input] == NO_LOCK) {
                        inp->status |= V4L2_IN_ST_NO_H_LOCK;
                } else if (vivid_is_tv_cap(dev)) {
                        switch (tpg_g_quality(&dev->tpg)) {
                        case TPG_QUAL_GRAY:
                                inp->status |= V4L2_IN_ST_COLOR_KILL;
                                break;
                        case TPG_QUAL_NOISE:
                                inp->status |= V4L2_IN_ST_NO_H_LOCK;
                                break;
                        default:
                                break;
                        }
                }
        }
        return 0;
}

int vidioc_g_input(struct file *file, void *priv, unsigned *i)
{
        struct vivid_dev *dev = video_drvdata(file);

        *i = dev->input;
        return 0;
}

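/*
 * Switching inputs is refused while any of the video, VBI or metadata
 * capture queues has buffers allocated. On success the capture format,
 * colorspace and several per-input control states are re-derived for the
 * newly selected input.
 */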
int vidioc_s_input(struct file *file, void *priv, unsigned i)
{
        struct vivid_dev *dev = video_drvdata(file);
        struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
        unsigned brightness;

        if (i >= dev->num_inputs)
                return -EINVAL;

        if (i == dev->input)
                return 0;

        if (vb2_is_busy(&dev->vb_vid_cap_q) ||
            vb2_is_busy(&dev->vb_vbi_cap_q) ||
            vb2_is_busy(&dev->vb_meta_cap_q))
                return -EBUSY;

        dev->input = i;
        dev->vid_cap_dev.tvnorms = 0;
        if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
                dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
                dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
        }
        dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
        dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
        vivid_update_format_cap(dev, false);

        if (dev->colorspace) {
                switch (dev->input_type[i]) {
                case WEBCAM:
                        v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
                        break;
                case TV:
                case SVID:
                        v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
                        break;
                case HDMI:
                        if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
                                if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
                                        v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
                                else
                                        v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
                        } else {
                                v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
                        }
                        break;
                }
        }

        /*
         * Modify the brightness range depending on the input.
         * This makes it easy to use vivid to test if applications can
         * handle control range modifications and is also how this is
         * typically used in practice as different inputs may be hooked
         * up to different receivers with different control ranges.
         */
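        /*
         * For example: input 0 spans 0..255 with default 128, input 1 spans
         * 128..383 with default 256, and so on.
         */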
        brightness = 128 * i + dev->input_brightness[i];
        v4l2_ctrl_modify_range(dev->brightness,
                               128 * i, 255 + 128 * i, 1, 128 + 128 * i);
        v4l2_ctrl_s_ctrl(dev->brightness, brightness);

        /* Restore per-input states. */
        v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode,
                           vivid_is_hdmi_cap(dev));
        v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) &&
                           dev->dv_timings_signal_mode[dev->input] ==
                           SELECTED_DV_TIMINGS);
        v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev));
        v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) &&
                           dev->std_signal_mode[dev->input]);

        if (vivid_is_hdmi_cap(dev)) {
                v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode,
                                 dev->dv_timings_signal_mode[dev->input]);
                v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings,
                                 dev->query_dv_timings[dev->input]);
        } else if (vivid_is_sdtv_cap(dev)) {
                v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode,
                                 dev->std_signal_mode[dev->input]);
                v4l2_ctrl_s_ctrl(dev->ctrl_standard,
                                 dev->std_signal_mode[dev->input]);
        }

        return 0;
}

int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
        if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
                return -EINVAL;
        *vin = vivid_audio_inputs[vin->index];
        return 0;
}

int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (!vivid_is_sdtv_cap(dev))
                return -EINVAL;
        *vin = vivid_audio_inputs[dev->tv_audio_input];
        return 0;
}

int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (!vivid_is_sdtv_cap(dev))
                return -EINVAL;
        if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
                return -EINVAL;
        dev->tv_audio_input = vin->index;
        return 0;
}

int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (vf->tuner != 0)
                return -EINVAL;
        vf->frequency = dev->tv_freq;
        return 0;
}

int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (vf->tuner != 0)
                return -EINVAL;
        dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
        if (vivid_is_tv_cap(dev))
                vivid_update_quality(dev);
        return 0;
}

int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (vt->index != 0)
                return -EINVAL;
        if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
                return -EINVAL;
        dev->tv_audmode = vt->audmode;
        return 0;
}

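/*
 * The reported signal strength, AFC value and audio subchannels are all
 * derived from the simulated picture quality and the fake channel number,
 * so tuning across the band gives repeatable, testable results.
 */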
int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
        struct vivid_dev *dev = video_drvdata(file);
        enum tpg_quality qual;

        if (vt->index != 0)
                return -EINVAL;

        vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
                         V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
        vt->audmode = dev->tv_audmode;
        vt->rangelow = MIN_TV_FREQ;
        vt->rangehigh = MAX_TV_FREQ;
        qual = vivid_get_quality(dev, &vt->afc);
        if (qual == TPG_QUAL_COLOR)
                vt->signal = 0xffff;
        else if (qual == TPG_QUAL_GRAY)
                vt->signal = 0x8000;
        else
                vt->signal = 0;
        if (qual == TPG_QUAL_NOISE) {
                vt->rxsubchans = 0;
        } else if (qual == TPG_QUAL_GRAY) {
                vt->rxsubchans = V4L2_TUNER_SUB_MONO;
        } else {
                unsigned int channel_nr = dev->tv_freq / (6 * 16);
                unsigned int options =
                        (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3;

                switch (channel_nr % options) {
                case 0:
                        vt->rxsubchans = V4L2_TUNER_SUB_MONO;
                        break;
                case 1:
                        vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
                        break;
                case 2:
                        if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M)
                                vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
                        else
                                vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
                        break;
                case 3:
                        vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
                        break;
                }
        }
        strscpy(vt->name, "TV Tuner", sizeof(vt->name));
        return 0;
}

/* Must remain in sync with the vivid_ctrl_standard_strings array */
const v4l2_std_id vivid_standard[] = {
        V4L2_STD_NTSC_M,
        V4L2_STD_NTSC_M_JP,
        V4L2_STD_NTSC_M_KR,
        V4L2_STD_NTSC_443,
        V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
        V4L2_STD_PAL_I,
        V4L2_STD_PAL_DK,
        V4L2_STD_PAL_M,
        V4L2_STD_PAL_N,
        V4L2_STD_PAL_Nc,
        V4L2_STD_PAL_60,
        V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
        V4L2_STD_SECAM_DK,
        V4L2_STD_SECAM_L,
        V4L2_STD_SECAM_LC,
        V4L2_STD_UNKNOWN
};

/* Must remain in sync with the vivid_standard array */
const char * const vivid_ctrl_standard_strings[] = {
        "NTSC-M",
        "NTSC-M-JP",
        "NTSC-M-KR",
        "NTSC-443",
        "PAL-BGH",
        "PAL-I",
        "PAL-DK",
        "PAL-M",
        "PAL-N",
        "PAL-Nc",
        "PAL-60",
        "SECAM-BGH",
        "SECAM-DK",
        "SECAM-L",
        "SECAM-Lc",
        NULL,
};

int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
        struct vivid_dev *dev = video_drvdata(file);
        unsigned int last = dev->query_std_last[dev->input];

        if (!vivid_is_sdtv_cap(dev))
                return -ENODATA;
        if (dev->std_signal_mode[dev->input] == NO_SIGNAL ||
            dev->std_signal_mode[dev->input] == NO_LOCK) {
                *id = V4L2_STD_UNKNOWN;
                return 0;
        }
        if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
                *id = V4L2_STD_UNKNOWN;
        } else if (dev->std_signal_mode[dev->input] == CURRENT_STD) {
                *id = dev->std_cap[dev->input];
        } else if (dev->std_signal_mode[dev->input] == SELECTED_STD) {
                *id = dev->query_std[dev->input];
        } else {
                *id = vivid_standard[last];
                dev->query_std_last[dev->input] =
                        (last + 1) % ARRAY_SIZE(vivid_standard);
        }

        return 0;
}

int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (!vivid_is_sdtv_cap(dev))
                return -ENODATA;
        if (dev->std_cap[dev->input] == id)
                return 0;
        if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
                return -EBUSY;
        dev->std_cap[dev->input] = id;
        vivid_update_format_cap(dev, false);
        return 0;
}

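/*
 * Guess a standard picture aspect ratio (4:3, 16:9, 16:10, 5:4 or 15:9)
 * for the given width and height; this is used by the GTF timings
 * detection in valid_cvt_gtf_timings() below.
 */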
static void find_aspect_ratio(u32 width, u32 height,
                              u32 *num, u32 *denom)
{
        if (!(height % 3) && ((height * 4 / 3) == width)) {
                *num = 4;
                *denom = 3;
        } else if (!(height % 9) && ((height * 16 / 9) == width)) {
                *num = 16;
                *denom = 9;
        } else if (!(height % 10) && ((height * 16 / 10) == width)) {
                *num = 16;
                *denom = 10;
        } else if (!(height % 4) && ((height * 5 / 4) == width)) {
                *num = 5;
                *denom = 4;
        } else if (!(height % 9) && ((height * 15 / 9) == width)) {
                *num = 15;
                *denom = 9;
        } else { /* default to 16:9 */
                *num = 16;
                *denom = 9;
        }
}

static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
{
        struct v4l2_bt_timings *bt = &timings->bt;
        u32 total_h_pixel;
        u32 total_v_lines;
        u32 h_freq;

        if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
                                   NULL, NULL))
                return false;

        total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
        total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);

        h_freq = (u32)bt->pixelclock / total_h_pixel;

        if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
                struct v4l2_dv_timings cvt = {};

                if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
                                    bt->polarities, bt->interlaced,
                                    &vivid_dv_timings_cap, &cvt) &&
                    cvt.bt.width == bt->width && cvt.bt.height == bt->height) {
                        *timings = cvt;
                        return true;
                }
        }

        if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
                struct v4l2_dv_timings gtf = {};
                struct v4l2_fract aspect_ratio;

                find_aspect_ratio(bt->width, bt->height,
                                  &aspect_ratio.numerator,
                                  &aspect_ratio.denominator);
                if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
                                    bt->polarities, bt->interlaced,
                                    aspect_ratio, &vivid_dv_timings_cap,
                                    &gtf) &&
                    gtf.bt.width == bt->width && gtf.bt.height == bt->height) {
                        *timings = gtf;
                        return true;
                }
        }
        return false;
}

int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
                               struct v4l2_dv_timings *timings)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (!vivid_is_hdmi_cap(dev))
                return -ENODATA;
        if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
                                      0, NULL, NULL) &&
            !valid_cvt_gtf_timings(timings))
                return -EINVAL;

        if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input],
                                  0, false))
                return 0;
        if (vb2_is_busy(&dev->vb_vid_cap_q))
                return -EBUSY;

        dev->dv_timings_cap[dev->input] = *timings;
        vivid_update_format_cap(dev, false);
        return 0;
}

int vidioc_query_dv_timings(struct file *file, void *_fh,
                            struct v4l2_dv_timings *timings)
{
        struct vivid_dev *dev = video_drvdata(file);
        unsigned int input = dev->input;
        unsigned int last = dev->query_dv_timings_last[input];

        if (!vivid_is_hdmi_cap(dev))
                return -ENODATA;
        if (dev->dv_timings_signal_mode[input] == NO_SIGNAL ||
            dev->edid_blocks == 0)
                return -ENOLINK;
        if (dev->dv_timings_signal_mode[input] == NO_LOCK)
                return -ENOLCK;
        if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) {
                timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
                return -ERANGE;
        }
        if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) {
                *timings = dev->dv_timings_cap[input];
        } else if (dev->dv_timings_signal_mode[input] ==
                   SELECTED_DV_TIMINGS) {
                *timings =
                        v4l2_dv_timings_presets[dev->query_dv_timings[input]];
        } else {
                *timings =
                        v4l2_dv_timings_presets[last];
                dev->query_dv_timings_last[input] =
                        (last + 1) % dev->query_dv_timings_size;
        }
        return 0;
}

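/*
 * Recompute the EDID-present, hotplug and rx-sense bitmasks for the HDMI
 * outputs of this instance: a bit is set for every HDMI output that is
 * connected to an input instance which currently has an EDID.
 */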
void vivid_update_outputs(struct vivid_dev *dev)
{
        u32 edid_present = 0;

        if (!dev || !dev->num_outputs)
                return;
        for (unsigned int i = 0, j = 0; i < dev->num_outputs; i++) {
                if (dev->output_type[i] != HDMI)
                        continue;

                struct vivid_dev *dev_rx = dev->output_to_input_instance[i];

                if (dev_rx && dev_rx->edid_blocks)
                        edid_present |= 1 << j;
                j++;
        }
        v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, edid_present);
        v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, edid_present);
        v4l2_ctrl_s_ctrl(dev->ctrl_tx_rxsense, edid_present);
}

void vivid_update_connected_outputs(struct vivid_dev *dev)
{
        u16 phys_addr = cec_get_edid_phys_addr(dev->edid, dev->edid_blocks * 128, NULL);

        for (unsigned int i = 0, j = 0; i < dev->num_inputs; i++) {
                unsigned int menu_idx =
                        dev->input_is_connected_to_output[i];

                if (dev->input_type[i] != HDMI)
                        continue;
                j++;
                if (menu_idx < FIXED_MENU_ITEMS)
                        continue;

                struct vivid_dev *dev_tx = vivid_ctrl_hdmi_to_output_instance[menu_idx];
                unsigned int output = vivid_ctrl_hdmi_to_output_index[menu_idx];

                if (!dev_tx)
                        continue;

                unsigned int hdmi_output = dev_tx->output_to_iface_index[output];

                vivid_update_outputs(dev_tx);
                if (dev->edid_blocks) {
                        cec_s_phys_addr(dev_tx->cec_tx_adap[hdmi_output],
                                        v4l2_phys_addr_for_input(phys_addr, j),
                                        false);
                } else {
                        cec_phys_addr_invalidate(dev_tx->cec_tx_adap[hdmi_output]);
                }
        }
}

int vidioc_s_edid(struct file *file, void *_fh,
                  struct v4l2_edid *edid)
{
        struct vivid_dev *dev = video_drvdata(file);
        u16 phys_addr;
        int ret;

        memset(edid->reserved, 0, sizeof(edid->reserved));
        if (edid->pad >= dev->num_inputs)
                return -EINVAL;
        if (dev->input_type[edid->pad] != HDMI || edid->start_block)
                return -EINVAL;
        if (edid->blocks == 0) {
                if (vb2_is_busy(&dev->vb_vid_cap_q))
                        return -EBUSY;
                dev->edid_blocks = 0;
                vivid_update_connected_outputs(dev);
                return 0;
        }
        if (edid->blocks > dev->edid_max_blocks) {
                edid->blocks = dev->edid_max_blocks;
                return -E2BIG;
        }
        phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
        ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
        if (ret)
                return ret;

        if (vb2_is_busy(&dev->vb_vid_cap_q))
                return -EBUSY;

        dev->edid_blocks = edid->blocks;
        memcpy(dev->edid, edid->edid, edid->blocks * 128);

        vivid_update_connected_outputs(dev);
        return 0;
}

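/*
 * Webcam inputs enumerate the discrete webcam_sizes list; the other inputs
 * only support this ioctl if a scaler is present and then report a single
 * stepwise range.
 */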
int vidioc_enum_framesizes(struct file *file, void *fh,
                           struct v4l2_frmsizeenum *fsize)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
                return -EINVAL;
        if (vivid_get_format(dev, fsize->pixel_format) == NULL)
                return -EINVAL;
        if (vivid_is_webcam(dev)) {
                if (fsize->index >= ARRAY_SIZE(webcam_sizes))
                        return -EINVAL;
                fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
                fsize->discrete = webcam_sizes[fsize->index];
                return 0;
        }
        if (fsize->index)
                return -EINVAL;
        fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
        fsize->stepwise.min_width = MIN_WIDTH;
        fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
        fsize->stepwise.step_width = 2;
        fsize->stepwise.min_height = MIN_HEIGHT;
        fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
        fsize->stepwise.step_height = 2;
        return 0;
}

/* timeperframe is arbitrary and continuous */
int vidioc_enum_frameintervals(struct file *file, void *priv,
                               struct v4l2_frmivalenum *fival)
{
        struct vivid_dev *dev = video_drvdata(file);
        const struct vivid_fmt *fmt;
        int i;

        fmt = vivid_get_format(dev, fival->pixel_format);
        if (!fmt)
                return -EINVAL;

        if (!vivid_is_webcam(dev)) {
                if (fival->index)
                        return -EINVAL;
                if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
                        return -EINVAL;
                if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
                        return -EINVAL;
                fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
                fival->discrete = dev->timeperframe_vid_cap;
                return 0;
        }

        for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
                if (fival->width == webcam_sizes[i].width &&
                    fival->height == webcam_sizes[i].height)
                        break;
        if (i == ARRAY_SIZE(webcam_sizes))
                return -EINVAL;
        if (fival->index >= webcam_ival_count(dev, i))
                return -EINVAL;
        fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
        fival->discrete = webcam_intervals[fival->index];
        return 0;
}

int vivid_vid_cap_g_parm(struct file *file, void *priv,
                         struct v4l2_streamparm *parm)
{
        struct vivid_dev *dev = video_drvdata(file);

        if (parm->type != (dev->multiplanar ?
                           V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
                           V4L2_BUF_TYPE_VIDEO_CAPTURE))
                return -EINVAL;

        parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
        parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
        parm->parm.capture.readbuffers = 1;
        return 0;
}

int vivid_vid_cap_s_parm(struct file *file, void *priv,
                         struct v4l2_streamparm *parm)
{
        struct vivid_dev *dev = video_drvdata(file);
        unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx);
        struct v4l2_fract tpf;
        unsigned i;

        if (parm->type != (dev->multiplanar ?
                           V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
                           V4L2_BUF_TYPE_VIDEO_CAPTURE))
                return -EINVAL;
        if (!vivid_is_webcam(dev))
                return vivid_vid_cap_g_parm(file, priv, parm);

        tpf = parm->parm.capture.timeperframe;

        if (tpf.denominator == 0)
                tpf = webcam_intervals[ival_sz - 1];
        for (i = 0; i < ival_sz; i++)
                if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i]))
                        break;
        if (i == ival_sz)
                i = ival_sz - 1;
        dev->webcam_ival_idx = i;
        tpf = webcam_intervals[dev->webcam_ival_idx];

        /* resync the thread's timings */
        dev->cap_seq_resync = true;
        dev->timeperframe_vid_cap = tpf;
        parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
        parm->parm.capture.timeperframe = tpf;
        parm->parm.capture.readbuffers = 1;
        return 0;
}