// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-vid-cap.c - video capture support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <linux/prandom.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-rect.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-kthread-cap.h"
#include "vivid-vid-cap.h"

/* Sizes must be in increasing order */
static const struct v4l2_frmsize_discrete webcam_sizes[] = {
	{ 320, 180 },
	{ 640, 360 },
	{ 640, 480 },
	{ 1280, 720 },
	{ 1920, 1080 },
	{ 3840, 2160 },
};

/*
 * Intervals must be in increasing order and there must be twice as many
 * elements in this array as there are in webcam_sizes.
 */
static const struct v4l2_fract webcam_intervals[] = {
	{ 1, 1 },
	{ 1, 2 },
	{ 1, 4 },
	{ 1, 5 },
	{ 1, 10 },
	{ 2, 25 },
	{ 1, 15 }, /* 7 - maximum for 2160p */
	{ 1, 25 },
	{ 1, 30 }, /* 9 - maximum for 1080p */
	{ 1, 40 },
	{ 1, 50 },
	{ 1, 60 }, /* 12 - maximum for 720p */
	{ 1, 120 },
};

/* Limit maximum FPS rates for high resolutions */
#define IVAL_COUNT_720P 12 /* 720p and up is limited to 60 fps */
#define IVAL_COUNT_1080P 9 /* 1080p and up is limited to 30 fps */
#define IVAL_COUNT_2160P 7 /* 2160p and up is limited to 15 fps */

static inline unsigned int webcam_ival_count(const struct vivid_dev *dev,
					     unsigned int frmsize_idx)
{
	if (webcam_sizes[frmsize_idx].height >= 2160)
		return IVAL_COUNT_2160P;

	if (webcam_sizes[frmsize_idx].height >= 1080)
		return IVAL_COUNT_1080P;

	if (webcam_sizes[frmsize_idx].height >= 720)
		return IVAL_COUNT_720P;

	/* For low resolutions, allow all FPS rates */
	return ARRAY_SIZE(webcam_intervals);
}
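
/*
 * Each plane of a capture buffer holds tpg_g_line_width() bytes per line
 * for fmt_cap_rect.height lines, divided by the plane's vertical
 * downsampling factor, plus the plane's data_offset. queue_setup(),
 * buf_prepare() and the format ioctls below all size the planes using
 * this same formula.
 */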

static int vid_cap_queue_setup(struct vb2_queue *vq,
			       unsigned *nbuffers, unsigned *nplanes,
			       unsigned sizes[], struct device *alloc_devs[])
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned h = dev->fmt_cap_rect.height;
	unsigned p;

	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * You cannot use read() with FIELD_ALTERNATE since the field
		 * information (TOP/BOTTOM) cannot be passed back to the user.
		 */
		if (vb2_fileio_is_active(vq))
			return -EINVAL;
	}

	if (dev->queue_setup_error) {
		/*
		 * Error injection: test what happens if queue_setup() returns
		 * an error.
		 */
		dev->queue_setup_error = false;
		return -EINVAL;
	}
	if (*nplanes) {
		/*
		 * Check if the number of requested planes matches
		 * the number of buffers in the current format. You can't mix that.
		 */
		if (*nplanes != buffers)
			return -EINVAL;
		for (p = 0; p < buffers; p++) {
			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h /
				       dev->fmt_cap->vdownsampling[p] +
				       dev->fmt_cap->data_offset[p])
				return -EINVAL;
		}
	} else {
		for (p = 0; p < buffers; p++)
			sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
					dev->fmt_cap->vdownsampling[p] +
					dev->fmt_cap->data_offset[p];
	}

	*nplanes = buffers;

	dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
	for (p = 0; p < buffers; p++)
		dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);

	return 0;
}

static int vid_cap_buf_prepare(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size;
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned p;

	dprintk(dev, 1, "%s\n", __func__);

	if (WARN_ON(NULL == dev->fmt_cap))
		return -EINVAL;

	if (dev->buf_prepare_error) {
		/*
		 * Error injection: test what happens if buf_prepare() returns
		 * an error.
		 */
		dev->buf_prepare_error = false;
		return -EINVAL;
	}
	for (p = 0; p < buffers; p++) {
		size = (tpg_g_line_width(&dev->tpg, p) *
			dev->fmt_cap_rect.height) /
			dev->fmt_cap->vdownsampling[p] +
			dev->fmt_cap->data_offset[p];

		if (vb2_plane_size(vb, p) < size) {
			dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
				__func__, p, vb2_plane_size(vb, p), size);
			return -EINVAL;
		}

		vb2_set_plane_payload(vb, p, size);
		vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
	}

	return 0;
}
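
/*
 * The timecode attached in buf_finish() below is derived purely from the
 * buffer sequence number: frames = seq % fps, then seconds, minutes and
 * hours follow. For example, at 25 fps a sequence number of 1234 yields
 * timecode 00:00:49:09.
 */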

static void vid_cap_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_timecode *tc = &vbuf->timecode;
	unsigned fps = 25;
	unsigned seq = vbuf->sequence;

	if (!vivid_is_sdtv_cap(dev))
		return;

	/*
	 * Set the timecode. Rarely used, so it is interesting to
	 * test this.
	 */
	vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
	if (dev->std_cap[dev->input] & V4L2_STD_525_60)
		fps = 30;
	tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
	tc->flags = 0;
	tc->frames = seq % fps;
	tc->seconds = (seq / fps) % 60;
	tc->minutes = (seq / (60 * fps)) % 60;
	tc->hours = (seq / (60 * 60 * fps)) % 24;
}

static void vid_cap_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);

	dprintk(dev, 1, "%s\n", __func__);

	spin_lock(&dev->slock);
	list_add_tail(&buf->list, &dev->vid_cap_active);
	spin_unlock(&dev->slock);
}

static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned i;
	int err;

	dev->vid_cap_seq_count = 0;
	dprintk(dev, 1, "%s\n", __func__);
	for (i = 0; i < MAX_VID_CAP_BUFFERS; i++)
		dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
	if (dev->start_streaming_error) {
		dev->start_streaming_error = false;
		err = -EINVAL;
	} else {
		err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
	}
	if (err) {
		struct vivid_buffer *buf, *tmp;

		list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf,
					VB2_BUF_STATE_QUEUED);
		}
	}
	return err;
}

/* abort streaming and wait for last buffer */
static void vid_cap_stop_streaming(struct vb2_queue *vq)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);

	dprintk(dev, 1, "%s\n", __func__);
	vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
}

static void vid_cap_buf_request_complete(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
}

const struct vb2_ops vivid_vid_cap_qops = {
	.queue_setup = vid_cap_queue_setup,
	.buf_prepare = vid_cap_buf_prepare,
	.buf_finish = vid_cap_buf_finish,
	.buf_queue = vid_cap_buf_queue,
	.start_streaming = vid_cap_start_streaming,
	.stop_streaming = vid_cap_stop_streaming,
	.buf_request_complete = vid_cap_buf_request_complete,
};

/*
 * Determine the 'picture' quality based on the current TV frequency: either
 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
 * signal or NOISE for no signal.
 */
void vivid_update_quality(struct vivid_dev *dev)
{
	unsigned freq_modulus;

	if (dev->input_is_connected_to_output[dev->input]) {
		/*
		 * The 'noise' will only be replaced by the actual video
		 * if the output video matches the input video settings.
		 */
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_hdmi_cap(dev) &&
	    VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_sdtv_cap(dev) &&
	    VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (!vivid_is_tv_cap(dev)) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
		return;
	}

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just noise.
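	 *
	 * The tuner frequency is in units of 1/16 MHz (62.5 kHz), so the
	 * channels sit 6 * 16 units apart and 676 is (43.25 - 1) * 16.
	 * At an exact channel frequency freq_modulus is therefore 16:
	 * values 12..20 are within 0.25 MHz of the channel (color),
	 * values 0..32 are within 1 MHz (grayscale), anything above 32
	 * is treated as no signal.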
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (freq_modulus > 2 * 16) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
			next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
		return;
	}
	if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
		tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
	else
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
}

/*
 * Get the current picture quality and the associated afc value.
 */
static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
{
	unsigned freq_modulus;

	if (afc)
		*afc = 0;
	if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
	    tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
		return tpg_g_quality(&dev->tpg);

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just gray.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (afc)
		*afc = freq_modulus - 1 * 16;
	return TPG_QUAL_GRAY;
}

enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return dev->std_aspect_ratio[dev->input];

	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_aspect_ratio[dev->input];

	return TPG_VIDEO_ASPECT_IMAGE;
}

static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return (dev->std_cap[dev->input] & V4L2_STD_525_60) ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	if (vivid_is_hdmi_cap(dev) &&
	    dev->src_rect.width == 720 && dev->src_rect.height <= 576)
		return dev->src_rect.height == 480 ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	return TPG_PIXEL_ASPECT_SQUARE;
}

/*
 * Called whenever the format has to be reset which can occur when
 * changing inputs, standard, timings, etc.
 */
void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
{
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
	u32 dims[V4L2_CTRL_MAX_DIMS] = {};
	unsigned size;
	u64 pixelclock;

	switch (dev->input_type[dev->input]) {
	case WEBCAM:
	default:
		dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
		dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
		dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
		dev->field_cap = V4L2_FIELD_NONE;
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case TV:
	case SVID:
		dev->field_cap = dev->tv_field_cap;
		dev->src_rect.width = 720;
		if (dev->std_cap[dev->input] & V4L2_STD_525_60) {
			dev->src_rect.height = 480;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
			dev->service_set_cap = V4L2_SLICED_CAPTION_525;
		} else {
			dev->src_rect.height = 576;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
			dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
		}
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case HDMI:
		dev->src_rect.width = bt->width;
		dev->src_rect.height = bt->height;
		size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
		if (dev->reduced_fps && can_reduce_fps(bt)) {
			pixelclock = div_u64(bt->pixelclock * 1000, 1001);
			bt->flags |= V4L2_DV_FL_REDUCED_FPS;
		} else {
			pixelclock = bt->pixelclock;
			bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
		}
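		/*
		 * The frame period is the total frame size (including
		 * blanking) divided by the pixel clock; both values are
		 * divided by 100 so the fraction fits in 32 bits.
		 */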
		dev->timeperframe_vid_cap = (struct v4l2_fract) {
			size / 100, (u32)pixelclock / 100
		};
		if (bt->interlaced)
			dev->field_cap = V4L2_FIELD_ALTERNATE;
		else
			dev->field_cap = V4L2_FIELD_NONE;

		/*
		 * We can be called from within s_ctrl, in that case we can't
		 * set/get controls. Luckily we don't need to in that case.
		 */
		if (keep_controls || !dev->colorspace)
			break;
		if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
			if (bt->width == 720 && bt->height <= 576)
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			else
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
		} else {
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
		}
		tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
		break;
	}
	vivid_update_quality(dev);
	tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
	dev->crop_cap = dev->src_rect;
	dev->crop_bounds_cap = dev->src_rect;
	dev->compose_cap = dev->crop_cap;
	if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
		dev->compose_cap.height /= 2;
	dev->fmt_cap_rect = dev->compose_cap;
	tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
	tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
	tpg_update_mv_step(&dev->tpg);

	/*
	 * We can be called from within s_ctrl, in that case we can't
	 * modify controls. Luckily we don't need to in that case.
	 */
	if (keep_controls)
		return;

	dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV);
	dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV);
	v4l2_ctrl_modify_dimensions(dev->pixel_array, dims);
}

/* Map the field to something that is valid for the current input */
static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
{
	if (vivid_is_sdtv_cap(dev)) {
		switch (field) {
		case V4L2_FIELD_INTERLACED_TB:
		case V4L2_FIELD_INTERLACED_BT:
		case V4L2_FIELD_SEQ_TB:
		case V4L2_FIELD_SEQ_BT:
		case V4L2_FIELD_TOP:
		case V4L2_FIELD_BOTTOM:
		case V4L2_FIELD_ALTERNATE:
			return field;
		case V4L2_FIELD_INTERLACED:
		default:
			return V4L2_FIELD_INTERLACED;
		}
	}
	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_cap[dev->input].bt.interlaced ?
			V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE;
	return V4L2_FIELD_NONE;
}

static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_colorspace(&dev->tpg);
	return dev->colorspace_out;
}

static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_xfer_func(&dev->tpg);
	return dev->xfer_func_out;
}

static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_ycbcr_enc(&dev->tpg);
	return dev->ycbcr_enc_out;
}

static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_hsv_enc(&dev->tpg);
	return dev->hsv_enc_out;
}

static unsigned vivid_quantization_cap(struct vivid_dev *dev)
{
	if (!vivid_input_is_connected_to(dev))
		return tpg_g_quantization(&dev->tpg);
	return dev->quantization_out;
}

int vivid_g_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	unsigned p;

	mp->width = dev->fmt_cap_rect.width;
	mp->height = dev->fmt_cap_rect.height;
	mp->field = dev->field_cap;
	mp->pixelformat = dev->fmt_cap->fourcc;
	mp->colorspace = vivid_colorspace_cap(dev);
	mp->xfer_func = vivid_xfer_func_cap(dev);
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
		mp->hsv_enc = vivid_hsv_enc_cap(dev);
	else
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	mp->quantization = vivid_quantization_cap(dev);
	mp->num_planes = dev->fmt_cap->buffers;
	for (p = 0; p < mp->num_planes; p++) {
		mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
		mp->plane_fmt[p].sizeimage =
			(tpg_g_line_width(&dev->tpg, p) * mp->height) /
			dev->fmt_cap->vdownsampling[p] +
			dev->fmt_cap->data_offset[p];
	}
	return 0;
}
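
/*
 * TRY_FMT handling: unknown pixel formats fall back to YUYV, the field is
 * mapped to one the current input supports, and the resolution is snapped
 * to the nearest webcam size or clamped according to which of the scaler,
 * crop and compose capabilities are enabled.
 */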

int vivid_try_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	unsigned bytesperline, max_bpl;
	unsigned factor = 1;
	unsigned w, h;
	unsigned p;
	bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC);

	fmt = vivid_get_format(dev, mp->pixelformat);
	if (!fmt) {
		dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
			mp->pixelformat);
		mp->pixelformat = V4L2_PIX_FMT_YUYV;
		fmt = vivid_get_format(dev, mp->pixelformat);
	}

	mp->field = vivid_field_cap(dev, mp->field);
	if (vivid_is_webcam(dev)) {
		const struct v4l2_frmsize_discrete *sz =
			v4l2_find_nearest_size(webcam_sizes,
					       ARRAY_SIZE(webcam_sizes), width,
					       height, mp->width, mp->height);

		w = sz->width;
		h = sz->height;
	} else if (vivid_is_sdtv_cap(dev)) {
		w = 720;
		h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576;
	} else {
		w = dev->src_rect.width;
		h = dev->src_rect.height;
	}
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;
	if (vivid_is_webcam(dev) ||
	    (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
		mp->width = w;
		mp->height = h / factor;
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };

		v4l2_rect_set_min_size(&r, &vivid_min_rect);
		v4l2_rect_set_max_size(&r, &vivid_max_rect);
		if (dev->has_scaler_cap && !dev->has_compose_cap) {
			struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };

			v4l2_rect_set_max_size(&r, &max_r);
		} else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
			v4l2_rect_set_max_size(&r, &dev->src_rect);
		} else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
			v4l2_rect_set_min_size(&r, &dev->src_rect);
		}
		mp->width = r.width;
		mp->height = r.height / factor;
	}

	/* This driver supports custom bytesperline values */

	mp->num_planes = fmt->buffers;
	for (p = 0; p < fmt->buffers; p++) {
		/* Calculate the minimum supported bytesperline value */
		bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
		/* Calculate the maximum supported bytesperline value */
		max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;

		if (pfmt[p].bytesperline > max_bpl)
			pfmt[p].bytesperline = max_bpl;
		if (pfmt[p].bytesperline < bytesperline)
			pfmt[p].bytesperline = bytesperline;

		pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
				fmt->vdownsampling[p] + fmt->data_offset[p];

		memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
	}
	for (p = fmt->buffers; p < fmt->planes; p++)
		pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
			(fmt->bit_depth[p] / fmt->vdownsampling[p])) /
			(fmt->bit_depth[0] / fmt->vdownsampling[0]);

	if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace))
		mp->colorspace = vivid_colorspace_cap(dev);

	if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func))
		mp->xfer_func = vivid_xfer_func_cap(dev);

	if (fmt->color_enc == TGP_COLOR_ENC_HSV) {
		if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc))
			mp->hsv_enc = vivid_hsv_enc_cap(dev);
	} else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) {
		if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc))
			mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	} else {
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	}

	if (fmt->color_enc == TGP_COLOR_ENC_YCBCR ||
	    fmt->color_enc == TGP_COLOR_ENC_RGB) {
		if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization))
			mp->quantization = vivid_quantization_cap(dev);
	} else {
		mp->quantization = vivid_quantization_cap(dev);
	}

	memset(mp->reserved, 0, sizeof(mp->reserved));
	return 0;
}
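
/*
 * S_FMT first runs TRY_FMT and then, unless the queue is busy, applies the
 * result: the crop and compose rectangles are adjusted to the new format
 * depending on which of the scaler/crop/compose capabilities are enabled,
 * and the test pattern generator is reconfigured accordingly.
 */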

int vivid_s_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	struct vb2_queue *q = &dev->vb_vid_cap_q;
	int ret = vivid_try_fmt_vid_cap(file, priv, f);
	unsigned factor = 1;
	unsigned p;
	unsigned i;

	if (ret < 0)
		return ret;

	if (vb2_is_busy(q)) {
		dprintk(dev, 1, "%s device busy\n", __func__);
		return -EBUSY;
	}

	dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;

	/* Note: the webcam input doesn't support scaling, cropping or composing */

	if (!vivid_is_webcam(dev) &&
	    (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		if (dev->has_scaler_cap) {
			if (dev->has_compose_cap)
				v4l2_rect_map_inside(compose, &r);
			else
				*compose = r;
			if (dev->has_crop_cap && !dev->has_compose_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					r.width / MAX_ZOOM,
					factor * r.height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					r.width * MAX_ZOOM,
					factor * r.height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			} else if (dev->has_crop_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					compose->width / MAX_ZOOM,
					factor * compose->height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					compose->width * MAX_ZOOM,
					factor * compose->height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap && !dev->has_compose_cap) {
			r.height *= factor;
			v4l2_rect_set_size_to(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			r = *crop;
			r.height /= factor;
			v4l2_rect_set_size_to(compose, &r);
		} else if (!dev->has_crop_cap) {
			v4l2_rect_map_inside(compose, &r);
		} else {
			r.height *= factor;
			v4l2_rect_set_max_size(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			compose->top *= factor;
			compose->height *= factor;
			v4l2_rect_set_size_to(compose, crop);
			v4l2_rect_map_inside(compose, &r);
			compose->top /= factor;
			compose->height /= factor;
		}
	} else if (vivid_is_webcam(dev)) {
		unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx);

		/* Guaranteed to be a match */
		for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
			if (webcam_sizes[i].width == mp->width &&
			    webcam_sizes[i].height == mp->height)
				break;
		dev->webcam_size_idx = i;
		if (dev->webcam_ival_idx >= ival_sz)
			dev->webcam_ival_idx = ival_sz - 1;
		vivid_update_format_cap(dev, false);
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		v4l2_rect_set_size_to(compose, &r);
		r.height *= factor;
		v4l2_rect_set_size_to(crop, &r);
	}

	dev->fmt_cap_rect.width = mp->width;
	dev->fmt_cap_rect.height = mp->height;
	tpg_s_buf_height(&dev->tpg, mp->height);
	tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
	for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
		tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
	dev->field_cap = mp->field;
	if (dev->field_cap == V4L2_FIELD_ALTERNATE)
		tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
	else
		tpg_s_field(&dev->tpg, dev->field_cap, false);
	tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
	if (vivid_is_sdtv_cap(dev))
		dev->tv_field_cap = mp->field;
	tpg_update_mv_step(&dev->tpg);
	dev->tpg.colorspace = mp->colorspace;
	dev->tpg.xfer_func = mp->xfer_func;
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR)
		dev->tpg.ycbcr_enc = mp->ycbcr_enc;
	else
		dev->tpg.hsv_enc = mp->hsv_enc;
	dev->tpg.quantization = mp->quantization;

	return 0;
}
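
/*
 * The *_mplane ioctl handlers below are only valid on multiplanar device
 * nodes; the single-planar variants wrap the same helpers through
 * fmt_sp2mp_func(), which converts between the two format layouts.
 */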

int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_g_fmt_vid_cap(file, priv, f);
}

int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_try_fmt_vid_cap(file, priv, f);
}

int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_s_fmt_vid_cap(file, priv, f);
}

int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
			 struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
}

int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
			   struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
}

int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
			 struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
}

int vivid_vid_cap_g_selection(struct file *file, void *priv,
			      struct v4l2_selection *sel)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -ENODATA;

	sel->r.left = sel->r.top = 0;
	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->crop_cap;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->src_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = vivid_max_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->compose_cap;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->fmt_cap_rect;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
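
/*
 * With TOP/BOTTOM/ALTERNATE field settings each buffer contains a single
 * field, so the compose rectangle and the capture format are expressed in
 * field lines while the crop rectangle stays in frame lines; hence the
 * factor-of-two conversions below.
 */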

int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
	int ret;

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -ENODATA;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
		v4l2_rect_set_max_size(&s->r, &dev->src_rect);
		v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
		s->r.top /= factor;
		s->r.height /= factor;
		if (dev->has_scaler_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;
			struct v4l2_rect max_rect = {
				0, 0,
				s->r.width * MAX_ZOOM,
				s->r.height * MAX_ZOOM
			};
			struct v4l2_rect min_rect = {
				0, 0,
				s->r.width / MAX_ZOOM,
				s->r.height / MAX_ZOOM
			};

			v4l2_rect_set_min_size(&fmt, &min_rect);
			if (!dev->has_compose_cap)
				v4l2_rect_set_max_size(&fmt, &max_rect);
			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			if (dev->has_compose_cap) {
				v4l2_rect_set_min_size(compose, &min_rect);
				v4l2_rect_set_max_size(compose, &max_rect);
				v4l2_rect_map_inside(compose, &fmt);
			}
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
		} else if (dev->has_compose_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;

			v4l2_rect_set_min_size(&fmt, &s->r);
			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
			v4l2_rect_set_size_to(compose, &s->r);
			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
		} else {
			if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
			v4l2_rect_set_size_to(compose, &s->r);
			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
			tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
		}
		s->r.top *= factor;
		s->r.height *= factor;
		*crop = s->r;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
		v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
		if (dev->has_scaler_cap) {
			struct v4l2_rect max_rect = {
				0, 0,
				dev->src_rect.width * MAX_ZOOM,
				(dev->src_rect.height / factor) * MAX_ZOOM
			};

			v4l2_rect_set_max_size(&s->r, &max_rect);
			if (dev->has_crop_cap) {
				struct v4l2_rect min_rect = {
					0, 0,
					s->r.width / MAX_ZOOM,
					(s->r.height * factor) / MAX_ZOOM
				};
				struct v4l2_rect max_rect = {
					0, 0,
					s->r.width * MAX_ZOOM,
					(s->r.height * factor) * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_rect);
				v4l2_rect_set_max_size(crop, &max_rect);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap) {
			s->r.top *= factor;
			s->r.height *= factor;
			v4l2_rect_set_max_size(&s->r, &dev->src_rect);
			v4l2_rect_set_size_to(crop, &s->r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			s->r.top /= factor;
			s->r.height /= factor;
		} else {
			v4l2_rect_set_size_to(&s->r, &dev->src_rect);
			s->r.height /= factor;
		}
		v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
		*compose = s->r;
		break;
	default:
		return -EINVAL;
	}

	tpg_s_crop_compose(&dev->tpg, crop, compose);
	return 0;
}
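
/*
 * The values below are the usual ITU-R BT.601 pixel aspect ratios for
 * 480-line and 576-line systems, expressed as y/x as expected by
 * VIDIOC_G_PIXELASPECT.
 */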

int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv,
				int type, struct v4l2_fract *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (vivid_get_pixel_aspect(dev)) {
	case TPG_PIXEL_ASPECT_NTSC:
		f->numerator = 11;
		f->denominator = 10;
		break;
	case TPG_PIXEL_ASPECT_PAL:
		f->numerator = 54;
		f->denominator = 59;
		break;
	default:
		break;
	}
	return 0;
}

static const struct v4l2_audio vivid_audio_inputs[] = {
	{ 0, "TV", V4L2_AUDCAP_STEREO },
	{ 1, "Line-In", V4L2_AUDCAP_STEREO },
};

int vidioc_enum_input(struct file *file, void *priv,
		      struct v4l2_input *inp)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (inp->index >= dev->num_inputs)
		return -EINVAL;

	inp->type = V4L2_INPUT_TYPE_CAMERA;
	switch (dev->input_type[inp->index]) {
	case WEBCAM:
		snprintf(inp->name, sizeof(inp->name), "Webcam %03u-%u",
			 dev->inst, dev->input_name_counter[inp->index]);
		inp->capabilities = 0;
		break;
	case TV:
		snprintf(inp->name, sizeof(inp->name), "TV %03u-%u",
			 dev->inst, dev->input_name_counter[inp->index]);
		inp->type = V4L2_INPUT_TYPE_TUNER;
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case SVID:
		snprintf(inp->name, sizeof(inp->name), "S-Video %03u-%u",
			 dev->inst, dev->input_name_counter[inp->index]);
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case HDMI:
		snprintf(inp->name, sizeof(inp->name), "HDMI %03u-%u",
			 dev->inst, dev->input_name_counter[inp->index]);
		inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
		if (dev->edid_blocks == 0 ||
		    dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL)
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK ||
			 dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE)
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		break;
	}
	if (dev->sensor_hflip)
		inp->status |= V4L2_IN_ST_HFLIP;
	if (dev->sensor_vflip)
		inp->status |= V4L2_IN_ST_VFLIP;
	if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
		if (dev->std_signal_mode[dev->input] == NO_SIGNAL) {
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		} else if (dev->std_signal_mode[dev->input] == NO_LOCK) {
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		} else if (vivid_is_tv_cap(dev)) {
			switch (tpg_g_quality(&dev->tpg)) {
			case TPG_QUAL_GRAY:
				inp->status |= V4L2_IN_ST_COLOR_KILL;
				break;
			case TPG_QUAL_NOISE:
				inp->status |= V4L2_IN_ST_NO_H_LOCK;
				break;
			default:
				break;
			}
		}
	}
	return 0;
}

int vidioc_g_input(struct file *file, void *priv, unsigned *i)
{
	struct vivid_dev *dev = video_drvdata(file);

	*i = dev->input;
	return 0;
}

int vidioc_s_input(struct file *file, void *priv, unsigned i)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
	unsigned brightness;

	if (i >= dev->num_inputs)
		return -EINVAL;

	if (i == dev->input)
		return 0;

	if (vb2_is_busy(&dev->vb_vid_cap_q) ||
	    vb2_is_busy(&dev->vb_vbi_cap_q) ||
	    vb2_is_busy(&dev->vb_meta_cap_q))
		return -EBUSY;

	dev->input = i;
	dev->vid_cap_dev.tvnorms = 0;
	if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
		dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
		dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
	}
	dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	vivid_update_format_cap(dev, false);

	if (dev->colorspace) {
		switch (dev->input_type[i]) {
		case WEBCAM:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			break;
		case TV:
		case SVID:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			break;
		case HDMI:
			if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
				if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
				else
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			} else {
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			}
			break;
		}
	}

	/*
	 * Modify the brightness range depending on the input.
	 * This makes it easy to use vivid to test if applications can
	 * handle control range modifications and is also how this is
	 * typically used in practice as different inputs may be hooked
	 * up to different receivers with different control ranges.
	 */
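	/*
	 * For example, input 1 exposes a brightness range of 128..383 with
	 * a default of 256, offset by 128 from input 0.
	 */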
	brightness = 128 * i + dev->input_brightness[i];
	v4l2_ctrl_modify_range(dev->brightness,
			       128 * i, 255 + 128 * i, 1, 128 + 128 * i);
	v4l2_ctrl_s_ctrl(dev->brightness, brightness);

	/* Restore per-input states. */
	v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode,
			   vivid_is_hdmi_cap(dev));
	v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) &&
			   dev->dv_timings_signal_mode[dev->input] ==
			   SELECTED_DV_TIMINGS);
	v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev));
	v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) &&
			   dev->std_signal_mode[dev->input]);

	if (vivid_is_hdmi_cap(dev)) {
		v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode,
				 dev->dv_timings_signal_mode[dev->input]);
		v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings,
				 dev->query_dv_timings[dev->input]);
	} else if (vivid_is_sdtv_cap(dev)) {
		v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode,
				 dev->std_signal_mode[dev->input]);
		v4l2_ctrl_s_ctrl(dev->ctrl_standard,
				 dev->std_signal_mode[dev->input]);
	}

	return 0;
}

int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	*vin = vivid_audio_inputs[vin->index];
	return 0;
}

int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	*vin = vivid_audio_inputs[dev->tv_audio_input];
	return 0;
}

int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	dev->tv_audio_input = vin->index;
	return 0;
}

int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	vf->frequency = dev->tv_freq;
	return 0;
}

int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
	if (vivid_is_tv_cap(dev))
		vivid_update_quality(dev);
	return 0;
}

int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vt->index != 0)
		return -EINVAL;
	if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
		return -EINVAL;
	dev->tv_audmode = vt->audmode;
	return 0;
}

int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);
	enum tpg_quality qual;

	if (vt->index != 0)
		return -EINVAL;

	vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
			 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
	vt->audmode = dev->tv_audmode;
	vt->rangelow = MIN_TV_FREQ;
	vt->rangehigh = MAX_TV_FREQ;
	qual = vivid_get_quality(dev, &vt->afc);
	if (qual == TPG_QUAL_COLOR)
		vt->signal = 0xffff;
	else if (qual == TPG_QUAL_GRAY)
		vt->signal = 0x8000;
	else
		vt->signal = 0;
	if (qual == TPG_QUAL_NOISE) {
		vt->rxsubchans = 0;
	} else if (qual == TPG_QUAL_GRAY) {
		vt->rxsubchans = V4L2_TUNER_SUB_MONO;
	} else {
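		/*
		 * Derive the reported audio subchannels deterministically
		 * from the fake channel number, cycling through mono,
		 * stereo and bilingual (plus a stereo+SAP variant for
		 * NTSC-M).
		 */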
		unsigned int channel_nr = dev->tv_freq / (6 * 16);
		unsigned int options =
			(dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3;

		switch (channel_nr % options) {
		case 0:
			vt->rxsubchans = V4L2_TUNER_SUB_MONO;
			break;
		case 1:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
			break;
		case 2:
			if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M)
				vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
			else
				vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
			break;
		case 3:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
			break;
		}
	}
	strscpy(vt->name, "TV Tuner", sizeof(vt->name));
	return 0;
}

/* Must remain in sync with the vivid_ctrl_standard_strings array */
const v4l2_std_id vivid_standard[] = {
	V4L2_STD_NTSC_M,
	V4L2_STD_NTSC_M_JP,
	V4L2_STD_NTSC_M_KR,
	V4L2_STD_NTSC_443,
	V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
	V4L2_STD_PAL_I,
	V4L2_STD_PAL_DK,
	V4L2_STD_PAL_M,
	V4L2_STD_PAL_N,
	V4L2_STD_PAL_Nc,
	V4L2_STD_PAL_60,
	V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
	V4L2_STD_SECAM_DK,
	V4L2_STD_SECAM_L,
	V4L2_STD_SECAM_LC,
	V4L2_STD_UNKNOWN
};

/* Must remain in sync with the vivid_standard array */
const char * const vivid_ctrl_standard_strings[] = {
	"NTSC-M",
	"NTSC-M-JP",
	"NTSC-M-KR",
	"NTSC-443",
	"PAL-BGH",
	"PAL-I",
	"PAL-DK",
	"PAL-M",
	"PAL-N",
	"PAL-Nc",
	"PAL-60",
	"SECAM-BGH",
	"SECAM-DK",
	"SECAM-L",
	"SECAM-Lc",
	NULL,
};

int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int last = dev->query_std_last[dev->input];

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_signal_mode[dev->input] == NO_SIGNAL ||
	    dev->std_signal_mode[dev->input] == NO_LOCK) {
		*id = V4L2_STD_UNKNOWN;
		return 0;
	}
	if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
		*id = V4L2_STD_UNKNOWN;
	} else if (dev->std_signal_mode[dev->input] == CURRENT_STD) {
		*id = dev->std_cap[dev->input];
	} else if (dev->std_signal_mode[dev->input] == SELECTED_STD) {
		*id = dev->query_std[dev->input];
	} else {
		*id = vivid_standard[last];
		dev->query_std_last[dev->input] =
			(last + 1) % ARRAY_SIZE(vivid_standard);
	}

	return 0;
}

int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_cap[dev->input] == id)
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
		return -EBUSY;
	dev->std_cap[dev->input] = id;
	vivid_update_format_cap(dev, false);
	return 0;
}
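
/*
 * Guess a standard display aspect ratio from the given width and height.
 * This is used below when validating GTF timings, which need an aspect
 * ratio to reconstruct the frame.
 */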

static void find_aspect_ratio(u32 width, u32 height,
			      u32 *num, u32 *denom)
{
	if (!(height % 3) && ((height * 4 / 3) == width)) {
		*num = 4;
		*denom = 3;
	} else if (!(height % 9) && ((height * 16 / 9) == width)) {
		*num = 16;
		*denom = 9;
	} else if (!(height % 10) && ((height * 16 / 10) == width)) {
		*num = 16;
		*denom = 10;
	} else if (!(height % 4) && ((height * 5 / 4) == width)) {
		*num = 5;
		*denom = 4;
	} else if (!(height % 9) && ((height * 15 / 9) == width)) {
		*num = 15;
		*denom = 9;
	} else { /* default to 16:9 */
		*num = 16;
		*denom = 9;
	}
}

static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
{
	struct v4l2_bt_timings *bt = &timings->bt;
	u32 total_h_pixel;
	u32 total_v_lines;
	u32 h_freq;

	if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
				   NULL, NULL))
		return false;

	total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
	total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);

	h_freq = (u32)bt->pixelclock / total_h_pixel;

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
		struct v4l2_dv_timings cvt = {};

		if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
				    bt->polarities, bt->interlaced,
				    &vivid_dv_timings_cap, &cvt) &&
		    cvt.bt.width == bt->width && cvt.bt.height == bt->height) {
			*timings = cvt;
			return true;
		}
	}

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
		struct v4l2_dv_timings gtf = {};
		struct v4l2_fract aspect_ratio;

		find_aspect_ratio(bt->width, bt->height,
				  &aspect_ratio.numerator,
				  &aspect_ratio.denominator);
		if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
				    bt->polarities, bt->interlaced,
				    aspect_ratio, &vivid_dv_timings_cap,
				    &gtf) &&
		    gtf.bt.width == bt->width && gtf.bt.height == bt->height) {
			*timings = gtf;
			return true;
		}
	}
	return false;
}

int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
			       struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
				      0, NULL, NULL) &&
	    !valid_cvt_gtf_timings(timings))
		return -EINVAL;

	if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input],
				  0, false))
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->dv_timings_cap[dev->input] = *timings;
	vivid_update_format_cap(dev, false);
	return 0;
}

int vidioc_query_dv_timings(struct file *file, void *_fh,
			    struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int input = dev->input;
	unsigned int last = dev->query_dv_timings_last[input];

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (dev->dv_timings_signal_mode[input] == NO_SIGNAL ||
	    dev->edid_blocks == 0)
		return -ENOLINK;
	if (dev->dv_timings_signal_mode[input] == NO_LOCK)
		return -ENOLCK;
	if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) {
		timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
		return -ERANGE;
	}
	if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) {
		*timings = dev->dv_timings_cap[input];
	} else if (dev->dv_timings_signal_mode[input] == SELECTED_DV_TIMINGS) {
		*timings =
			v4l2_dv_timings_presets[dev->query_dv_timings[input]];
	} else {
		*timings =
			v4l2_dv_timings_presets[last];
		dev->query_dv_timings_last[input] =
			(last + 1) % dev->query_dv_timings_size;
	}
	return 0;
}
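
/*
 * Recompute the EDID-present, hotplug and rx-sense bitmasks for the HDMI
 * outputs of this instance: a bit is set for an HDMI output whenever the
 * connected input instance currently has an EDID.
 */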

void vivid_update_outputs(struct vivid_dev *dev)
{
	u32 edid_present = 0;

	if (!dev || !dev->num_outputs)
		return;
	for (unsigned int i = 0, j = 0; i < dev->num_outputs; i++) {
		if (dev->output_type[i] != HDMI)
			continue;

		struct vivid_dev *dev_rx = dev->output_to_input_instance[i];

		if (dev_rx && dev_rx->edid_blocks)
			edid_present |= 1 << j;
		j++;
	}
	v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, edid_present);
	v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, edid_present);
	v4l2_ctrl_s_ctrl(dev->ctrl_tx_rxsense, edid_present);
}

void vivid_update_connected_outputs(struct vivid_dev *dev)
{
	u16 phys_addr = cec_get_edid_phys_addr(dev->edid, dev->edid_blocks * 128, NULL);

	for (unsigned int i = 0, j = 0; i < dev->num_inputs; i++) {
		unsigned int menu_idx =
			dev->input_is_connected_to_output[i];

		if (dev->input_type[i] != HDMI)
			continue;
		j++;
		if (menu_idx < FIXED_MENU_ITEMS)
			continue;

		struct vivid_dev *dev_tx = vivid_ctrl_hdmi_to_output_instance[menu_idx];
		unsigned int output = vivid_ctrl_hdmi_to_output_index[menu_idx];

		if (!dev_tx)
			continue;

		unsigned int hdmi_output = dev_tx->output_to_iface_index[output];

		vivid_update_outputs(dev_tx);
		if (dev->edid_blocks) {
			cec_s_phys_addr(dev_tx->cec_tx_adap[hdmi_output],
					v4l2_phys_addr_for_input(phys_addr, j),
					false);
		} else {
			cec_phys_addr_invalidate(dev_tx->cec_tx_adap[hdmi_output]);
		}
	}
}

int vidioc_s_edid(struct file *file, void *_fh,
		  struct v4l2_edid *edid)
{
	struct vivid_dev *dev = video_drvdata(file);
	u16 phys_addr;
	int ret;

	memset(edid->reserved, 0, sizeof(edid->reserved));
	if (edid->pad >= dev->num_inputs)
		return -EINVAL;
	if (dev->input_type[edid->pad] != HDMI || edid->start_block)
		return -EINVAL;
	if (edid->blocks == 0) {
		if (vb2_is_busy(&dev->vb_vid_cap_q))
			return -EBUSY;
		dev->edid_blocks = 0;
		vivid_update_connected_outputs(dev);
		return 0;
	}
	if (edid->blocks > dev->edid_max_blocks) {
		edid->blocks = dev->edid_max_blocks;
		return -E2BIG;
	}
	phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
	ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
	if (ret)
		return ret;

	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->edid_blocks = edid->blocks;
	memcpy(dev->edid, edid->edid, edid->blocks * 128);

	vivid_update_connected_outputs(dev);
	return 0;
}
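
/*
 * Frame size enumeration: webcam inputs report the discrete webcam_sizes
 * list, all other inputs require the scaler and report a single stepwise
 * range up to MAX_ZOOM times the maximum resolution.
 */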

int vidioc_enum_framesizes(struct file *file, void *fh,
			   struct v4l2_frmsizeenum *fsize)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
		return -EINVAL;
	if (vivid_get_format(dev, fsize->pixel_format) == NULL)
		return -EINVAL;
	if (vivid_is_webcam(dev)) {
		if (fsize->index >= ARRAY_SIZE(webcam_sizes))
			return -EINVAL;
		fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
		fsize->discrete = webcam_sizes[fsize->index];
		return 0;
	}
	if (fsize->index)
		return -EINVAL;
	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = MIN_WIDTH;
	fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
	fsize->stepwise.step_width = 2;
	fsize->stepwise.min_height = MIN_HEIGHT;
	fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
	fsize->stepwise.step_height = 2;
	return 0;
}

/* timeperframe is arbitrary and continuous */
int vidioc_enum_frameintervals(struct file *file, void *priv,
			       struct v4l2_frmivalenum *fival)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	int i;

	fmt = vivid_get_format(dev, fival->pixel_format);
	if (!fmt)
		return -EINVAL;

	if (!vivid_is_webcam(dev)) {
		if (fival->index)
			return -EINVAL;
		if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
			return -EINVAL;
		if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
			return -EINVAL;
		fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
		fival->discrete = dev->timeperframe_vid_cap;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
		if (fival->width == webcam_sizes[i].width &&
		    fival->height == webcam_sizes[i].height)
			break;
	if (i == ARRAY_SIZE(webcam_sizes))
		return -EINVAL;
	if (fival->index >= webcam_ival_count(dev, i))
		return -EINVAL;
	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = webcam_intervals[fival->index];
	return 0;
}

int vivid_vid_cap_g_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;

	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
	parm->parm.capture.readbuffers = 1;
	return 0;
}

int vivid_vid_cap_s_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx);
	struct v4l2_fract tpf;
	unsigned i;

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;
	if (!vivid_is_webcam(dev))
		return vivid_vid_cap_g_parm(file, priv, parm);

	tpf = parm->parm.capture.timeperframe;

	if (tpf.denominator == 0)
		tpf = webcam_intervals[ival_sz - 1];
	for (i = 0; i < ival_sz; i++)
		if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i]))
			break;
	if (i == ival_sz)
		i = ival_sz - 1;
	dev->webcam_ival_idx = i;
	tpf = webcam_intervals[dev->webcam_ival_idx];

	/* resync the thread's timings */
	dev->cap_seq_resync = true;
	dev->timeperframe_vid_cap = tpf;
	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = tpf;
	parm->parm.capture.readbuffers = 1;
	return 0;
}