1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
 * vivid-kthread-cap.c - video/vbi capture thread support functions.
4 *
5 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
6 */
7
8 #include <linux/module.h>
9 #include <linux/errno.h>
10 #include <linux/kernel.h>
11 #include <linux/init.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/font.h>
15 #include <linux/mutex.h>
16 #include <linux/videodev2.h>
17 #include <linux/kthread.h>
18 #include <linux/freezer.h>
19 #include <linux/random.h>
20 #include <linux/v4l2-dv-timings.h>
21 #include <linux/jiffies.h>
22 #include <asm/div64.h>
23 #include <media/videobuf2-vmalloc.h>
24 #include <media/v4l2-dv-timings.h>
25 #include <media/v4l2-ioctl.h>
26 #include <media/v4l2-fh.h>
27 #include <media/v4l2-event.h>
28 #include <media/v4l2-rect.h>
29
30 #include "vivid-core.h"
31 #include "vivid-vid-common.h"
32 #include "vivid-vid-cap.h"
33 #include "vivid-vid-out.h"
34 #include "vivid-radio-common.h"
35 #include "vivid-radio-rx.h"
36 #include "vivid-radio-tx.h"
37 #include "vivid-sdr-cap.h"
38 #include "vivid-vbi-cap.h"
39 #include "vivid-vbi-out.h"
40 #include "vivid-osd.h"
41 #include "vivid-ctrls.h"
42 #include "vivid-kthread-cap.h"
43 #include "vivid-meta-cap.h"
44
vivid_get_std_cap(const struct vivid_dev * dev)45 static inline v4l2_std_id vivid_get_std_cap(const struct vivid_dev *dev)
46 {
47 if (vivid_is_sdtv_cap(dev))
48 return dev->std_cap[dev->input];
49 return 0;
50 }
51
/*
 * Blend a single output-overlay pixel into the capture pixel.
 *
 * On entry *cap holds the looped video pixel and *osd the overlay pixel.
 * The overlay pixel is tentatively written to *cap; each test below
 * returns early to keep the overlay pixel, otherwise the saved video
 * pixel is restored at the end.
 *
 * Note: win_y/win_x (the capture-window coordinates of this pixel) are
 * not used in this function body; they are passed by blend_line() —
 * presumably kept for position-dependent blending. TODO confirm.
 */
static void copy_pix(struct vivid_dev *dev, int win_y, int win_x,
		     u16 *cap, const u16 *osd)
{
	u16 out;

	/* Save the video pixel, tentatively replace it by the OSD pixel */
	out = *cap;
	*cap = *osd;

	/* OSD pixel differs from the chroma key: keep the OSD pixel */
	if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
	    *osd != dev->chromakey_out)
		return;
	/* Video pixel matches the source chroma key: keep the OSD pixel */
	if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
	    out == dev->chromakey_out)
		return;
	if (dev->fmt_cap->alpha_mask) {
		/* Non-zero global alpha: keep the OSD pixel */
		if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) &&
		    dev->global_alpha_out)
			return;
		/* OSD pixel's own alpha bit set: keep the OSD pixel */
		if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) &&
		    *cap & dev->fmt_cap->alpha_mask)
			return;
		/* Inverted local alpha: cleared alpha bit keeps the OSD pixel */
		if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_LOCAL_INV_ALPHA) &&
		    !(*cap & dev->fmt_cap->alpha_mask))
			return;
	}
	/* No rule kept the OSD pixel: restore the original video pixel */
	*cap = out;
}
79
/*
 * Blend one line of the output overlay into one capture line, pixel by
 * pixel. y_offset/x_offset give the capture-window position of the first
 * pixel and are forwarded to copy_pix().
 */
static void blend_line(struct vivid_dev *dev, unsigned y_offset, unsigned x_offset,
		       u8 *vcapbuf, const u8 *vosdbuf,
		       unsigned width, unsigned pixsize)
{
	unsigned i = 0;

	while (i < width) {
		copy_pix(dev, y_offset, x_offset + i,
			 (u16 *)vcapbuf, (const u16 *)vosdbuf);
		vcapbuf += pixsize;
		vosdbuf += pixsize;
		i++;
	}
}
91
/*
 * Coarse horizontal scaling using Bresenham's algorithm, copying pixel
 * pairs so that packed YUV formats do not suffer color bleed.
 */
static void scale_line(const u8 *src, u8 *dst, unsigned srcw, unsigned dstw, unsigned twopixsize)
{
	unsigned step;		/* whole source pairs advanced per dest pair */
	unsigned rem;		/* fractional remainder, accumulated below */
	unsigned spos = 0;	/* current source pair index */
	unsigned acc = 0;	/* Bresenham error accumulator */
	unsigned d;

	/* Work in units of two pixels (pixel pairs). */
	srcw /= 2;
	dstw /= 2;
	step = srcw / dstw;
	rem = srcw % dstw;
	for (d = 0; d < dstw; d++) {
		memcpy(dst + d * twopixsize, src + spos * twopixsize, twopixsize);
		spos += step;
		acc += rem;
		if (acc >= dstw) {
			acc -= dstw;
			spos++;
		}
	}
}
119
120 /*
121 * Precalculate the rectangles needed to perform video looping:
122 *
123 * The nominal pipeline is that the video output buffer is cropped by
124 * crop_out, scaled to compose_out, overlaid with the output overlay,
125 * cropped on the capture side by crop_cap and scaled again to the video
126 * capture buffer using compose_cap.
127 *
128 * To keep things efficient we calculate the intersection of compose_out
129 * and crop_cap (since that's the only part of the video that will
130 * actually end up in the capture buffer), determine which part of the
131 * video output buffer that is and which part of the video capture buffer
132 * so we can scale the video straight from the output buffer to the capture
133 * buffer without any intermediate steps.
134 *
135 * If we need to deal with an output overlay, then there is no choice and
136 * that intermediate step still has to be taken. For the output overlay
137 * support we calculate the intersection of the framebuffer and the overlay
138 * window (which may be partially or wholly outside of the framebuffer
139 * itself) and the intersection of that with loop_vid_copy (i.e. the part of
140 * the actual looped video that will be overlaid). The result is calculated
141 * both in framebuffer coordinates (loop_fb_copy) and compose_out coordinates
142 * (loop_vid_overlay). Finally calculate the part of the capture buffer that
143 * will receive that overlaid video.
144 */
static void vivid_precalc_copy_rects(struct vivid_dev *dev, struct vivid_dev *out_dev)
{
	/* Framebuffer rectangle */
	struct v4l2_rect r_fb = {
		0, 0, dev->display_width, dev->display_height
	};
	/* Overlay window rectangle in framebuffer coordinates */
	struct v4l2_rect r_overlay = {
		out_dev->overlay_out_left, out_dev->overlay_out_top,
		out_dev->compose_out.width, out_dev->compose_out.height
	};

	/* loop_vid_copy: the part of the video that reaches the capture buffer */
	v4l2_rect_intersect(&dev->loop_vid_copy, &dev->crop_cap, &out_dev->compose_out);

	/* loop_vid_out: loop_vid_copy mapped back to output buffer coordinates */
	dev->loop_vid_out = dev->loop_vid_copy;
	v4l2_rect_scale(&dev->loop_vid_out, &out_dev->compose_out, &out_dev->crop_out);
	dev->loop_vid_out.left += out_dev->crop_out.left;
	dev->loop_vid_out.top += out_dev->crop_out.top;

	/* loop_vid_cap: loop_vid_copy mapped to capture buffer coordinates */
	dev->loop_vid_cap = dev->loop_vid_copy;
	v4l2_rect_scale(&dev->loop_vid_cap, &dev->crop_cap, &dev->compose_cap);

	dprintk(dev, 1,
		"loop_vid_copy: %dx%d@%dx%d loop_vid_out: %dx%d@%dx%d loop_vid_cap: %dx%d@%dx%d\n",
		dev->loop_vid_copy.width, dev->loop_vid_copy.height,
		dev->loop_vid_copy.left, dev->loop_vid_copy.top,
		dev->loop_vid_out.width, dev->loop_vid_out.height,
		dev->loop_vid_out.left, dev->loop_vid_out.top,
		dev->loop_vid_cap.width, dev->loop_vid_cap.height,
		dev->loop_vid_cap.left, dev->loop_vid_cap.top);

	/* Clip the overlay window against the framebuffer */
	v4l2_rect_intersect(&r_overlay, &r_fb, &r_overlay);

	/* shift r_overlay to the same origin as compose_out */
	r_overlay.left += out_dev->compose_out.left - out_dev->overlay_out_left;
	r_overlay.top += out_dev->compose_out.top - out_dev->overlay_out_top;

	/* loop_vid_overlay: the overlaid part of the looped video */
	v4l2_rect_intersect(&dev->loop_vid_overlay, &r_overlay, &dev->loop_vid_copy);
	dev->loop_fb_copy = dev->loop_vid_overlay;

	/* shift dev->loop_fb_copy back again to the fb origin */
	dev->loop_fb_copy.left -= out_dev->compose_out.left - out_dev->overlay_out_left;
	dev->loop_fb_copy.top -= out_dev->compose_out.top - out_dev->overlay_out_top;

	/* loop_vid_overlay_cap: loop_vid_overlay in capture buffer coordinates */
	dev->loop_vid_overlay_cap = dev->loop_vid_overlay;
	v4l2_rect_scale(&dev->loop_vid_overlay_cap, &dev->crop_cap, &dev->compose_cap);

	dprintk(dev, 1,
		"loop_fb_copy: %dx%d@%dx%d loop_vid_overlay: %dx%d@%dx%d loop_vid_overlay_cap: %dx%d@%dx%d\n",
		dev->loop_fb_copy.width, dev->loop_fb_copy.height,
		dev->loop_fb_copy.left, dev->loop_fb_copy.top,
		dev->loop_vid_overlay.width, dev->loop_vid_overlay.height,
		dev->loop_vid_overlay.left, dev->loop_vid_overlay.top,
		dev->loop_vid_overlay_cap.width, dev->loop_vid_overlay_cap.height,
		dev->loop_vid_overlay_cap.left, dev->loop_vid_overlay_cap.top);
}
201
plane_vaddr(struct tpg_data * tpg,struct vivid_buffer * buf,unsigned p,unsigned bpl[TPG_MAX_PLANES],unsigned h)202 static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
203 unsigned p, unsigned bpl[TPG_MAX_PLANES], unsigned h)
204 {
205 unsigned i;
206 void *vbuf;
207
208 if (p == 0 || tpg_g_buffers(tpg) > 1)
209 return vb2_plane_vaddr(&buf->vb.vb2_buf, p);
210 vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
211 for (i = 0; i < p; i++)
212 vbuf += bpl[i] * h / tpg->vdownsampling[i];
213 return vbuf;
214 }
215
/*
 * Copy plane p of the oldest queued video output buffer of @out_dev into
 * the capture plane @vcapbuf of @dev, cropping/scaling according to the
 * precalculated loop_vid_* rectangles and optionally blending in the
 * output overlay.
 *
 * Returns 0 on success, or -ENODATA if no output buffer is queued; the
 * caller then falls back to the test pattern generator for this plane.
 */
static noinline_for_stack int vivid_copy_buffer(struct vivid_dev *dev,
		struct vivid_dev *out_dev, unsigned p,
		u8 *vcapbuf, struct vivid_buffer *vid_cap_buf)
{
	bool blank = dev->must_blank[vid_cap_buf->vb.vb2_buf.index];
	struct tpg_data *tpg = &dev->tpg;
	struct vivid_buffer *vid_out_buf = NULL;
	unsigned vdiv = out_dev->fmt_out->vdownsampling[p];
	unsigned twopixsize = tpg_g_twopixelsize(tpg, p);
	unsigned img_width = tpg_hdiv(tpg, p, dev->compose_cap.width);
	unsigned img_height = dev->compose_cap.height;
	unsigned stride_cap = tpg->bytesperline[p];
	unsigned stride_out = out_dev->bytesperline_out[p];
	unsigned stride_osd = dev->display_byte_stride;
	unsigned hmax = (img_height * tpg->perc_fill) / 100;
	u8 *voutbuf;
	u8 *vosdbuf = NULL;
	unsigned y;
	bool blend = out_dev->fbuf_out_flags;
	/* Coarse scaling with Bresenham */
	unsigned vid_out_int_part;
	unsigned vid_out_fract_part;
	unsigned vid_out_y = 0;
	unsigned vid_out_error = 0;
	unsigned vid_overlay_int_part = 0;
	unsigned vid_overlay_fract_part = 0;
	unsigned vid_overlay_y = 0;
	unsigned vid_overlay_error = 0;
	unsigned vid_cap_left = tpg_hdiv(tpg, p, dev->loop_vid_cap.left);
	unsigned vid_cap_right;
	bool quick;

	/* Vertical Bresenham parameters: output lines per capture line */
	vid_out_int_part = dev->loop_vid_out.height / dev->loop_vid_cap.height;
	vid_out_fract_part = dev->loop_vid_out.height % dev->loop_vid_cap.height;

	if (!list_empty(&out_dev->vid_out_active))
		vid_out_buf = list_entry(out_dev->vid_out_active.next,
					 struct vivid_buffer, list);
	if (vid_out_buf == NULL)
		return -ENODATA;

	vid_cap_buf->vb.field = vid_out_buf->vb.field;

	/* Point voutbuf/vcapbuf at the top-left of their crop/compose rects */
	voutbuf = plane_vaddr(tpg, vid_out_buf, p,
			      out_dev->bytesperline_out, out_dev->fmt_out_rect.height);
	if (p < out_dev->fmt_out->buffers)
		voutbuf += vid_out_buf->vb.vb2_buf.planes[p].data_offset;
	voutbuf += tpg_hdiv(tpg, p, dev->loop_vid_out.left) +
		(dev->loop_vid_out.top / vdiv) * stride_out;
	vcapbuf += tpg_hdiv(tpg, p, dev->compose_cap.left) +
		(dev->compose_cap.top / vdiv) * stride_cap;

	if (dev->loop_vid_copy.width == 0 || dev->loop_vid_copy.height == 0) {
		/*
		 * If there is nothing to copy, then just fill the capture window
		 * with black.
		 */
		for (y = 0; y < hmax / vdiv; y++, vcapbuf += stride_cap)
			memcpy(vcapbuf, tpg->black_line[p], img_width);
		return 0;
	}

	if (out_dev->overlay_out_enabled &&
	    dev->loop_vid_overlay.width && dev->loop_vid_overlay.height) {
		/* Point vosdbuf at the top-left of the framebuffer copy rect */
		vosdbuf = dev->video_vbase;
		vosdbuf += (dev->loop_fb_copy.left * twopixsize) / 2 +
			   dev->loop_fb_copy.top * stride_osd;
		vid_overlay_int_part = dev->loop_vid_overlay.height /
				       dev->loop_vid_overlay_cap.height;
		vid_overlay_fract_part = dev->loop_vid_overlay.height %
					 dev->loop_vid_overlay_cap.height;
	}

	vid_cap_right = tpg_hdiv(tpg, p, dev->loop_vid_cap.left + dev->loop_vid_cap.width);
	/* quick is true if no video scaling is needed */
	quick = dev->loop_vid_out.width == dev->loop_vid_cap.width;

	/* Invalidate the scaled-line cache (no line scaled yet) */
	dev->cur_scaled_line = dev->loop_vid_out.height;
	for (y = 0; y < hmax; y += vdiv, vcapbuf += stride_cap) {
		/* osdline is true if this line requires overlay blending */
		bool osdline = vosdbuf && y >= dev->loop_vid_overlay_cap.top &&
			  y < dev->loop_vid_overlay_cap.top + dev->loop_vid_overlay_cap.height;

		/*
		 * If this line of the capture buffer doesn't get any video, then
		 * just fill with black.
		 */
		if (y < dev->loop_vid_cap.top ||
		    y >= dev->loop_vid_cap.top + dev->loop_vid_cap.height) {
			memcpy(vcapbuf, tpg->black_line[p], img_width);
			continue;
		}

		/* fill the left border with black */
		if (dev->loop_vid_cap.left)
			memcpy(vcapbuf, tpg->black_line[p], vid_cap_left);

		/* fill the right border with black */
		if (vid_cap_right < img_width)
			memcpy(vcapbuf + vid_cap_right, tpg->black_line[p],
			       img_width - vid_cap_right);

		/* No scaling and no overlay: copy the output line directly */
		if (quick && !osdline) {
			memcpy(vcapbuf + vid_cap_left,
			       voutbuf + vid_out_y * stride_out,
			       tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
			goto update_vid_out_y;
		}
		/* Same source line as last time: reuse the cached scaled line */
		if (dev->cur_scaled_line == vid_out_y) {
			memcpy(vcapbuf + vid_cap_left, dev->scaled_line,
			       tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
			goto update_vid_out_y;
		}
		if (!osdline) {
			scale_line(voutbuf + vid_out_y * stride_out, dev->scaled_line,
				tpg_hdiv(tpg, p, dev->loop_vid_out.width),
				tpg_hdiv(tpg, p, dev->loop_vid_cap.width),
				tpg_g_twopixelsize(tpg, p));
		} else {
			/*
			 * Offset in bytes within loop_vid_copy to the start of the
			 * loop_vid_overlay rectangle.
			 */
			unsigned offset =
				((dev->loop_vid_overlay.left - dev->loop_vid_copy.left) *
				 twopixsize) / 2;
			u8 *osd = vosdbuf + vid_overlay_y * stride_osd;

			/* Scale output -> loop_vid_copy size, blend, then -> capture size */
			scale_line(voutbuf + vid_out_y * stride_out, dev->blended_line,
				dev->loop_vid_out.width, dev->loop_vid_copy.width,
				tpg_g_twopixelsize(tpg, p));
			if (blend)
				blend_line(dev, vid_overlay_y + dev->loop_vid_overlay.top,
					   dev->loop_vid_overlay.left,
					   dev->blended_line + offset, osd,
					   dev->loop_vid_overlay.width, twopixsize / 2);
			else
				memcpy(dev->blended_line + offset,
				       osd, (dev->loop_vid_overlay.width * twopixsize) / 2);
			scale_line(dev->blended_line, dev->scaled_line,
				dev->loop_vid_copy.width, dev->loop_vid_cap.width,
				tpg_g_twopixelsize(tpg, p));
		}
		/* Remember which output line dev->scaled_line now holds */
		dev->cur_scaled_line = vid_out_y;
		memcpy(vcapbuf + vid_cap_left, dev->scaled_line,
		       tpg_hdiv(tpg, p, dev->loop_vid_cap.width));

update_vid_out_y:
		/* Advance the Bresenham accumulators for overlay and output */
		if (osdline) {
			vid_overlay_y += vid_overlay_int_part;
			vid_overlay_error += vid_overlay_fract_part;
			if (vid_overlay_error >= dev->loop_vid_overlay_cap.height) {
				vid_overlay_error -= dev->loop_vid_overlay_cap.height;
				vid_overlay_y++;
			}
		}
		vid_out_y += vid_out_int_part;
		vid_out_error += vid_out_fract_part;
		if (vid_out_error >= dev->loop_vid_cap.height / vdiv) {
			vid_out_error -= dev->loop_vid_cap.height / vdiv;
			vid_out_y++;
		}
	}

	if (!blank)
		return 0;
	/* Blank the remaining (unfilled) lines with the contrast color */
	for (; y < img_height; y += vdiv, vcapbuf += stride_cap)
		memcpy(vcapbuf, tpg->contrast_line[p], img_width);
	return 0;
}
386
/*
 * Fill all planes of the capture buffer @buf: either loop video from a
 * connected output device (vivid_copy_buffer) or generate the test
 * pattern, then draw the OSD text lines (timestamp, format, control
 * values) on top depending on dev->osd_mode.
 */
static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
{
	struct vivid_dev *out_dev = NULL;
	struct tpg_data *tpg = &dev->tpg;
	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
	unsigned line_height = 16 / factor;
	bool is_tv = vivid_is_sdtv_cap(dev);
	bool is_60hz = is_tv && (dev->std_cap[dev->input] & V4L2_STD_525_60);
	unsigned p;
	int line = 1;
	u8 *basep[TPG_MAX_PLANES][2];
	unsigned ms;
	char str[100];
	s32 gain;

	buf->vb.sequence = dev->vid_cap_seq_count;
	/* Mirror the low byte of the sequence into the read-only control */
	v4l2_ctrl_s_ctrl(dev->ro_int32, buf->vb.sequence & 0xff);
	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * 60 Hz standards start with the bottom field, 50 Hz standards
		 * with the top field. So if the 0-based seq_count is even,
		 * then the field is TOP for 50 Hz and BOTTOM for 60 Hz
		 * standards.
		 */
		buf->vb.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
			V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
		/*
		 * The sequence counter counts frames, not fields. So divide
		 * by two.
		 */
		buf->vb.sequence /= 2;
	} else {
		buf->vb.field = dev->field_cap;
	}
	tpg_s_field(tpg, buf->vb.field,
		    dev->field_cap == V4L2_FIELD_ALTERNATE);
	tpg_s_perc_fill_blank(tpg, dev->must_blank[buf->vb.vb2_buf.index]);

	/* Only loop video when looping is enabled and the signal is valid */
	if (vivid_vid_can_loop(dev) &&
	    ((vivid_is_svid_cap(dev) &&
	    !VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) ||
	     (vivid_is_hdmi_cap(dev) &&
	    !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])))) {
		out_dev = vivid_input_is_connected_to(dev);
		/*
		 * If the vivid instance of the output device is different
		 * from the vivid instance of this input device, then we
		 * must take care to properly serialize the output device to
		 * prevent that the buffer we are copying from is being freed.
		 *
		 * If the output device is part of the same instance, then the
		 * lock is already taken and there is no need to take the mutex.
		 *
		 * The problem with taking the mutex is that you can get
		 * deadlocked if instance A locks instance B and vice versa.
		 * It is not really worth trying to be very smart about this,
		 * so just try to take the lock, and if you can't, then just
		 * set out_dev to NULL and you will end up with a single frame
		 * of Noise (the default test pattern in this case).
		 */
		if (out_dev && dev != out_dev && !mutex_trylock(&out_dev->mutex))
			out_dev = NULL;
	}

	if (out_dev)
		vivid_precalc_copy_rects(dev, out_dev);

	for (p = 0; p < tpg_g_planes(tpg); p++) {
		void *vbuf = plane_vaddr(tpg, buf, p,
					 tpg->bytesperline, tpg->buf_height);

		/*
		 * The first plane of a multiplanar format has a non-zero
		 * data_offset. This helps testing whether the application
		 * correctly supports non-zero data offsets.
		 */
		if (p < tpg_g_buffers(tpg) && dev->fmt_cap->data_offset[p]) {
			memset(vbuf, dev->fmt_cap->data_offset[p] & 0xff,
			       dev->fmt_cap->data_offset[p]);
			vbuf += dev->fmt_cap->data_offset[p];
		}
		tpg_calc_text_basep(tpg, basep, p, vbuf);
		/* Fall back to the test pattern if looping fails or is off */
		if (!out_dev || vivid_copy_buffer(dev, out_dev, p, vbuf, buf))
			tpg_fill_plane_buffer(tpg, vivid_get_std_cap(dev),
					p, vbuf);
	}
	if (out_dev && dev != out_dev)
		mutex_unlock(&out_dev->mutex);

	dev->must_blank[buf->vb.vb2_buf.index] = false;

	/* Updates stream time, only update at the start of a new frame. */
	if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
	    (dev->vid_cap_seq_count & 1) == 0)
		dev->ms_vid_cap =
			jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);

	ms = dev->ms_vid_cap;
	if (dev->osd_mode <= 1) {
		/* OSD line 1: stream time, sequence number and field */
		snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d %u%s",
			 (ms / (60 * 60 * 1000)) % 24,
			 (ms / (60 * 1000)) % 60,
			 (ms / 1000) % 60,
			 ms % 1000,
			 buf->vb.sequence,
			 (dev->field_cap == V4L2_FIELD_ALTERNATE) ?
			 (buf->vb.field == V4L2_FIELD_TOP ?
			  " top" : " bottom") : "");
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
	}
	if (dev->osd_mode == 0) {
		/* Full OSD: source format and current control values */
		snprintf(str, sizeof(str), " %dx%d, input %d ",
			 dev->src_rect.width, dev->src_rect.height, dev->input);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);

		gain = v4l2_ctrl_g_ctrl(dev->gain);
		/* Hold the handler lock while reading ->cur values */
		mutex_lock(dev->ctrl_hdl_user_vid.lock);
		snprintf(str, sizeof(str),
			 " brightness %3d, contrast %3d, saturation %3d, hue %d ",
			 dev->brightness->cur.val,
			 dev->contrast->cur.val,
			 dev->saturation->cur.val,
			 dev->hue->cur.val);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str),
			 " autogain %d, gain %3d, alpha 0x%02x ",
			 dev->autogain->cur.val, gain, dev->alpha->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_vid.lock);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		mutex_lock(dev->ctrl_hdl_user_aud.lock);
		snprintf(str, sizeof(str),
			 " volume %3d, mute %d ",
			 dev->volume->cur.val, dev->mute->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_aud.lock);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		mutex_lock(dev->ctrl_hdl_user_gen.lock);
		snprintf(str, sizeof(str), " int32 %d, ro_int32 %d, int64 %lld, bitmask %08x ",
			 dev->int32->cur.val,
			 dev->ro_int32->cur.val,
			 *dev->int64->p_cur.p_s64,
			 dev->bitmask->cur.val);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
			 dev->boolean->cur.val,
			 dev->menu->qmenu[dev->menu->cur.val],
			 dev->string->p_cur.p_char);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str), " integer_menu %lld, value %d ",
			 dev->int_menu->qmenu_int[dev->int_menu->cur.val],
			 dev->int_menu->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_gen.lock);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		if (dev->button_pressed) {
			dev->button_pressed--;
			snprintf(str, sizeof(str), " button pressed!");
			tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		}
		if (dev->osd[0]) {
			if (vivid_is_hdmi_cap(dev)) {
				snprintf(str, sizeof(str),
					 " OSD \"%s\"", dev->osd);
				tpg_gen_text(tpg, basep, line++ * line_height,
					     16, str);
			}
			/* Expire the OSD message after 5 seconds */
			if (dev->osd_jiffies &&
			    time_is_before_jiffies(dev->osd_jiffies + 5 * HZ)) {
				dev->osd[0] = 0;
				dev->osd_jiffies = 0;
			}
		}
	}
}
559
vivid_cap_update_frame_period(struct vivid_dev * dev)560 static void vivid_cap_update_frame_period(struct vivid_dev *dev)
561 {
562 u64 f_period;
563
564 f_period = (u64)dev->timeperframe_vid_cap.numerator * 1000000000;
565 if (WARN_ON(dev->timeperframe_vid_cap.denominator == 0))
566 dev->timeperframe_vid_cap.denominator = 1;
567 do_div(f_period, dev->timeperframe_vid_cap.denominator);
568 if (dev->field_cap == V4L2_FIELD_ALTERNATE)
569 f_period >>= 1;
570 /*
571 * If "End of Frame", then offset the exposure time by 0.9
572 * of the frame period.
573 */
574 dev->cap_frame_eof_offset = f_period * 9;
575 do_div(dev->cap_frame_eof_offset, 10);
576 dev->cap_frame_period = f_period;
577 }
578
/*
 * Process one frame period of the capture thread: dequeue at most one
 * buffer from each of the video/VBI/metadata capture queues, fill it,
 * timestamp it and hand it back to vb2. @dropped_bufs is the number of
 * frame periods that elapsed since the last tick; dropped frames still
 * advance the test pattern motion counters.
 */
static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
							 int dropped_bufs)
{
	struct vivid_buffer *vid_cap_buf = NULL;
	struct vivid_buffer *vbi_cap_buf = NULL;
	struct vivid_buffer *meta_cap_buf = NULL;
	u64 f_time = 0;

	dprintk(dev, 1, "Video Capture Thread Tick\n");

	/* Keep the test pattern moving for all but one of the dropped frames */
	while (dropped_bufs-- > 1)
		tpg_update_mv_count(&dev->tpg,
				dev->field_cap == V4L2_FIELD_NONE ||
				dev->field_cap == V4L2_FIELD_ALTERNATE);

	/* Drop a certain percentage of buffers. */
	if (dev->perc_dropped_buffers &&
	    get_random_u32_below(100) < dev->perc_dropped_buffers)
		goto update_mv;

	/* Take the first queued buffer of each active capture queue */
	spin_lock(&dev->slock);
	if (!list_empty(&dev->vid_cap_active)) {
		vid_cap_buf = list_entry(dev->vid_cap_active.next, struct vivid_buffer, list);
		list_del(&vid_cap_buf->list);
	}
	if (!list_empty(&dev->vbi_cap_active)) {
		/* In ALTERNATE mode return a VBI buffer only every other field */
		if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
		    (dev->vbi_cap_seq_count & 1)) {
			vbi_cap_buf = list_entry(dev->vbi_cap_active.next,
						 struct vivid_buffer, list);
			list_del(&vbi_cap_buf->list);
		}
	}
	if (!list_empty(&dev->meta_cap_active)) {
		meta_cap_buf = list_entry(dev->meta_cap_active.next,
					  struct vivid_buffer, list);
		list_del(&meta_cap_buf->list);
	}

	spin_unlock(&dev->slock);

	if (!vid_cap_buf && !vbi_cap_buf && !meta_cap_buf)
		goto update_mv;

	f_time = ktime_get_ns() + dev->time_wrap_offset;

	if (vid_cap_buf) {
		v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req,
					&dev->ctrl_hdl_vid_cap);
		/* Fill buffer */
		vivid_fillbuff(dev, vid_cap_buf);
		dprintk(dev, 1, "filled buffer %d\n",
			vid_cap_buf->vb.vb2_buf.index);

		v4l2_ctrl_request_complete(vid_cap_buf->vb.vb2_buf.req_obj.req,
					   &dev->ctrl_hdl_vid_cap);
		vb2_buffer_done(&vid_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
				VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
		dprintk(dev, 2, "vid_cap buffer %d done\n",
			vid_cap_buf->vb.vb2_buf.index);

		/*
		 * NOTE(review): the timestamp is written after
		 * vb2_buffer_done() has already marked the buffer done —
		 * confirm this cannot race with userspace dequeueing it.
		 */
		vid_cap_buf->vb.vb2_buf.timestamp = f_time;
		if (!dev->tstamp_src_is_soe)
			vid_cap_buf->vb.vb2_buf.timestamp += dev->cap_frame_eof_offset;
	}

	if (vbi_cap_buf) {
		u64 vbi_period;

		v4l2_ctrl_request_setup(vbi_cap_buf->vb.vb2_buf.req_obj.req,
					&dev->ctrl_hdl_vbi_cap);
		/* Sliced and raw VBI use different fill routines */
		if (vbi_cap_buf->vb.vb2_buf.type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
			vivid_sliced_vbi_cap_process(dev, vbi_cap_buf);
		else
			vivid_raw_vbi_cap_process(dev, vbi_cap_buf);
		v4l2_ctrl_request_complete(vbi_cap_buf->vb.vb2_buf.req_obj.req,
					   &dev->ctrl_hdl_vbi_cap);
		vb2_buffer_done(&vbi_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
				VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
		dprintk(dev, 2, "vbi_cap %d done\n",
			vbi_cap_buf->vb.vb2_buf.index);

		/* If capturing a VBI, offset by 0.05 */
		vbi_period = dev->cap_frame_period * 5;
		do_div(vbi_period, 100);
		vbi_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset + vbi_period;
	}

	if (meta_cap_buf) {
		v4l2_ctrl_request_setup(meta_cap_buf->vb.vb2_buf.req_obj.req,
					&dev->ctrl_hdl_meta_cap);
		vivid_meta_cap_fillbuff(dev, meta_cap_buf, f_time);
		v4l2_ctrl_request_complete(meta_cap_buf->vb.vb2_buf.req_obj.req,
					   &dev->ctrl_hdl_meta_cap);
		vb2_buffer_done(&meta_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
				VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
		dprintk(dev, 2, "meta_cap %d done\n",
			meta_cap_buf->vb.vb2_buf.index);
		meta_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset;
	}

	/* dqbuf_error is a one-shot error-injection flag: clear after use */
	dev->dqbuf_error = false;

update_mv:
	/* Update the test pattern movement counters */
	tpg_update_mv_count(&dev->tpg, dev->field_cap == V4L2_FIELD_NONE ||
				       dev->field_cap == V4L2_FIELD_ALTERNATE);
}
687
/*
 * Main loop of the capture kthread: paces frame generation according to
 * timeperframe_vid_cap, maintains the shared sequence counters (with
 * periodic resync to avoid jiffies arithmetic overflow) and calls
 * vivid_thread_vid_cap_tick() once per frame period.
 */
static int vivid_thread_vid_cap(void *data)
{
	struct vivid_dev *dev = data;
	u64 numerators_since_start;
	u64 buffers_since_start;
	u64 next_jiffies_since_start;
	unsigned long jiffies_since_start;
	unsigned long cur_jiffies;
	unsigned wait_jiffies;
	unsigned numerator;
	unsigned denominator;
	int dropped_bufs;

	dprintk(dev, 1, "Video Capture Thread Start\n");

	set_freezable();

	/* Resets frame counters */
	dev->cap_seq_offset = 0;
	dev->cap_seq_count = 0;
	dev->cap_seq_resync = false;
	dev->jiffies_vid_cap = jiffies;
	dev->cap_stream_start = ktime_get_ns();
	/* time_wrap (if set) shifts timestamps to test wrap handling */
	if (dev->time_wrap)
		dev->time_wrap_offset = dev->time_wrap - dev->cap_stream_start;
	else
		dev->time_wrap_offset = 0;
	vivid_cap_update_frame_period(dev);

	for (;;) {
		try_to_freeze();
		if (kthread_should_stop())
			break;

		/*
		 * Don't block on the instance mutex (held by e.g. ioctl
		 * handlers); just yield and retry on the next iteration.
		 */
		if (!mutex_trylock(&dev->mutex)) {
			schedule();
			continue;
		}

		cur_jiffies = jiffies;
		/* A resync was requested: restart counting from now */
		if (dev->cap_seq_resync) {
			dev->jiffies_vid_cap = cur_jiffies;
			dev->cap_seq_offset = dev->cap_seq_count + 1;
			dev->cap_seq_count = 0;
			dev->cap_stream_start += dev->cap_frame_period *
						 dev->cap_seq_offset;
			vivid_cap_update_frame_period(dev);
			dev->cap_seq_resync = false;
		}
		numerator = dev->timeperframe_vid_cap.numerator;
		denominator = dev->timeperframe_vid_cap.denominator;

		/* ALTERNATE mode produces two field buffers per frame */
		if (dev->field_cap == V4L2_FIELD_ALTERNATE)
			denominator *= 2;

		/* Calculate the number of jiffies since we started streaming */
		jiffies_since_start = cur_jiffies - dev->jiffies_vid_cap;
		/* Get the number of buffers streamed since the start */
		buffers_since_start = (u64)jiffies_since_start * denominator +
				      (HZ * numerator) / 2;
		do_div(buffers_since_start, HZ * numerator);

		/*
		 * After more than 0xf0000000 (rounded down to a multiple of
		 * 'jiffies-per-day' to ease jiffies_to_msecs calculation)
		 * jiffies have passed since we started streaming reset the
		 * counters and keep track of the sequence offset.
		 */
		if (jiffies_since_start > JIFFIES_RESYNC) {
			dev->jiffies_vid_cap = cur_jiffies;
			dev->cap_seq_offset = buffers_since_start;
			buffers_since_start = 0;
		}
		dropped_bufs = buffers_since_start + dev->cap_seq_offset - dev->cap_seq_count;
		dev->cap_seq_count = buffers_since_start + dev->cap_seq_offset;
		dev->vid_cap_seq_count = dev->cap_seq_count - dev->vid_cap_seq_start;
		dev->vbi_cap_seq_count = dev->cap_seq_count - dev->vbi_cap_seq_start;
		dev->meta_cap_seq_count = dev->cap_seq_count - dev->meta_cap_seq_start;

		vivid_thread_vid_cap_tick(dev, dropped_bufs);

		/*
		 * Calculate the number of 'numerators' streamed since we started,
		 * including the current buffer.
		 */
		numerators_since_start = ++buffers_since_start * numerator;

		/* And the number of jiffies since we started */
		jiffies_since_start = jiffies - dev->jiffies_vid_cap;

		mutex_unlock(&dev->mutex);

		/*
		 * Calculate when that next buffer is supposed to start
		 * in jiffies since we started streaming.
		 */
		next_jiffies_since_start = numerators_since_start * HZ +
					   denominator / 2;
		do_div(next_jiffies_since_start, denominator);
		/* If it is in the past, then just schedule asap */
		if (next_jiffies_since_start < jiffies_since_start)
			next_jiffies_since_start = jiffies_since_start;

		wait_jiffies = next_jiffies_since_start - jiffies_since_start;
		/* Yield until the next frame period is due (or we're stopped) */
		while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
		       !kthread_should_stop())
			schedule();
	}
	dprintk(dev, 1, "Video Capture Thread End\n");
	return 0;
}
799
/* Grab (or release) the capture-configuration controls while streaming. */
static void vivid_grab_controls(struct vivid_dev *dev, bool grab)
{
	struct v4l2_ctrl *ctrls[] = {
		dev->ctrl_has_crop_cap,
		dev->ctrl_has_compose_cap,
		dev->ctrl_has_scaler_cap,
	};
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ctrls); i++)
		v4l2_ctrl_grab(ctrls[i], grab);
}
806
/*
 * Start streaming on one of the vid/vbi/meta capture queues (@pstreaming
 * identifies which one). Records the per-queue sequence start and spawns
 * the shared capture kthread if this is the first queue to start.
 *
 * Returns 0 on success or the kthread_run() error code.
 */
int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
{
	dprintk(dev, 1, "%s\n", __func__);

	/* Thread already running: just record where this queue starts */
	if (dev->kthread_vid_cap) {
		u32 seq_count = dev->cap_seq_count + dev->seq_wrap * 128;

		if (pstreaming == &dev->vid_cap_streaming)
			dev->vid_cap_seq_start = seq_count;
		else if (pstreaming == &dev->vbi_cap_streaming)
			dev->vbi_cap_seq_start = seq_count;
		else
			dev->meta_cap_seq_start = seq_count;
		*pstreaming = true;
		return 0;
	}

	/* Resets frame counters */
	tpg_init_mv_count(&dev->tpg);

	/* seq_wrap offsets the sequence start — presumably a wrap-test aid */
	dev->vid_cap_seq_start = dev->seq_wrap * 128;
	dev->vbi_cap_seq_start = dev->seq_wrap * 128;
	dev->meta_cap_seq_start = dev->seq_wrap * 128;

	dev->kthread_vid_cap = kthread_run(vivid_thread_vid_cap, dev,
					   "%s-vid-cap", dev->v4l2_dev.name);

	if (IS_ERR(dev->kthread_vid_cap)) {
		int err = PTR_ERR(dev->kthread_vid_cap);

		dev->kthread_vid_cap = NULL;
		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
		return err;
	}
	*pstreaming = true;
	vivid_grab_controls(dev, true);

	dprintk(dev, 1, "returning from %s\n", __func__);
	return 0;
}
847
vivid_stop_generating_vid_cap(struct vivid_dev * dev,bool * pstreaming)848 void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
849 {
850 dprintk(dev, 1, "%s\n", __func__);
851
852 if (dev->kthread_vid_cap == NULL)
853 return;
854
855 *pstreaming = false;
856 if (pstreaming == &dev->vid_cap_streaming) {
857 /* Release all active buffers */
858 while (!list_empty(&dev->vid_cap_active)) {
859 struct vivid_buffer *buf;
860
861 buf = list_entry(dev->vid_cap_active.next,
862 struct vivid_buffer, list);
863 list_del(&buf->list);
864 v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
865 &dev->ctrl_hdl_vid_cap);
866 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
867 dprintk(dev, 2, "vid_cap buffer %d done\n",
868 buf->vb.vb2_buf.index);
869 }
870 }
871
872 if (pstreaming == &dev->vbi_cap_streaming) {
873 while (!list_empty(&dev->vbi_cap_active)) {
874 struct vivid_buffer *buf;
875
876 buf = list_entry(dev->vbi_cap_active.next,
877 struct vivid_buffer, list);
878 list_del(&buf->list);
879 v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
880 &dev->ctrl_hdl_vbi_cap);
881 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
882 dprintk(dev, 2, "vbi_cap buffer %d done\n",
883 buf->vb.vb2_buf.index);
884 }
885 }
886
887 if (pstreaming == &dev->meta_cap_streaming) {
888 while (!list_empty(&dev->meta_cap_active)) {
889 struct vivid_buffer *buf;
890
891 buf = list_entry(dev->meta_cap_active.next,
892 struct vivid_buffer, list);
893 list_del(&buf->list);
894 v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
895 &dev->ctrl_hdl_meta_cap);
896 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
897 dprintk(dev, 2, "meta_cap buffer %d done\n",
898 buf->vb.vb2_buf.index);
899 }
900 }
901
902 if (dev->vid_cap_streaming || dev->vbi_cap_streaming ||
903 dev->meta_cap_streaming)
904 return;
905
906 /* shutdown control thread */
907 vivid_grab_controls(dev, false);
908 kthread_stop(dev->kthread_vid_cap);
909 dev->kthread_vid_cap = NULL;
910 }
911